/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/crc32.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/ftl.h"

#include "ftl_band.h"
#include "ftl_io.h"
#include "ftl_core.h"
#include "ftl_reloc.h"
#include "ftl_debug.h"

/* TODO: define some signature for meta version */
#define FTL_MD_VER 1

struct __attribute__((packed)) ftl_md_hdr {
	/* Device instance */
	struct spdk_uuid	uuid;

	/* Meta version */
	uint8_t			ver;

	/* Sequence number */
	uint64_t		seq;

	/* CRC32 checksum */
	uint32_t		checksum;
};

/* Tail metadata layout stored on media (with all three being aligned to block size): */
/* - header */
/* - valid bitmap */
/* - LBA map */
struct __attribute__((packed)) ftl_tail_md {
	struct ftl_md_hdr	hdr;

	/* Max number of lbks */
	uint64_t		num_lbks;

	uint8_t			reserved[4059];
};
SPDK_STATIC_ASSERT(sizeof(struct ftl_tail_md) == FTL_BLOCK_SIZE, "Incorrect metadata size");

struct __attribute__((packed)) ftl_head_md {
	struct ftl_md_hdr	hdr;

	/* Number of defrag cycles */
	uint64_t		wr_cnt;

	/* Number of surfaced LBAs */
	uint64_t		lba_cnt;

	/* Transfer size */
	uint32_t		xfer_size;
};

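/*
 * The helpers below express metadata sizes in logical blocks (lbks). The tail
 * metadata consists of a single-block header, the valid bitmap and the LBA
 * map, with the total padded up to a multiple of the device's transfer size.
 */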
size_t
ftl_tail_md_hdr_num_lbks(void)
{
	return spdk_divide_round_up(sizeof(struct ftl_tail_md), FTL_BLOCK_SIZE);
}

size_t
ftl_vld_map_num_lbks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_vld_map_size(dev), FTL_BLOCK_SIZE);
}

size_t
ftl_lba_map_num_lbks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_num_band_lbks(dev) * sizeof(uint64_t), FTL_BLOCK_SIZE);
}

size_t
ftl_head_md_num_lbks(const struct spdk_ftl_dev *dev)
{
	return dev->xfer_size;
}

size_t
ftl_tail_md_num_lbks(const struct spdk_ftl_dev *dev)
{
	return spdk_divide_round_up(ftl_tail_md_hdr_num_lbks() +
				    ftl_vld_map_num_lbks(dev) +
				    ftl_lba_map_num_lbks(dev),
				    dev->xfer_size) * dev->xfer_size;
}

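/* The tail metadata is written at the very end of the band's usable space. */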
static uint64_t
ftl_band_tail_md_offset(const struct ftl_band *band)
{
	return ftl_band_num_usable_lbks(band) -
	       ftl_tail_md_num_lbks(band->dev);
}

int
ftl_band_full(struct ftl_band *band, size_t offset)
{
	return offset == ftl_band_tail_md_offset(band);
}

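/*
 * A band that failed a write is marked high priority so the relocation logic
 * can move its valid data out first; it is also recorded as the device's
 * df_band if none is set yet.
 */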
void
ftl_band_write_failed(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;

	band->high_prio = 1;
	band->tail_md_ppa = ftl_to_ppa(FTL_PPA_INVALID);

	if (!dev->df_band) {
		dev->df_band = band;
	}

	ftl_reloc_add(dev->reloc, band, 0, ftl_num_band_lbks(dev), 1);
	ftl_band_set_state(band, FTL_BAND_STATE_CLOSED);
}

void
ftl_band_clear_lba_map(struct ftl_band *band)
{
	struct ftl_lba_map *lba_map = &band->lba_map;

	spdk_bit_array_clear_mask(lba_map->vld);
	memset(lba_map->map, 0, ftl_num_band_lbks(band->dev) * sizeof(uint64_t));
	lba_map->num_vld = 0;
}

static void
ftl_band_free_lba_map(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_lba_map *lba_map = &band->lba_map;

	assert(band->state == FTL_BAND_STATE_CLOSED ||
	       band->state == FTL_BAND_STATE_FREE);
	assert(lba_map->ref_cnt == 0);
	assert(lba_map->map != NULL);
	assert(!band->high_prio);

	/* Verify that band's metadata is consistent with l2p */
	if (band->num_chunks) {
		assert(ftl_band_validate_md(band) == true);
	}

	spdk_mempool_put(dev->lba_pool, lba_map->map);
	spdk_dma_free(lba_map->dma_buf);
	lba_map->map = NULL;
	lba_map->dma_buf = NULL;
}

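/*
 * The free band list is kept sorted by each band's write count so that the
 * least worn bands are handed out first (simple wear leveling).
 */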
static void
_ftl_band_set_free(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_band *lband, *prev;

	if (band == dev->df_band) {
		dev->df_band = NULL;
	}

	/* Remove the band from the closed band list */
	LIST_REMOVE(band, list_entry);

	/* Keep the list sorted by band's write count */
	LIST_FOREACH(lband, &dev->free_bands, list_entry) {
		if (lband->wr_cnt > band->wr_cnt) {
			LIST_INSERT_BEFORE(lband, band, list_entry);
			break;
		}
		prev = lband;
	}

	if (!lband) {
		if (LIST_EMPTY(&dev->free_bands)) {
			LIST_INSERT_HEAD(&dev->free_bands, band, list_entry);
		} else {
			LIST_INSERT_AFTER(prev, band, list_entry);
		}
	}

#if defined(DEBUG)
	/* Sanity check: the free list must remain sorted by write count */
	prev = NULL;
	LIST_FOREACH(lband, &dev->free_bands, list_entry) {
		if (prev) {
			assert(prev->wr_cnt <= lband->wr_cnt);
		}
		prev = lband;
	}
#endif
	dev->num_free++;
	ftl_apply_limits(dev);
}

static void
_ftl_band_set_preparing(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;

	/* Remove band from free list */
	LIST_REMOVE(band, list_entry);

	band->wr_cnt++;

	assert(dev->num_free > 0);
	dev->num_free--;

	ftl_apply_limits(dev);
}

static void
_ftl_band_set_closed(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_chunk *chunk;

	/* Set the state as free_md() checks for that */
	band->state = FTL_BAND_STATE_CLOSED;

	/* Free the lba map if there are no outstanding IOs */
	ftl_band_release_lba_map(band);

	if (spdk_likely(band->num_chunks)) {
		LIST_INSERT_HEAD(&dev->shut_bands, band, list_entry);
		CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) {
			chunk->state = FTL_CHUNK_STATE_CLOSED;
		}
	} else {
		LIST_REMOVE(band, list_entry);
	}
}

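/*
 * The CRC covers the whole metadata buffer except for the checksum field
 * itself, which is skipped by hashing the bytes before and after it
 * separately.
 */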
static uint32_t
ftl_md_calc_crc(const struct ftl_md_hdr *hdr, size_t size)
{
	size_t checkoff = offsetof(struct ftl_md_hdr, checksum);
	size_t mdoff = checkoff + sizeof(hdr->checksum);
	uint32_t crc;

	crc = spdk_crc32c_update(hdr, checkoff, 0);
	return spdk_crc32c_update((const char *)hdr + mdoff, size - mdoff, crc);
}

static void
ftl_set_md_hdr(struct ftl_band *band, struct ftl_md_hdr *hdr, size_t size)
{
	hdr->seq = band->seq;
	hdr->ver = FTL_MD_VER;
	hdr->uuid = band->dev->uuid;
	hdr->checksum = ftl_md_calc_crc(hdr, size);
}

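/*
 * The pack helpers serialize head/tail metadata into the band's DMA buffer
 * before it is written out; the unpack helpers further below perform the
 * reverse on a buffer that was just read in.
 */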
static int
ftl_pack_head_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_head_md *head = band->lba_map.dma_buf;

	head->wr_cnt = band->wr_cnt;
	head->lba_cnt = dev->num_lbas;
	head->xfer_size = dev->xfer_size;
	ftl_set_md_hdr(band, &head->hdr, sizeof(struct ftl_head_md));

	return FTL_MD_SUCCESS;
}

static int
ftl_pack_tail_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_lba_map *lba_map = &band->lba_map;
	struct ftl_tail_md *tail = lba_map->dma_buf;
	size_t map_size;
	void *vld_offset, *map_offset;

	map_size = ftl_num_band_lbks(dev) * sizeof(uint64_t);
	vld_offset = (char *)tail + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
	map_offset = (char *)vld_offset + ftl_vld_map_num_lbks(dev) * FTL_BLOCK_SIZE;

	/* Clear out the buffer */
	memset(tail, 0, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
	tail->num_lbks = ftl_num_band_lbks(dev);

	pthread_spin_lock(&lba_map->lock);
	spdk_bit_array_store_mask(lba_map->vld, vld_offset);
	pthread_spin_unlock(&lba_map->lock);

	memcpy(map_offset, lba_map->map, map_size);
	ftl_set_md_hdr(band, &tail->hdr, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);

	return FTL_MD_SUCCESS;
}

static int
ftl_md_hdr_vld(struct spdk_ftl_dev *dev, const struct ftl_md_hdr *hdr, size_t size)
{
	if (spdk_uuid_compare(&dev->uuid, &hdr->uuid) != 0) {
		return FTL_MD_NO_MD;
	}

	if (hdr->ver != FTL_MD_VER) {
		return FTL_MD_INVALID_VER;
	}

	if (ftl_md_calc_crc(hdr, size) != hdr->checksum) {
		return FTL_MD_INVALID_CRC;
	}

	return FTL_MD_SUCCESS;
}

static int
ftl_unpack_tail_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	size_t map_size;
	void *vld_offset, *map_offset;
	struct ftl_lba_map *lba_map = &band->lba_map;
	struct ftl_tail_md *tail = lba_map->dma_buf;
	int rc;

	map_size = ftl_num_band_lbks(dev) * sizeof(uint64_t);
	vld_offset = (char *)tail + ftl_tail_md_hdr_num_lbks() * FTL_BLOCK_SIZE;
	map_offset = (char *)vld_offset + ftl_vld_map_num_lbks(dev) * FTL_BLOCK_SIZE;

	rc = ftl_md_hdr_vld(dev, &tail->hdr, ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE);
	if (rc) {
		return rc;
	}

	if (tail->num_lbks != ftl_num_band_lbks(dev)) {
		return FTL_MD_INVALID_SIZE;
	}

	if (lba_map->vld) {
		spdk_bit_array_load_mask(lba_map->vld, vld_offset);
	}

	if (lba_map->map) {
		memcpy(lba_map->map, map_offset, map_size);
	}

	band->seq = tail->hdr.seq;
	return FTL_MD_SUCCESS;
}

static int
ftl_unpack_head_md(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_head_md *head = band->lba_map.dma_buf;
	int rc;

	rc = ftl_md_hdr_vld(dev, &head->hdr, sizeof(struct ftl_head_md));
	if (rc) {
		return rc;
	}

	band->seq = head->hdr.seq;
	band->wr_cnt = head->wr_cnt;

	if (dev->global_md.num_lbas == 0) {
		dev->global_md.num_lbas = head->lba_cnt;
	}

	if (dev->global_md.num_lbas != head->lba_cnt) {
		return FTL_MD_INVALID_SIZE;
	}

	if (dev->xfer_size != head->xfer_size) {
		return FTL_MD_INVALID_SIZE;
	}

	return FTL_MD_SUCCESS;
}

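/*
 * Data is striped across the band's chunks in xfer_size units, so the tail
 * metadata's physical address is derived by splitting its logical offset into
 * full stripes (the lbk part) and a chunk index within the stripe.
 */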
struct ftl_ppa
ftl_band_tail_md_ppa(struct ftl_band *band)
{
	struct ftl_ppa ppa;
	struct ftl_chunk *chunk;
	struct spdk_ftl_dev *dev = band->dev;
	size_t xfer_size = dev->xfer_size;
	size_t num_req = ftl_band_tail_md_offset(band) / xfer_size;
	size_t i;

	if (spdk_unlikely(!band->num_chunks)) {
		return ftl_to_ppa(FTL_PPA_INVALID);
	}

	/* Metadata should be aligned to xfer size */
	assert(ftl_band_tail_md_offset(band) % xfer_size == 0);

	chunk = CIRCLEQ_FIRST(&band->chunks);
	for (i = 0; i < num_req % band->num_chunks; ++i) {
		chunk = ftl_band_next_chunk(band, chunk);
	}

	ppa.lbk = (num_req / band->num_chunks) * xfer_size;
	ppa.chk = band->id;
	ppa.pu = chunk->punit->start_ppa.pu;
	ppa.grp = chunk->punit->start_ppa.grp;

	return ppa;
}

struct ftl_ppa
ftl_band_head_md_ppa(struct ftl_band *band)
{
	struct ftl_ppa ppa;

	if (spdk_unlikely(!band->num_chunks)) {
		return ftl_to_ppa(FTL_PPA_INVALID);
	}

	ppa = CIRCLEQ_FIRST(&band->chunks)->punit->start_ppa;
	ppa.chk = band->id;

	return ppa;
}

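/*
 * Only the FREE, PREP and CLOSED transitions require extra bookkeeping; the
 * asserts verify they are entered from CLOSED, FREE and CLOSING respectively.
 * All other states are simply recorded.
 */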
void
ftl_band_set_state(struct ftl_band *band, enum ftl_band_state state)
{
	switch (state) {
	case FTL_BAND_STATE_FREE:
		assert(band->state == FTL_BAND_STATE_CLOSED);
		_ftl_band_set_free(band);
		break;

	case FTL_BAND_STATE_PREP:
		assert(band->state == FTL_BAND_STATE_FREE);
		_ftl_band_set_preparing(band);
		break;

	case FTL_BAND_STATE_CLOSED:
		if (band->state != FTL_BAND_STATE_CLOSED) {
			assert(band->state == FTL_BAND_STATE_CLOSING);
			_ftl_band_set_closed(band);
		}
		break;

	default:
		break;
	}

	band->state = state;
}

void
ftl_band_set_addr(struct ftl_band *band, uint64_t lba, struct ftl_ppa ppa)
{
	struct ftl_lba_map *lba_map = &band->lba_map;
	uint64_t offset;

	assert(lba != FTL_LBA_INVALID);

	offset = ftl_band_lbkoff_from_ppa(band, ppa);
	pthread_spin_lock(&lba_map->lock);

	lba_map->num_vld++;
	lba_map->map[offset] = lba;
	spdk_bit_array_set(lba_map->vld, offset);

	pthread_spin_unlock(&lba_map->lock);
}

size_t
ftl_band_age(const struct ftl_band *band)
{
	return (size_t)(band->dev->seq - band->seq);
}

size_t
ftl_band_num_usable_lbks(const struct ftl_band *band)
{
	return band->num_chunks * ftl_dev_lbks_in_chunk(band->dev);
}

size_t
ftl_band_user_lbks_left(const struct ftl_band *band, size_t offset)
{
	size_t tail_md_offset = ftl_band_tail_md_offset(band);

	if (spdk_unlikely(offset <= ftl_head_md_num_lbks(band->dev))) {
		return ftl_band_user_lbks(band);
	}

	if (spdk_unlikely(offset > tail_md_offset)) {
		return 0;
	}

	return tail_md_offset - offset;
}

size_t
ftl_band_user_lbks(const struct ftl_band *band)
{
	return ftl_band_num_usable_lbks(band) -
	       ftl_head_md_num_lbks(band->dev) -
	       ftl_tail_md_num_lbks(band->dev);
}

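/*
 * The helpers below translate between physical page addresses (PPAs) and
 * band-relative lbk offsets. A band spans one chunk on every parallel unit,
 * so the flattened punit index selects the chunk and ppa.lbk the block
 * within it.
 */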
struct ftl_band *
ftl_band_from_ppa(struct spdk_ftl_dev *dev, struct ftl_ppa ppa)
{
	assert(ppa.chk < ftl_dev_num_bands(dev));
	return &dev->bands[ppa.chk];
}

struct ftl_chunk *
ftl_band_chunk_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
{
	struct spdk_ftl_dev *dev = band->dev;
	unsigned int punit;

	punit = ftl_ppa_flatten_punit(dev, ppa);
	assert(punit < ftl_dev_num_punits(dev));

	return &band->chunk_buf[punit];
}

uint64_t
ftl_band_lbkoff_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
{
	struct spdk_ftl_dev *dev = band->dev;
	unsigned int punit;

	punit = ftl_ppa_flatten_punit(dev, ppa);
	assert(ppa.chk == band->id);

	return punit * ftl_dev_lbks_in_chunk(dev) + ppa.lbk;
}

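/*
 * Advance a PPA by num_lbks along the band's write order: whole stripes
 * (xfer_size lbks on every operational chunk) are added to ppa.lbk directly,
 * then the remainder is walked chunk by chunk in xfer_size steps.
 */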
struct ftl_ppa
ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t num_lbks)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_chunk *chunk;
	unsigned int punit_num;
	size_t num_xfers, num_stripes;

	assert(ppa.chk == band->id);

	punit_num = ftl_ppa_flatten_punit(dev, ppa);
	chunk = &band->chunk_buf[punit_num];

	num_lbks += (ppa.lbk % dev->xfer_size);
	ppa.lbk -= (ppa.lbk % dev->xfer_size);

#if defined(DEBUG)
	/* Check that the number of chunks has not been changed */
	struct ftl_chunk *_chunk;
	size_t _num_chunks = 0;
	CIRCLEQ_FOREACH(_chunk, &band->chunks, circleq) {
		if (spdk_likely(_chunk->state != FTL_CHUNK_STATE_BAD)) {
			_num_chunks++;
		}
	}
	assert(band->num_chunks == _num_chunks);
#endif
	assert(band->num_chunks != 0);
	num_stripes = (num_lbks / dev->xfer_size) / band->num_chunks;
	ppa.lbk += num_stripes * dev->xfer_size;
	num_lbks -= num_stripes * dev->xfer_size * band->num_chunks;

	if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
		return ftl_to_ppa(FTL_PPA_INVALID);
	}

	num_xfers = num_lbks / dev->xfer_size;
	for (size_t i = 0; i < num_xfers; ++i) {
		/* When the last chunk is reached the lbk part of the address */
		/* needs to be increased by xfer_size */
		if (ftl_band_chunk_is_last(band, chunk)) {
			ppa.lbk += dev->xfer_size;
			if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
				return ftl_to_ppa(FTL_PPA_INVALID);
			}
		}

		chunk = ftl_band_next_operational_chunk(band, chunk);
		ppa.grp = chunk->start_ppa.grp;
		ppa.pu = chunk->start_ppa.pu;

		num_lbks -= dev->xfer_size;
	}

	if (num_lbks) {
		ppa.lbk += num_lbks;
		if (ppa.lbk > ftl_dev_lbks_in_chunk(dev)) {
			return ftl_to_ppa(FTL_PPA_INVALID);
		}
	}

	return ppa;
}

static size_t
ftl_xfer_offset_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
{
	struct ftl_chunk *chunk, *current_chunk;
	unsigned int punit_offset = 0;
	size_t off, num_stripes, xfer_size = band->dev->xfer_size;

	assert(ppa.chk == band->id);

	num_stripes = (ppa.lbk / xfer_size) * band->num_chunks;
	off = ppa.lbk % xfer_size;

	current_chunk = ftl_band_chunk_from_ppa(band, ppa);
	CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) {
		if (current_chunk == chunk) {
			break;
		}
		punit_offset++;
	}

	return xfer_size * (num_stripes + punit_offset) + off;
}

struct ftl_ppa
ftl_band_ppa_from_lbkoff(struct ftl_band *band, uint64_t lbkoff)
{
	struct ftl_ppa ppa = { .ppa = 0 };
	struct spdk_ftl_dev *dev = band->dev;
	uint64_t punit;

	punit = lbkoff / ftl_dev_lbks_in_chunk(dev) + dev->range.begin;

	ppa.lbk = lbkoff % ftl_dev_lbks_in_chunk(dev);
	ppa.chk = band->id;
	ppa.pu = punit / dev->geo.num_grp;
	ppa.grp = punit % dev->geo.num_grp;

	return ppa;
}

struct ftl_ppa
ftl_band_next_ppa(struct ftl_band *band, struct ftl_ppa ppa, size_t offset)
{
	uint64_t lbkoff = ftl_band_lbkoff_from_ppa(band, ppa);
	return ftl_band_ppa_from_lbkoff(band, lbkoff + offset);
}

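/*
 * The LBA map buffers are reference counted: alloc takes the first reference,
 * acquire/release adjust it, and the buffers are returned to their pools only
 * when the count drops back to zero.
 */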
void
ftl_band_acquire_lba_map(struct ftl_band *band)
{
	assert(band->lba_map.map != NULL);
	band->lba_map.ref_cnt++;
}

int
ftl_band_alloc_lba_map(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_lba_map *lba_map = &band->lba_map;

	assert(lba_map->ref_cnt == 0);
	assert(lba_map->map == NULL);

	lba_map->map = spdk_mempool_get(dev->lba_pool);
	if (!lba_map->map) {
		return -1;
	}

	lba_map->dma_buf = spdk_dma_zmalloc(ftl_tail_md_num_lbks(dev) * FTL_BLOCK_SIZE,
					    FTL_BLOCK_SIZE, NULL);
	if (!lba_map->dma_buf) {
		spdk_mempool_put(dev->lba_pool, lba_map->map);
		return -1;
	}

	ftl_band_acquire_lba_map(band);
	return 0;
}

void
ftl_band_release_lba_map(struct ftl_band *band)
{
	struct ftl_lba_map *lba_map = &band->lba_map;

	assert(lba_map->map != NULL);
	assert(lba_map->ref_cnt > 0);
	lba_map->ref_cnt--;

	if (lba_map->ref_cnt == 0) {
		ftl_band_free_lba_map(band);
	}
}

static void
ftl_read_md_cb(struct ftl_io *io, void *arg, int status)
{
	struct ftl_md_io *md_io = (struct ftl_md_io *)io;

	if (!status) {
		status = md_io->pack_fn(md_io->io.band);
	} else {
		status = FTL_MD_IO_FAILURE;
	}

	md_io->cb_fn(io, md_io->cb_ctx, status);
}

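/*
 * Metadata IOs reuse the generic ftl_io machinery in PPA mode with the
 * FTL_IO_MD flag set; reads target the band's LBA map DMA buffer and carry
 * the unpack callback that ftl_read_md_cb runs once the data is in memory.
 */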
static struct ftl_md_io *
ftl_io_init_md_read(struct spdk_ftl_dev *dev, struct ftl_ppa ppa,
		    struct ftl_band *band, size_t lbk_cnt, ftl_io_fn fn,
		    ftl_md_pack_fn pack_fn, ftl_io_fn cb_fn, void *cb_ctx)
{
	struct ftl_md_io *io;
	struct ftl_io_init_opts opts = {
		.dev = dev,
		.io = NULL,
		.rwb_batch = NULL,
		.band = band,
		.size = sizeof(*io),
		.flags = FTL_IO_MD | FTL_IO_PPA_MODE,
		.type = FTL_IO_READ,
		.lbk_cnt = lbk_cnt,
		.cb_fn = fn,
		.data = band->lba_map.dma_buf,
	};

	io = (struct ftl_md_io *)ftl_io_init_internal(&opts);
	if (!io) {
		return NULL;
	}

	io->io.ppa = ppa;
	io->pack_fn = pack_fn;
	io->cb_fn = cb_fn;
	io->cb_ctx = cb_ctx;

	return io;
}

static struct ftl_io *
ftl_io_init_md_write(struct spdk_ftl_dev *dev, struct ftl_band *band,
		     void *data, size_t lbk_cnt, ftl_io_fn cb)
{
	struct ftl_io_init_opts opts = {
		.dev = dev,
		.io = NULL,
		.rwb_batch = NULL,
		.band = band,
		.size = sizeof(struct ftl_io),
		.flags = FTL_IO_MD | FTL_IO_PPA_MODE,
		.type = FTL_IO_WRITE,
		.lbk_cnt = lbk_cnt,
		.cb_fn = cb,
		.data = data,
		.md = NULL,
	};

	return ftl_io_init_internal(&opts);
}

static int
ftl_band_write_md(struct ftl_band *band, size_t lbk_cnt,
		  ftl_md_pack_fn md_fn, ftl_io_fn cb)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_io *io;

	io = ftl_io_init_md_write(dev, band, band->lba_map.dma_buf, lbk_cnt, cb);
	if (!io) {
		return -ENOMEM;
	}

	md_fn(band);

	ftl_io_write(io);
	return 0;
}

void
ftl_band_md_clear(struct ftl_band *band)
{
	band->seq = 0;
	band->wr_cnt = 0;
	band->lba_map.num_vld = 0;
	band->lba_map.map = NULL;
}

int
ftl_band_write_head_md(struct ftl_band *band, ftl_io_fn cb)
{
	return ftl_band_write_md(band, ftl_head_md_num_lbks(band->dev),
				 ftl_pack_head_md, cb);
}

int
ftl_band_write_tail_md(struct ftl_band *band, ftl_io_fn cb)
{
	return ftl_band_write_md(band, ftl_tail_md_num_lbks(band->dev),
				 ftl_pack_tail_md, cb);
}

static struct ftl_ppa
ftl_band_lba_map_ppa(struct ftl_band *band, size_t offset)
{
	return ftl_band_next_xfer_ppa(band, band->tail_md_ppa,
				      ftl_tail_md_hdr_num_lbks() +
				      ftl_vld_map_num_lbks(band->dev) +
				      offset);
}

static int
ftl_band_read_md(struct ftl_band *band, size_t lbk_cnt, struct ftl_ppa start_ppa,
		 ftl_io_fn fn, ftl_md_pack_fn pack_fn, ftl_io_fn cb_fn, void *cb_ctx)
{
	struct spdk_ftl_dev *dev = band->dev;
	struct ftl_md_io *io;

	if (spdk_unlikely(!band->num_chunks)) {
		return -ENOENT;
	}

	io = ftl_io_init_md_read(dev, start_ppa, band, lbk_cnt, fn, pack_fn, cb_fn, cb_ctx);
	if (!io) {
		return -ENOMEM;
	}

	ftl_io_read((struct ftl_io *)io);
	return 0;
}

int
ftl_band_read_tail_md(struct ftl_band *band, struct ftl_ppa ppa, ftl_io_fn cb_fn, void *cb_ctx)
{
	return ftl_band_read_md(band, ftl_tail_md_num_lbks(band->dev), ppa,
				ftl_read_md_cb, ftl_unpack_tail_md, cb_fn, cb_ctx);
}

static size_t
ftl_lba_map_offset_from_ppa(struct ftl_band *band, struct ftl_ppa ppa)
{
	size_t offset;
	struct ftl_ppa start_ppa = ftl_band_lba_map_ppa(band, 0);

	offset = ftl_xfer_offset_from_ppa(band, ppa) - ftl_xfer_offset_from_ppa(band, start_ppa);
	assert(offset < ftl_lba_map_num_lbks(band->dev));

	return offset;
}

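/*
 * Only a slice of the LBA map may have been requested, so the read data is
 * copied from the DMA buffer into the in-memory map at the lbk offset that
 * corresponds to the PPA the IO was issued to.
 */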
static void
ftl_read_lba_map_cb(struct ftl_io *io, void *arg, int status)
{
	struct ftl_md_io *md_io = (struct ftl_md_io *)io;
	struct ftl_lba_map *lba_map = &io->band->lba_map;
	uint64_t offset;

	offset = ftl_lba_map_offset_from_ppa(io->band, io->ppa);
	assert(offset + io->lbk_cnt <= ftl_lba_map_num_lbks(io->dev));

	if (!status) {
		memcpy((char *)lba_map->map + offset * FTL_BLOCK_SIZE, lba_map->dma_buf,
		       io->lbk_cnt * FTL_BLOCK_SIZE);
	}

	md_io->cb_fn(io, md_io->cb_ctx, status);
}

int
ftl_band_read_lba_map(struct ftl_band *band, size_t offset, size_t lba_cnt,
		      ftl_io_fn cb_fn, void *cb_ctx)
{
	size_t lbk_cnt, lbk_off;

	lbk_off = offset * sizeof(uint64_t) / FTL_BLOCK_SIZE;
	lbk_cnt = spdk_divide_round_up(lba_cnt * sizeof(uint64_t), FTL_BLOCK_SIZE);

	assert(lbk_off + lbk_cnt <= ftl_lba_map_num_lbks(band->dev));

	return ftl_band_read_md(band, lbk_cnt, ftl_band_lba_map_ppa(band, lbk_off),
				ftl_read_lba_map_cb, NULL, cb_fn, cb_ctx);
}

int
ftl_band_read_head_md(struct ftl_band *band, ftl_io_fn cb_fn, void *cb_ctx)
{
	return ftl_band_read_md(band,
				ftl_head_md_num_lbks(band->dev),
				ftl_band_head_md_ppa(band),
				ftl_read_md_cb,
				ftl_unpack_head_md,
				cb_fn,
				cb_ctx);
}

static void
ftl_band_remove_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
{
	CIRCLEQ_REMOVE(&band->chunks, chunk, circleq);
	band->num_chunks--;
}

static void
ftl_erase_fail(struct ftl_io *io, int status)
{
	struct ftl_chunk *chunk;
	char buf[128];

	SPDK_ERRLOG("Erase failed @ppa: %s, status: %d\n",
		    ftl_ppa2str(io->ppa, buf, sizeof(buf)), status);

	chunk = ftl_band_chunk_from_ppa(io->band, io->ppa);
	chunk->state = FTL_CHUNK_STATE_BAD;
	ftl_band_remove_chunk(io->band, chunk);
}

static void
ftl_band_erase_cb(struct ftl_io *io, void *ctx, int status)
{
	struct ftl_chunk *chunk;

	if (spdk_unlikely(status)) {
		ftl_erase_fail(io, status);
		return;
	}
	chunk = ftl_band_chunk_from_ppa(io->band, io->ppa);
	chunk->state = FTL_CHUNK_STATE_FREE;
}

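/*
 * Erases are issued chunk by chunk; a chunk that fails to erase is marked
 * bad and dropped from the band, shrinking the band's usable capacity rather
 * than failing the whole band.
 */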
int
ftl_band_erase(struct ftl_band *band)
{
	struct ftl_chunk *chunk;
	struct ftl_io *io;
	int rc = 0;

	assert(band->state == FTL_BAND_STATE_CLOSED ||
	       band->state == FTL_BAND_STATE_FREE);

	ftl_band_set_state(band, FTL_BAND_STATE_PREP);

	CIRCLEQ_FOREACH(chunk, &band->chunks, circleq) {
		if (chunk->state == FTL_CHUNK_STATE_FREE) {
			continue;
		}

		io = ftl_io_erase_init(band, 1, ftl_band_erase_cb);
		if (!io) {
			rc = -ENOMEM;
			break;
		}

		io->ppa = chunk->start_ppa;
		rc = ftl_io_erase(io);
		if (rc) {
			assert(0);
			/* TODO: change band's state back to close? */
			break;
		}
	}

	return rc;
}

int
ftl_band_write_prep(struct ftl_band *band)
{
	struct spdk_ftl_dev *dev = band->dev;

	if (ftl_band_alloc_lba_map(band)) {
		return -1;
	}

	band->seq = ++dev->seq;
	return 0;
}

struct ftl_chunk *
ftl_band_next_operational_chunk(struct ftl_band *band, struct ftl_chunk *chunk)
{
	struct ftl_chunk *result = NULL;
	struct ftl_chunk *entry;

	if (spdk_unlikely(!band->num_chunks)) {
		return NULL;
	}

	/* Erasing band may fail after it was assigned to wptr. */
	/* In such a case chunk is no longer in band->chunks queue. */
	if (spdk_likely(chunk->state != FTL_CHUNK_STATE_BAD)) {
		result = ftl_band_next_chunk(band, chunk);
	} else {
		CIRCLEQ_FOREACH_REVERSE(entry, &band->chunks, circleq) {
			if (entry->pos > chunk->pos) {
				result = entry;
			} else {
				if (!result) {
					result = CIRCLEQ_FIRST(&band->chunks);
				}
				break;
			}
		}
	}

	return result;
}