lib/ftl: improved padding during shutdown
Padding requests are issued in multiple batches at the same time when
shutdown occurs. This allows for a faster shutdown.

Change-Id: Iea40d2418bedbd7cf3c6865e5eb8f85871db13cd
Signed-off-by: Kozlowski Mateusz <mateusz.kozlowski@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/454578
Reviewed-by: Darek Stojaczyk <dariusz.stojaczyk@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-by: Konrad Sztyber <konrad.sztyber@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
This commit is contained in:
parent
658d118c06
commit
c2868aeb57
@ -120,7 +120,7 @@ ftl_tail_md_num_lbks(const struct spdk_ftl_dev *dev)
|
||||
}
|
||||
|
||||
static uint64_t
|
||||
ftl_band_tail_md_offset(struct ftl_band *band)
|
||||
ftl_band_tail_md_offset(const struct ftl_band *band)
|
||||
{
|
||||
return ftl_band_num_usable_lbks(band) -
|
||||
ftl_tail_md_num_lbks(band->dev);
|
||||
@ -515,6 +515,22 @@ ftl_band_num_usable_lbks(const struct ftl_band *band)
|
||||
return band->num_chunks * ftl_dev_lbks_in_chunk(band->dev);
|
||||
}
|
||||
|
||||
size_t
|
||||
ftl_band_user_lbks_left(const struct ftl_band *band, size_t offset)
|
||||
{
|
||||
size_t tail_md_offset = ftl_band_tail_md_offset(band);
|
||||
|
||||
if (spdk_unlikely(offset <= ftl_head_md_num_lbks(band->dev))) {
|
||||
return ftl_band_user_lbks(band);
|
||||
}
|
||||
|
||||
if (spdk_unlikely(offset > tail_md_offset)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return tail_md_offset - offset;
|
||||
}
|
||||
|
||||
size_t
|
||||
ftl_band_user_lbks(const struct ftl_band *band)
|
||||
{
|
||||
|
@ -172,6 +172,7 @@ struct ftl_ppa ftl_band_next_xfer_ppa(struct ftl_band *band, struct ftl_ppa ppa,
|
||||
struct ftl_ppa ftl_band_next_ppa(struct ftl_band *band, struct ftl_ppa ppa,
|
||||
size_t offset);
|
||||
size_t ftl_band_num_usable_lbks(const struct ftl_band *band);
|
||||
size_t ftl_band_user_lbks_left(const struct ftl_band *band, size_t offset);
|
||||
size_t ftl_band_user_lbks(const struct ftl_band *band);
|
||||
void ftl_band_set_addr(struct ftl_band *band, uint64_t lba,
|
||||
struct ftl_ppa ppa);
|
||||
|
@ -470,6 +470,12 @@ ftl_wptr_advance(struct ftl_wptr *wptr, size_t xfer_size)
|
||||
}
|
||||
}
|
||||
|
||||
static size_t
|
||||
ftl_wptr_user_lbks_left(const struct ftl_wptr *wptr)
|
||||
{
|
||||
return ftl_band_user_lbks_left(wptr->band, wptr->offset);
|
||||
}
|
||||
|
||||
static int
|
||||
ftl_wptr_ready(struct ftl_wptr *wptr)
|
||||
{
|
||||
@ -605,11 +611,13 @@ ftl_remove_free_bands(struct spdk_ftl_dev *dev)
|
||||
}
|
||||
|
||||
static void
|
||||
ftl_process_shutdown(struct spdk_ftl_dev *dev)
|
||||
ftl_wptr_process_shutdown(struct ftl_wptr *wptr)
|
||||
{
|
||||
struct spdk_ftl_dev *dev = wptr->dev;
|
||||
size_t size = ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_INTERNAL) +
|
||||
ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_USER);
|
||||
size_t num_active = dev->xfer_size * ftl_rwb_get_active_batches(dev->rwb);
|
||||
size_t band_length, rwb_free_space, pad_length;
|
||||
|
||||
num_active = num_active ? num_active : dev->xfer_size;
|
||||
if (size >= num_active) {
|
||||
@ -622,10 +630,12 @@ ftl_process_shutdown(struct spdk_ftl_dev *dev)
|
||||
ftl_remove_free_bands(dev);
|
||||
}
|
||||
|
||||
band_length = ftl_wptr_user_lbks_left(wptr);
|
||||
rwb_free_space = ftl_rwb_size(dev->rwb) - size;
|
||||
pad_length = spdk_min(band_length, rwb_free_space);
|
||||
|
||||
/* Pad write buffer until band is full */
|
||||
/* TODO : It would be better to request padding to as many as PUs possible */
|
||||
/* instead of requesting to one PU at a time */
|
||||
ftl_rwb_pad(dev, num_active - size);
|
||||
ftl_rwb_pad(dev, pad_length);
|
||||
}
|
||||
|
||||
static int
|
||||
@ -1314,7 +1324,7 @@ ftl_wptr_process_writes(struct ftl_wptr *wptr)
|
||||
}
|
||||
|
||||
if (dev->halt) {
|
||||
ftl_process_shutdown(dev);
|
||||
ftl_wptr_process_shutdown(wptr);
|
||||
}
|
||||
|
||||
batch = ftl_rwb_pop(dev->rwb);
|
||||
|
@ -324,6 +324,12 @@ ftl_rwb_num_batches(const struct ftl_rwb *rwb)
|
||||
return rwb->num_batches;
|
||||
}
|
||||
|
||||
size_t
|
||||
ftl_rwb_size(const struct ftl_rwb *rwb)
|
||||
{
|
||||
return rwb->num_batches * rwb->xfer_size;
|
||||
}
|
||||
|
||||
size_t
|
||||
ftl_rwb_batch_get_offset(const struct ftl_rwb_batch *batch)
|
||||
{
|
||||
|
@ -101,6 +101,7 @@ void ftl_rwb_set_limits(struct ftl_rwb *rwb, const size_t limit[FTL_RWB_TYPE_MAX
|
||||
void ftl_rwb_get_limits(struct ftl_rwb *rwb, size_t limit[FTL_RWB_TYPE_MAX]);
|
||||
size_t ftl_rwb_num_acquired(struct ftl_rwb *rwb, enum ftl_rwb_entry_type type);
|
||||
size_t ftl_rwb_num_batches(const struct ftl_rwb *rwb);
|
||||
size_t ftl_rwb_size(const struct ftl_rwb *rwb);
|
||||
struct ftl_rwb_entry *ftl_rwb_acquire(struct ftl_rwb *rwb, enum ftl_rwb_entry_type type);
|
||||
struct ftl_rwb_batch *ftl_rwb_pop(struct ftl_rwb *rwb);
|
||||
struct ftl_rwb_batch *ftl_rwb_first_batch(struct ftl_rwb *rwb);
|
||||
|
Loading…
Reference in New Issue
Block a user