lib/ftl: track number of pending write buffer entries

Track the number of acquired but not yet submitted write buffer entries
so that the number of entries that still need to be padded can be
calculated correctly.

Change-Id: Ie201681937ad1d03ec125aa5912311c54a7e35c9
Signed-off-by: Konrad Sztyber <konrad.sztyber@intel.com>
Reviewed-on: https://review.gerrithub.io/c/spdk/spdk/+/466934
Reviewed-by: Mateusz Kozlowski <mateusz.kozlowski@intel.com>
Reviewed-by: Wojciech Malikowski <wojciech.malikowski@intel.com>
Reviewed-by: Tomasz Zawadzki <tomasz.zawadzki@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Author: Konrad Sztyber, 2019-08-30 13:46:38 +02:00 (committed by Ben Walker)
Commit: a2714d414f, parent: cf3d42961b
3 changed files with 22 additions and 5 deletions

--- a/lib/ftl/ftl_core.c
+++ b/lib/ftl/ftl_core.c

@@ -769,8 +769,7 @@ static void
 ftl_wptr_pad_band(struct ftl_wptr *wptr)
 {
 	struct spdk_ftl_dev *dev = wptr->dev;
-	size_t size = ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_INTERNAL) +
-		      ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_USER);
+	size_t size = ftl_rwb_num_pending(dev->rwb);
 	size_t blocks_left, rwb_size, pad_size;
 
 	blocks_left = ftl_wptr_user_lbks_left(wptr);
@@ -787,8 +786,7 @@ static void
 ftl_wptr_process_shutdown(struct ftl_wptr *wptr)
 {
 	struct spdk_ftl_dev *dev = wptr->dev;
-	size_t size = ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_INTERNAL) +
-		      ftl_rwb_num_acquired(dev->rwb, FTL_RWB_TYPE_USER);
+	size_t size = ftl_rwb_num_pending(dev->rwb);
 	size_t num_active = dev->xfer_size * ftl_rwb_get_active_batches(dev->rwb);
 
 	num_active = num_active ? num_active : dev->xfer_size;
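
Both call sites previously summed ftl_rwb_num_acquired() over the user and
internal entry types. An entry stays acquired from ftl_rwb_acquire() until its
batch is released after the media write completes, so that sum also counts
data already popped for submission; padding only needs to top up entries still
sitting in the buffer. The toy model below sketches that distinction. It is
not SPDK code and every name in it is hypothetical:

/*
 * Toy model, not SPDK code; every name here is hypothetical. An entry is
 * "acquired" until its batch completes on media, but stops being
 * "pending" as soon as the batch is popped for submission, so only the
 * pending count tells us how much unsubmitted data may need padding.
 */
#include <assert.h>
#include <stdio.h>

#define XFER_SIZE 16 /* entries per batch, mirroring rwb->xfer_size */

static unsigned int num_acquired; /* acquire() .. release_batch() */
static unsigned int num_pending;  /* acquire() .. pop_batch()     */

static void acquire(void)       { num_acquired++; num_pending++; }
static void pop_batch(void)     { num_pending -= XFER_SIZE; }
static void release_batch(void) { num_acquired -= XFER_SIZE; }

int main(void)
{
	/* Acquire two batches' worth of entries, then submit one batch. */
	for (int i = 0; i < 2 * XFER_SIZE; i++) {
		acquire();
	}
	pop_batch();

	/* The in-flight batch needs no padding, yet it stays acquired
	 * until its write completes, so num_acquired overstates the
	 * amount of data waiting in the buffer by one full batch. */
	printf("acquired=%u pending=%u\n", num_acquired, num_pending);
	assert(num_pending == XFER_SIZE);
	assert(num_acquired == 2 * XFER_SIZE);

	release_batch();
	return 0;
}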

--- a/lib/ftl/ftl_rwb.c
+++ b/lib/ftl/ftl_rwb.c

@@ -83,6 +83,8 @@ struct ftl_rwb {
 	/* Number of acquired entries */
 	unsigned int num_acquired[FTL_RWB_TYPE_MAX];
+	/* Number of acquired but not yet submitted entries */
+	unsigned int num_pending;
 	/* User/internal limits */
 	size_t limits[FTL_RWB_TYPE_MAX];
@@ -302,7 +304,7 @@ ftl_rwb_batch_release(struct ftl_rwb_batch *batch)
 		num_acquired = __atomic_fetch_sub(&rwb->num_acquired[ftl_rwb_entry_type(entry)], 1,
 						  __ATOMIC_SEQ_CST);
 		entry->band = NULL;
 		assert(num_acquired > 0);
 	}
 
 	pthread_spin_lock(&rwb->lock);
@@ -371,6 +373,14 @@ ftl_rwb_batch_revert(struct ftl_rwb_batch *batch)
 	if (spdk_ring_enqueue(rwb->prio_queue, (void **)&batch, 1, NULL) != 1) {
 		assert(0 && "Should never happen");
 	}
+
+	__atomic_fetch_add(&rwb->num_pending, rwb->xfer_size, __ATOMIC_SEQ_CST);
+}
+
+unsigned int
+ftl_rwb_num_pending(struct ftl_rwb *rwb)
+{
+	return __atomic_load_n(&rwb->num_pending, __ATOMIC_SEQ_CST);
 }
 
 void
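
The counter is updated with the GCC/Clang __atomic builtins rather than under
rwb->lock, so producers and consumers on different threads see consistent
values without extra locking. A minimal standalone illustration of the
builtins this patch uses (illustrative code, not part of the patch):

/*
 * Standalone illustration of the GCC/Clang __atomic builtins used by the
 * patch; not SPDK code. __atomic_fetch_add/__atomic_fetch_sub return the
 * value held *before* the update, which is what makes the "assert on the
 * returned value" pattern in ftl_rwb_pop() meaningful.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int counter = 0;
	unsigned int old;

	old = __atomic_fetch_add(&counter, 5, __ATOMIC_SEQ_CST);
	assert(old == 0); /* pre-increment value */

	old = __atomic_fetch_sub(&counter, 2, __ATOMIC_SEQ_CST);
	assert(old == 5); /* pre-decrement value, so old > 0 shows the
			   * counter was non-zero before this consumer
			   * took its share */

	printf("counter=%u\n", __atomic_load_n(&counter, __ATOMIC_SEQ_CST));
	return 0;
}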
@@ -457,6 +467,7 @@ ftl_rwb_acquire(struct ftl_rwb *rwb, enum ftl_rwb_entry_type type)
 	pthread_spin_unlock(&rwb->lock);
 	__atomic_fetch_add(&rwb->num_acquired[type], 1, __ATOMIC_SEQ_CST);
+	__atomic_fetch_add(&rwb->num_pending, 1, __ATOMIC_SEQ_CST);
 	return entry;
 error:
 	pthread_spin_unlock(&rwb->lock);
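
Note the asymmetry in the bookkeeping: ftl_rwb_acquire() raises num_pending by
one per entry, while ftl_rwb_pop() lowers it by xfer_size per batch and
ftl_rwb_batch_revert() raises it by xfer_size again, since a reverted batch
goes back to being pending. The accounting balances because a full batch
always holds exactly xfer_size entries.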
@@ -491,12 +502,19 @@ struct ftl_rwb_batch *
 ftl_rwb_pop(struct ftl_rwb *rwb)
 {
 	struct ftl_rwb_batch *batch = NULL;
+	unsigned int num_pending __attribute__((unused));
 
 	if (spdk_ring_dequeue(rwb->prio_queue, (void **)&batch, 1) == 1) {
+		num_pending = __atomic_fetch_sub(&rwb->num_pending, rwb->xfer_size,
+						 __ATOMIC_SEQ_CST);
+		assert(num_pending > 0);
 		return batch;
 	}
 
 	if (spdk_ring_dequeue(rwb->submit_queue, (void **)&batch, 1) == 1) {
+		num_pending = __atomic_fetch_sub(&rwb->num_pending, rwb->xfer_size,
+						 __ATOMIC_SEQ_CST);
+		assert(num_pending > 0);
 		return batch;
 	}
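
The num_pending local exists only to feed the assert, which explains the
__attribute__((unused)) annotation. A standalone sketch of the same pattern
(not SPDK code):

/*
 * Sketch of the pattern used in ftl_rwb_pop() above; not SPDK code. With
 * NDEBUG defined, assert() expands to nothing and `old` is written but
 * never read; __attribute__((unused)) keeps the compiler's
 * unused-variable warnings quiet in such release builds.
 */
#include <assert.h>

static unsigned int counter = 3;

static void
consume_one(void)
{
	unsigned int old __attribute__((unused));

	old = __atomic_fetch_sub(&counter, 1, __ATOMIC_SEQ_CST);
	assert(old > 0); /* checks the pre-decrement value */
}

int main(void)
{
	consume_one();
	return 0;
}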

--- a/lib/ftl/ftl_rwb.h
+++ b/lib/ftl/ftl_rwb.h

@@ -118,6 +118,7 @@ struct ftl_rwb_entry *ftl_rwb_batch_first_entry(struct ftl_rwb_batch *batch);
 void *ftl_rwb_batch_get_data(struct ftl_rwb_batch *batch);
 void *ftl_rwb_batch_get_md(struct ftl_rwb_batch *batch);
 void ftl_rwb_disable_interleaving(struct ftl_rwb *rwb);
+unsigned int ftl_rwb_num_pending(struct ftl_rwb *rwb);
 
 static inline void
 _ftl_rwb_entry_set_valid(struct ftl_rwb_entry *entry, bool valid)
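
On the caller side, the new accessor makes the padding arithmetic
straightforward. The helper below is a hypothetical sketch of rounding the
pending count up to a whole batch; pad_entries() does not exist in SPDK, and
the actual pad-size logic in ftl_core.c is more involved:

#include <assert.h>
#include <stddef.h>

/* Hypothetical helper, not SPDK code: number of filler entries needed to
 * round the pending count up to a whole xfer_size batch. */
static size_t
pad_entries(size_t pending, size_t xfer_size)
{
	return (xfer_size - pending % xfer_size) % xfer_size;
}

int main(void)
{
	assert(pad_entries(0, 16) == 0);   /* nothing buffered, no pad   */
	assert(pad_entries(16, 16) == 0);  /* already a full batch       */
	assert(pad_entries(21, 16) == 11); /* 21 + 11 = two full batches */
	return 0;
}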