bdev: add ability to track bdev queue depth.

This change includes a function to enable this feature on a per-bdev
basis. The new information stored in the bdev includes the following:

measured_queue_depth: The aggregate of the outstanding operations from
each channel associated with this bdev.
period: The period at which this bdev's measured_queue_depth is being
updated.

With this information, one could calculate the average queue depth and
the disk utilization of the device.
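
For illustration, a monitoring loop could sample measured_queue_depth once per
sampling period and derive both figures from the samples. A minimal sketch in C
(the qd_stats accumulator and helper below are hypothetical, not part of this
change):

#include <stdint.h>

/* Hypothetical accumulator for periodic queue depth samples. */
struct qd_stats {
	uint64_t sum;   /* sum of sampled queue depths */
	uint64_t busy;  /* samples with at least one outstanding operation */
	uint64_t count; /* total samples taken */
};

static void
qd_stats_sample(struct qd_stats *s, uint64_t qd)
{
	if (qd == UINT64_MAX) {
		/* Tracking disabled, or the first period has not expired yet. */
		return;
	}
	s->sum += qd;
	s->busy += (qd > 0);
	s->count++;
}

Average queue depth is then sum / count, and utilization can be approximated
as busy / count.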

Change-Id: Ie0623ee4796e33b125504fb0965d5ef348cbff7d
Signed-off-by: Seth Howell <seth.howell@intel.com>
Reviewed-on: https://review.gerrithub.io/418102
Chandler-Test-Pool: SPDK Automated Test System <sys_sgsw@intel.com>
Reviewed-by: Ben Walker <benjamin.walker@intel.com>
Reviewed-by: Jim Harris <james.r.harris@intel.com>
Tested-by: SPDK CI Jenkins <sys_sgci@intel.com>
Seth Howell 2018-07-05 10:44:30 -07:00 committed by Jim Harris
parent 459e899095
commit 760b868aa8
3 changed files with 116 additions and 0 deletions


@@ -389,6 +389,48 @@ bool spdk_bdev_has_write_cache(const struct spdk_bdev *bdev);
*/
const struct spdk_uuid *spdk_bdev_get_uuid(const struct spdk_bdev *bdev);
/**
* Get the most recently measured queue depth from a bdev.
*
* The reported queue depth is the aggregate of outstanding I/O
* across all open channels associated with this bdev.
*
* \param bdev Block device to query.
*
* \return The most recent queue depth measurement for the bdev.
* If tracking is not enabled, this function returns UINT64_MAX.
* UINT64_MAX may also be returned after tracking has been enabled,
* but before the first sampling period has expired.
*/
uint64_t
spdk_bdev_get_qd(const struct spdk_bdev *bdev);
/**
* Get the queue depth polling period.
*
* The return value of this function is only valid if queue depth
* tracking has been enabled for this bdev.
*
* \param bdev Block device to query.
*
* \return The period at which this bdev's queue depth is being refreshed.
*/
uint64_t
spdk_bdev_get_qd_sampling_period(const struct spdk_bdev *bdev);
/**
* Enable or disable queue depth sampling for this bdev.
*
* A period greater than 0 enables queue depth sampling; a period of 0 disables it.
* The measured queue depth is stored in the spdk_bdev object as
* measured_queue_depth.
*
* \param bdev Block device on which to enable queue depth tracking.
* \param period The period at which to poll this bdev's queue depth. If this is set
* to zero, polling will be disabled.
*/
void spdk_bdev_set_qd_sampling_period(struct spdk_bdev *bdev, uint64_t period);
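
A hedged usage sketch of the new API (assumes `bdev` is a valid open bdev, and
that the period is expressed in microseconds, matching spdk_poller_register()):

/* Enable queue depth sampling every 1000 microseconds. */
spdk_bdev_set_qd_sampling_period(bdev, 1000);

/* ... at least one period later, e.g. from the caller's own poller ... */
uint64_t qd = spdk_bdev_get_qd(bdev);
if (qd != UINT64_MAX) {
	printf("queue depth: %" PRIu64 " (period: %" PRIu64 ")\n",
	       qd, spdk_bdev_get_qd_sampling_period(bdev));
}

/* A period of zero disables sampling again. */
spdk_bdev_set_qd_sampling_period(bdev, 0);

(printf and the PRIu64 macros require <stdio.h> and <inttypes.h>.)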
/**
* Obtain an I/O channel for the block device opened by the specified
* descriptor. I/O channels are bound to threads, so the resulting I/O
@@ -948,6 +990,7 @@ int spdk_bdev_queue_io_wait(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
void spdk_bdev_get_io_stat(struct spdk_bdev *bdev, struct spdk_io_channel *ch,
struct spdk_bdev_io_stat *stat);
/**
* Return I/O statistics for this bdev. All the required information will be passed
* via the callback function.


@@ -306,6 +306,19 @@ struct spdk_bdev {
		/** points to a reset bdev_io if one is in progress. */
		struct spdk_bdev_io *reset_in_progress;

		/** poller for tracking the queue_depth of a device, NULL if not tracking */
		struct spdk_poller *qd_poller;

		/** period at which we poll for queue depth information */
		uint64_t period;

		/** used to aggregate queue depth while iterating across the bdev's open channels */
		uint64_t temporary_queue_depth;

		/** queue depth as calculated the last time the telemetry poller checked. */
		uint64_t measured_queue_depth;
	} internal;
};
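
Keeping temporary_queue_depth separate from measured_queue_depth means the
running sum is accumulated privately while the channel walk is in flight and
only published in the completion callback, so spdk_bdev_get_qd() never returns
a partially-aggregated value.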


@@ -1642,6 +1642,63 @@ spdk_bdev_get_uuid(const struct spdk_bdev *bdev)
	return &bdev->uuid;
}
uint64_t
spdk_bdev_get_qd(const struct spdk_bdev *bdev)
{
	return bdev->internal.measured_queue_depth;
}

uint64_t
spdk_bdev_get_qd_sampling_period(const struct spdk_bdev *bdev)
{
	return bdev->internal.period;
}

static void
_calculate_measured_qd_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct spdk_bdev *bdev = spdk_io_channel_iter_get_ctx(i);

	bdev->internal.measured_queue_depth = bdev->internal.temporary_queue_depth;
}

static void
_calculate_measured_qd(struct spdk_io_channel_iter *i)
{
	struct spdk_bdev *bdev = spdk_io_channel_iter_get_ctx(i);
	struct spdk_io_channel *io_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bdev_channel *ch = spdk_io_channel_get_ctx(io_ch);

	bdev->internal.temporary_queue_depth += ch->io_outstanding;
	spdk_for_each_channel_continue(i, 0);
}

static int
spdk_bdev_calculate_measured_queue_depth(void *ctx)
{
	struct spdk_bdev *bdev = ctx;

	bdev->internal.temporary_queue_depth = 0;
	spdk_for_each_channel(__bdev_to_io_dev(bdev), _calculate_measured_qd, bdev,
			      _calculate_measured_qd_cpl);
	return 0;
}

void
spdk_bdev_set_qd_sampling_period(struct spdk_bdev *bdev, uint64_t period)
{
	bdev->internal.period = period;

	if (bdev->internal.qd_poller != NULL) {
		spdk_poller_unregister(&bdev->internal.qd_poller);
		bdev->internal.measured_queue_depth = UINT64_MAX;
	}

	if (period != 0) {
		bdev->internal.qd_poller = spdk_poller_register(spdk_bdev_calculate_measured_queue_depth, bdev,
				       period);
	}
}
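
Two details of spdk_bdev_set_qd_sampling_period() are worth noting. First, the
per-channel accumulation runs via spdk_for_each_channel(), which invokes
_calculate_measured_qd() on each channel's owning thread, so ch->io_outstanding
is read without extra locking. Second, changing the period while sampling is
active unregisters the old poller and resets measured_queue_depth to
UINT64_MAX, so readers see UINT64_MAX again until the first sample under the
new period completes.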
int
spdk_bdev_notify_blockcnt_change(struct spdk_bdev *bdev, uint64_t size)
{
@@ -2748,6 +2805,7 @@ spdk_bdev_init(struct spdk_bdev *bdev)
	}

	bdev->internal.status = SPDK_BDEV_STATUS_READY;
	bdev->internal.measured_queue_depth = UINT64_MAX;
	TAILQ_INIT(&bdev->internal.open_descs);
@@ -3091,6 +3149,8 @@ spdk_bdev_close(struct spdk_bdev_desc *desc)
		}
	}

	spdk_bdev_set_qd_sampling_period(bdev, 0);

	if (bdev->internal.status == SPDK_BDEV_STATUS_REMOVING && TAILQ_EMPTY(&bdev->internal.open_descs)) {
		do_unregister = true;
	}