MFV r315290, r315291: 7303 dynamic metaslab selection

illumos/illumos-gate@8363e80ae7
https://github.com/illumos/illumos-gate/commit/8363e80ae72609660f6090766ca8c2c18

https://www.illumos.org/issues/7303

  This change introduces a new weighting algorithm to improve metaslab selection.
  The new algorithm relies on the SPACEMAP_HISTOGRAM feature. As a result, the
  metaslab weight now encodes which weighting algorithm was used
  (size-based vs. segment-based).
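
  Segment-based weighting is only possible when the pool records space map
  histograms. A rough sketch of the dispatch this implies (illustrative only --
  the actual logic is in the metaslab.c diff, which is suppressed below, and
  the exact function names may differ):

    /*
     * Sketch only: choose a weighting algorithm based on feature support.
     */
    static uint64_t
    example_metaslab_weight(spa_t *spa, metaslab_t *msp)
    {
        if (spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM))
            return (metaslab_segment_weight(msp));  /* segment-based */
        return (metaslab_space_weight(msp));        /* size-based */
    }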

  This also introduces a new allocation tracing facility and two new dcmds to
  help debug allocation problems. Each zio now contains a zio_alloc_list_t
  structure that is populated as the zio goes through the allocation stage.
  Here's an example of how to use the tracing facility:

> c5ec000::print zio_t io_alloc_list | ::walk list | ::metaslab_trace
  MSID    DVA    ASIZE      WEIGHT             RESULT               VDEV
     -      0      400           0    NOT_ALLOCATABLE           ztest.0a
     -      0      400           0    NOT_ALLOCATABLE           ztest.0a
     -      0      400           0             ENOSPC           ztest.0a
     -      0      200           0    NOT_ALLOCATABLE           ztest.0a
     -      0      200           0    NOT_ALLOCATABLE           ztest.0a
     -      0      200           0             ENOSPC           ztest.0a
     1      0      400      1 x 8M            17b1a00           ztest.0a

> 1ff2400::print zio_t io_alloc_list | ::walk list | ::metaslab_trace
  MSID    DVA    ASIZE      WEIGHT             RESULT               VDEV
     -      0      200           0    NOT_ALLOCATABLE           mirror-2
     -      0      200           0    NOT_ALLOCATABLE           mirror-0
     1      0      200      1 x 4M            112ae00           mirror-1
     -      1      200           0    NOT_ALLOCATABLE           mirror-2
     -      1      200           0    NOT_ALLOCATABLE           mirror-0
     1      1      200      1 x 4M            112b000           mirror-1
     -      2      200           0    NOT_ALLOCATABLE           mirror-2

  If the metaslab is using segment-based weighting, the WEIGHT column displays
  the number of segments available in the bucket where the allocation attempt
  was made.
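
  For example, the raw 64-bit weight behind the "1 x 8M" entry above can be
  decoded with the WEIGHT_GET_* macros added to sys/metaslab_impl.h later in
  this diff (the snippet is an illustrative sketch, not code from the commit):

    uint64_t weight = msp->ms_weight;

    if (!WEIGHT_IS_SPACEBASED(weight)) {
        uint64_t idx = WEIGHT_GET_INDEX(weight);    /* histogram bucket */
        uint64_t count = WEIGHT_GET_COUNT(weight);  /* segments in bucket */
        /* "1 x 8M" means count == 1 and idx == 23 (1 << 23 == 8M). */
    }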

Author: George Wilson <george.wilson@delphix.com>
Reviewed by: Alex Reece <alex@delphix.com>
Reviewed by: Chris Siden <christopher.siden@delphix.com>
Reviewed by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Paul Dagnelie <paul.dagnelie@delphix.com>
Reviewed by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed by: Prakash Surya <prakash.surya@delphix.com>
Reviewed by: Don Brady <don.brady@intel.com>
Approved by: Richard Lowe <richlowe@richlowe.net>
Committer: Alexander Motin
Date: 2017-03-24 09:37:00 +00:00
Commit: 3aef5b286a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=315896
14 changed files with 985 additions and 223 deletions


@@ -2589,10 +2589,21 @@ zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
if (!dump_opt['L']) {
vdev_t *rvd = spa->spa_root_vdev;
/*
* We are going to be changing the meaning of the metaslab's
* ms_tree. Ensure that the allocator doesn't try to
* use the tree.
*/
spa->spa_normal_class->mc_ops = &zdb_metaslab_ops;
spa->spa_log_class->mc_ops = &zdb_metaslab_ops;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
metaslab_group_t *mg = vd->vdev_mg;
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
ASSERT3P(msp->ms_group, ==, mg);
mutex_enter(&msp->ms_lock);
metaslab_unload(msp);
@@ -2613,8 +2624,6 @@ zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
(longlong_t)m,
(longlong_t)vd->vdev_ms_count);
msp->ms_ops = &zdb_metaslab_ops;
/*
* We don't want to spend the CPU
* manipulating the size-ordered
@@ -2624,7 +2633,10 @@ zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
msp->ms_tree->rt_ops = NULL;
VERIFY0(space_map_load(msp->ms_sm,
msp->ms_tree, SM_ALLOC));
msp->ms_loaded = B_TRUE;
if (!msp->ms_loaded) {
msp->ms_loaded = B_TRUE;
}
}
mutex_exit(&msp->ms_lock);
}
@@ -2646,8 +2658,10 @@ zdb_leak_fini(spa_t *spa)
vdev_t *rvd = spa->spa_root_vdev;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
metaslab_group_t *mg = vd->vdev_mg;
for (int m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
ASSERT3P(mg, ==, msp->ms_group);
mutex_enter(&msp->ms_lock);
/*
@@ -2661,7 +2675,10 @@ zdb_leak_fini(spa_t *spa)
* from the ms_tree.
*/
range_tree_vacate(msp->ms_tree, zdb_leak, vd);
msp->ms_loaded = B_FALSE;
if (msp->ms_loaded) {
msp->ms_loaded = B_FALSE;
}
mutex_exit(&msp->ms_lock);
}


@@ -173,7 +173,7 @@ static const ztest_shared_opts_t ztest_opts_defaults = {
.zo_mirrors = 2,
.zo_raidz = 4,
.zo_raidz_parity = 1,
.zo_vdev_size = SPA_MINDEVSIZE * 2,
.zo_vdev_size = SPA_MINDEVSIZE * 4, /* 256m default size */
.zo_datasets = 7,
.zo_threads = 23,
.zo_passtime = 60, /* 60 seconds */


@@ -95,6 +95,11 @@ kstat_create(char *module, int instance, char *name, char *class,
return (NULL);
}
/*ARGSUSED*/
void
kstat_named_init(kstat_named_t *knp, const char *name, uchar_t type)
{}
/*ARGSUSED*/
void
kstat_install(kstat_t *ksp)


@@ -129,3 +129,19 @@ kstat_delete(kstat_t *ksp)
sysctl_ctx_free(&ksp->ks_sysctl_ctx);
free(ksp, M_KSTAT);
}
void
kstat_set_string(char *dst, const char *src)
{
bzero(dst, KSTAT_STRLEN);
(void) strncpy(dst, src, KSTAT_STRLEN - 1);
}
void
kstat_named_init(kstat_named_t *knp, const char *name, uchar_t data_type)
{
kstat_set_string(knp->name, name);
knp->data_type = data_type;
}
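
These shims mirror the illumos kstat API so that shared ZFS code can
initialize named kstats on FreeBSD. A hypothetical caller (the module and
statistic names below are made up for illustration; constants follow the
illumos kstat API):

    static kstat_named_t example_data[1];
    kstat_t *ksp;

    ksp = kstat_create("zfs", 0, "example", "misc",
        KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
    if (ksp != NULL) {
        /* Virtual kstat: the caller supplies the data buffer. */
        ksp->ks_data = example_data;
        kstat_named_init(&example_data[0], "example_count",
            KSTAT_DATA_UINT64);
        kstat_install(ksp);
    }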


@@ -69,5 +69,7 @@ kstat_t *kstat_create(char *module, int instance, char *name, char *cls,
uchar_t type, ulong_t ndata, uchar_t flags);
void kstat_install(kstat_t *ksp);
void kstat_delete(kstat_t *ksp);
void kstat_set_string(char *, const char *);
void kstat_named_init(kstat_named_t *, const char *, uchar_t);
#endif /* _OPENSOLARIS_SYS_KSTAT_H_ */

File diff suppressed because it is too large


@@ -1299,6 +1299,19 @@ spa_unload(spa_t *spa)
spa->spa_sync_on = B_FALSE;
}
/*
* Even though vdev_free() also calls vdev_metaslab_fini, we need
* to call it earlier, before we wait for async i/o to complete.
* This ensures that there is no async metaslab prefetching, by
* calling taskq_wait(mg_taskq).
*/
if (spa->spa_root_vdev != NULL) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++)
vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]);
spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
* Wait for any outstanding async I/O to complete.
*/


@@ -2004,6 +2004,7 @@ spa_init(int mode)
refcount_sysinit();
unique_init();
range_tree_init();
metaslab_alloc_trace_init();
zio_init();
lz4_init();
dmu_init();
@@ -2033,6 +2034,7 @@ spa_fini(void)
dmu_fini();
lz4_fini();
zio_fini();
metaslab_alloc_trace_fini();
range_tree_fini();
unique_fini();
refcount_fini();


@@ -174,7 +174,7 @@ space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
dmu_buf_will_dirty(sm->sm_dbuf, tx);
ASSERT(space_map_histogram_verify(sm, rt));
/*
* Transfer the content of the range tree histogram to the space
* map histogram. The space map histogram contains 32 buckets ranging


@@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
* Copyright (c) 2011, 2016 by Delphix. All rights reserved.
*/
#ifndef _SYS_METASLAB_H
@@ -36,10 +36,12 @@
extern "C" {
#endif
typedef struct metaslab_ops {
uint64_t (*msop_alloc)(metaslab_t *msp, uint64_t size);
uint64_t (*msop_alloc)(metaslab_t *, uint64_t);
} metaslab_ops_t;
extern metaslab_ops_t *zfs_metaslab_ops;
int metaslab_init(metaslab_group_t *, uint64_t, uint64_t, uint64_t,
@@ -63,11 +65,16 @@ uint64_t metaslab_block_maxsize(metaslab_t *);
#define METASLAB_DONT_THROTTLE 0x10
int metaslab_alloc(spa_t *, metaslab_class_t *, uint64_t,
blkptr_t *, int, uint64_t, blkptr_t *, int, zio_t *);
blkptr_t *, int, uint64_t, blkptr_t *, int, zio_alloc_list_t *, zio_t *);
void metaslab_free(spa_t *, const blkptr_t *, uint64_t, boolean_t);
int metaslab_claim(spa_t *, const blkptr_t *, uint64_t);
void metaslab_check_free(spa_t *, const blkptr_t *);
void metaslab_alloc_trace_init(void);
void metaslab_alloc_trace_fini(void);
void metaslab_trace_init(zio_alloc_list_t *);
void metaslab_trace_fini(zio_alloc_list_t *);
metaslab_class_t *metaslab_class_create(spa_t *, metaslab_ops_t *);
void metaslab_class_destroy(metaslab_class_t *);
int metaslab_class_validate(metaslab_class_t *);


@@ -41,6 +41,94 @@
extern "C" {
#endif
/*
* Metaslab allocation tracing record.
*/
typedef struct metaslab_alloc_trace {
list_node_t mat_list_node;
metaslab_group_t *mat_mg;
metaslab_t *mat_msp;
uint64_t mat_size;
uint64_t mat_weight;
uint32_t mat_dva_id;
uint64_t mat_offset;
} metaslab_alloc_trace_t;
/*
* Used by the metaslab allocation tracing facility to indicate
* error conditions. These errors are stored to the offset member
* of the metaslab_alloc_trace_t record and displayed by mdb.
*/
typedef enum trace_alloc_type {
TRACE_ALLOC_FAILURE = -1ULL,
TRACE_TOO_SMALL = -2ULL,
TRACE_FORCE_GANG = -3ULL,
TRACE_NOT_ALLOCATABLE = -4ULL,
TRACE_GROUP_FAILURE = -5ULL,
TRACE_ENOSPC = -6ULL,
TRACE_CONDENSING = -7ULL,
TRACE_VDEV_ERROR = -8ULL
} trace_alloc_type_t;
#define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
#define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
#define METASLAB_WEIGHT_TYPE (1ULL << 61)
#define METASLAB_ACTIVE_MASK \
(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
/*
* The metaslab weight is used to encode the amount of free space in a
* metaslab, such that the "best" metaslab appears first when sorting the
* metaslabs by weight. The weight (and therefore the "best" metaslab) can
* be determined in two different ways: by computing a weighted sum of all
* the free space in the metaslab (a space based weight) or by counting only
* the free segments of the largest size (a segment based weight). We prefer
* the segment based weight because it reflects how the free space is
* comprised, but we cannot always use it -- legacy pools do not have the
* space map histogram information necessary to determine the largest
* contiguous regions. Pools that have the space map histogram determine
* the segment weight by looking at each bucket in the histogram and
* determining the free space whose size in bytes is in the range:
* [2^i, 2^(i+1))
* We then encode the largest index, i, that contains regions into the
* segment-weighted value.
*
* Space-based weight:
*
* 64      56      48      40      32      24      16      8       0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* |PS1|                    weighted-free space                    |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* PS - indicates primary and secondary activation
* space - the fragmentation-weighted space
*
* Segment-based weight:
*
* 64      56      48      40      32      24      16      8       0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* |PS0| idx|             count of segments in region              |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* PS - indicates primary and secondary activation
* idx - index for the highest bucket in the histogram
* count - number of segments in the specified bucket
*/
#define WEIGHT_GET_ACTIVE(weight) BF64_GET((weight), 62, 2)
#define WEIGHT_SET_ACTIVE(weight, x) BF64_SET((weight), 62, 2, x)
#define WEIGHT_IS_SPACEBASED(weight) \
((weight) == 0 || BF64_GET((weight), 61, 1))
#define WEIGHT_SET_SPACEBASED(weight) BF64_SET((weight), 61, 1, 1)
/*
* These macros are only applicable to segment-based weighting.
*/
#define WEIGHT_GET_INDEX(weight) BF64_GET((weight), 55, 6)
#define WEIGHT_SET_INDEX(weight, x) BF64_SET((weight), 55, 6, x)
#define WEIGHT_GET_COUNT(weight) BF64_GET((weight), 0, 55)
#define WEIGHT_SET_COUNT(weight, x) BF64_SET((weight), 0, 55, x)
/*
* A metaslab class encompasses a category of allocatable top-level vdevs.
* Each top-level vdev is associated with a metaslab group which defines
@@ -221,7 +309,6 @@ struct metaslab {
kmutex_t ms_lock;
kcondvar_t ms_load_cv;
space_map_t *ms_sm;
metaslab_ops_t *ms_ops;
uint64_t ms_id;
uint64_t ms_start;
uint64_t ms_size;
@@ -234,12 +321,27 @@ struct metaslab {
boolean_t ms_condensing; /* condensing? */
boolean_t ms_condense_wanted;
/*
* We must hold both ms_lock and ms_group->mg_lock in order to
* modify ms_loaded.
*/
boolean_t ms_loaded;
boolean_t ms_loading;
int64_t ms_deferspace; /* sum of ms_defermap[] space */
uint64_t ms_weight; /* weight vs. others in group */
uint64_t ms_access_txg;
uint64_t ms_activation_weight; /* activation weight */
/*
* Track of whenever a metaslab is selected for loading or allocation.
* We use this value to determine how long the metaslab should
* stay cached.
*/
uint64_t ms_selected_txg;
uint64_t ms_alloc_txg; /* last successful alloc (debug only) */
uint64_t ms_max_size; /* maximum allocatable size */
/*
* The metaslab block allocators can optionally use a size-ordered


@@ -52,14 +52,15 @@ extern int zfs_flags;
extern boolean_t zfs_recover;
extern boolean_t zfs_free_leak_on_eio;
#define ZFS_DEBUG_DPRINTF (1<<0)
#define ZFS_DEBUG_DBUF_VERIFY (1<<1)
#define ZFS_DEBUG_DNODE_VERIFY (1<<2)
#define ZFS_DEBUG_SNAPNAMES (1<<3)
#define ZFS_DEBUG_MODIFY (1<<4)
#define ZFS_DEBUG_SPA (1<<5)
#define ZFS_DEBUG_ZIO_FREE (1<<6)
#define ZFS_DEBUG_HISTOGRAM_VERIFY (1<<7)
#define ZFS_DEBUG_DPRINTF (1 << 0)
#define ZFS_DEBUG_DBUF_VERIFY (1 << 1)
#define ZFS_DEBUG_DNODE_VERIFY (1 << 2)
#define ZFS_DEBUG_SNAPNAMES (1 << 3)
#define ZFS_DEBUG_MODIFY (1 << 4)
#define ZFS_DEBUG_SPA (1 << 5)
#define ZFS_DEBUG_ZIO_FREE (1 << 6)
#define ZFS_DEBUG_HISTOGRAM_VERIFY (1 << 7)
#define ZFS_DEBUG_METASLAB_VERIFY (1 << 8)
#ifdef ZFS_DEBUG
extern void __dprintf(const char *file, const char *func,


@@ -376,6 +376,11 @@ typedef int zio_pipe_stage_t(zio_t *zio);
#define ZIO_REEXECUTE_NOW 0x01
#define ZIO_REEXECUTE_SUSPEND 0x02
typedef struct zio_alloc_list {
list_t zal_list;
uint64_t zal_size;
} zio_alloc_list_t;
typedef struct zio_link {
zio_t *zl_parent;
zio_t *zl_child;
@@ -463,6 +468,7 @@ struct zio {
avl_node_t io_queue_node;
avl_node_t io_offset_node;
avl_node_t io_alloc_node;
zio_alloc_list_t io_alloc_list;
/* Internal pipeline state */
enum zio_flag io_flags;


@@ -638,6 +638,7 @@ zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
offsetof(zio_link_t, zl_parent_node));
list_create(&zio->io_child_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_child_node));
metaslab_trace_init(&zio->io_alloc_list);
if (vd != NULL)
zio->io_child_type = ZIO_CHILD_VDEV;
@@ -696,6 +697,7 @@ zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
static void
zio_destroy(zio_t *zio)
{
metaslab_trace_fini(&zio->io_alloc_list);
list_destroy(&zio->io_parent_list);
list_destroy(&zio->io_child_list);
mutex_destroy(&zio->io_lock);
@@ -2224,7 +2226,8 @@ zio_write_gang_block(zio_t *pio)
}
error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, pio);
bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
&pio->io_alloc_list, pio);
if (error) {
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
@@ -2877,7 +2880,8 @@ zio_dva_allocate(zio_t *zio)
}
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags, zio);
zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
&zio->io_alloc_list, zio);
if (error != 0) {
spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, "
@@ -2941,19 +2945,23 @@ zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp,
uint64_t size, boolean_t *slog)
{
int error = 1;
zio_alloc_list_t io_alloc_list;
ASSERT(txg > spa_syncing_txg(spa));
error = metaslab_alloc(spa, spa_log_class(spa), size,
new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID, NULL);
metaslab_trace_init(&io_alloc_list);
error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
txg, old_bp, METASLAB_HINTBP_AVOID, &io_alloc_list, NULL);
if (error == 0) {
*slog = TRUE;
} else {
error = metaslab_alloc(spa, spa_normal_class(spa), size,
new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID, NULL);
new_bp, 1, txg, old_bp, METASLAB_HINTBP_AVOID,
&io_alloc_list, NULL);
if (error == 0)
*slog = FALSE;
}
metaslab_trace_fini(&io_alloc_list);
if (error == 0) {
BP_SET_LSIZE(new_bp, size);
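
As the zio_alloc_zil() hunk above shows, callers without a zio can still get
allocation tracing by keeping a zio_alloc_list_t on the stack. The general
pattern (a sketch; the argument order follows the new metaslab_alloc()
prototype from the sys/metaslab.h hunk):

    int error;
    zio_alloc_list_t zal;

    metaslab_trace_init(&zal);
    error = metaslab_alloc(spa, mc, size, bp, ndvas, txg,
        NULL, flags, &zal, NULL);   /* trace records accumulate in zal */
    metaslab_trace_fini(&zal);      /* frees the accumulated records */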