OpenZFS 7303 - dynamic metaslab selection

This change introduces a new weighting algorithm to improve
metaslab selection. The new weighting algorithm relies on the
SPACEMAP_HISTOGRAM feature. As a result, the metaslab weight
now encodes the type of weighting algorithm used (size-based
vs segment-based).

Porting Notes: The metaslab allocation tracing code is conditionally
removed on linux (dependent on mdb debugger).

Authored by: George Wilson <george.wilson@delphix.com>
Reviewed by: Alex Reece <alex@delphix.com>
Reviewed by: Chris Siden <christopher.siden@delphix.com>
Reviewed by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Paul Dagnelie <paul.dagnelie@delphix.com>
Reviewed by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed by: Prakash Surya <prakash.surya@delphix.com>
Reviewed by: Don Brady <don.brady@intel.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Ported-by: Don Brady <don.brady@intel.com>

OpenZFS-issue: https://www.illumos.org/issues/7303
OpenZFS-commit: https://github.com/openzfs/openzfs/commit/d5190931bd
Closes #5404
This commit is contained in:
Don Brady 2017-01-12 12:52:56 -07:00 committed by Brian Behlendorf
parent 5727b00e06
commit 4e21fd060a
13 changed files with 1042 additions and 226 deletions

View File

@ -2643,10 +2643,21 @@ zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
if (!dump_opt['L']) { if (!dump_opt['L']) {
vdev_t *rvd = spa->spa_root_vdev; vdev_t *rvd = spa->spa_root_vdev;
/*
* We are going to be changing the meaning of the metaslab's
* ms_tree. Ensure that the allocator doesn't try to
* use the tree.
*/
spa->spa_normal_class->mc_ops = &zdb_metaslab_ops;
spa->spa_log_class->mc_ops = &zdb_metaslab_ops;
for (c = 0; c < rvd->vdev_children; c++) { for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c]; vdev_t *vd = rvd->vdev_child[c];
metaslab_group_t *mg = vd->vdev_mg;
for (m = 0; m < vd->vdev_ms_count; m++) { for (m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m]; metaslab_t *msp = vd->vdev_ms[m];
ASSERT3P(msp->ms_group, ==, mg);
mutex_enter(&msp->ms_lock); mutex_enter(&msp->ms_lock);
metaslab_unload(msp); metaslab_unload(msp);
@ -2667,8 +2678,6 @@ zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
(longlong_t)m, (longlong_t)m,
(longlong_t)vd->vdev_ms_count); (longlong_t)vd->vdev_ms_count);
msp->ms_ops = &zdb_metaslab_ops;
/* /*
* We don't want to spend the CPU * We don't want to spend the CPU
* manipulating the size-ordered * manipulating the size-ordered
@ -2678,7 +2687,9 @@ zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
msp->ms_tree->rt_ops = NULL; msp->ms_tree->rt_ops = NULL;
VERIFY0(space_map_load(msp->ms_sm, VERIFY0(space_map_load(msp->ms_sm,
msp->ms_tree, SM_ALLOC)); msp->ms_tree, SM_ALLOC));
msp->ms_loaded = B_TRUE;
if (!msp->ms_loaded)
msp->ms_loaded = B_TRUE;
} }
mutex_exit(&msp->ms_lock); mutex_exit(&msp->ms_lock);
} }
@ -2702,8 +2713,10 @@ zdb_leak_fini(spa_t *spa)
vdev_t *rvd = spa->spa_root_vdev; vdev_t *rvd = spa->spa_root_vdev;
for (c = 0; c < rvd->vdev_children; c++) { for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c]; vdev_t *vd = rvd->vdev_child[c];
metaslab_group_t *mg = vd->vdev_mg;
for (m = 0; m < vd->vdev_ms_count; m++) { for (m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m]; metaslab_t *msp = vd->vdev_ms[m];
ASSERT3P(mg, ==, msp->ms_group);
mutex_enter(&msp->ms_lock); mutex_enter(&msp->ms_lock);
/* /*
@ -2717,7 +2730,9 @@ zdb_leak_fini(spa_t *spa)
* from the ms_tree. * from the ms_tree.
*/ */
range_tree_vacate(msp->ms_tree, zdb_leak, vd); range_tree_vacate(msp->ms_tree, zdb_leak, vd);
msp->ms_loaded = B_FALSE;
if (msp->ms_loaded)
msp->ms_loaded = B_FALSE;
mutex_exit(&msp->ms_lock); mutex_exit(&msp->ms_lock);
} }

View File

@ -178,7 +178,7 @@ static const ztest_shared_opts_t ztest_opts_defaults = {
.zo_mirrors = 2, .zo_mirrors = 2,
.zo_raidz = 4, .zo_raidz = 4,
.zo_raidz_parity = 1, .zo_raidz_parity = 1,
.zo_vdev_size = SPA_MINDEVSIZE * 2, .zo_vdev_size = SPA_MINDEVSIZE * 4, /* 256m default size */
.zo_datasets = 7, .zo_datasets = 7,
.zo_threads = 23, .zo_threads = 23,
.zo_passtime = 60, /* 60 seconds */ .zo_passtime = 60, /* 60 seconds */

View File

@ -20,7 +20,7 @@
*/ */
/* /*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2015 by Delphix. All rights reserved. * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
*/ */
#ifndef _SYS_METASLAB_H #ifndef _SYS_METASLAB_H
@ -36,10 +36,12 @@
extern "C" { extern "C" {
#endif #endif
typedef struct metaslab_ops { typedef struct metaslab_ops {
uint64_t (*msop_alloc)(metaslab_t *msp, uint64_t size); uint64_t (*msop_alloc)(metaslab_t *, uint64_t);
} metaslab_ops_t; } metaslab_ops_t;
extern metaslab_ops_t *zfs_metaslab_ops; extern metaslab_ops_t *zfs_metaslab_ops;
int metaslab_init(metaslab_group_t *, uint64_t, uint64_t, uint64_t, int metaslab_init(metaslab_group_t *, uint64_t, uint64_t, uint64_t,
@ -64,13 +66,18 @@ uint64_t metaslab_block_maxsize(metaslab_t *);
#define METASLAB_FASTWRITE 0x20 #define METASLAB_FASTWRITE 0x20
int metaslab_alloc(spa_t *, metaslab_class_t *, uint64_t, int metaslab_alloc(spa_t *, metaslab_class_t *, uint64_t,
blkptr_t *, int, uint64_t, blkptr_t *, int, zio_t *); blkptr_t *, int, uint64_t, blkptr_t *, int, zio_alloc_list_t *, zio_t *);
void metaslab_free(spa_t *, const blkptr_t *, uint64_t, boolean_t); void metaslab_free(spa_t *, const blkptr_t *, uint64_t, boolean_t);
int metaslab_claim(spa_t *, const blkptr_t *, uint64_t); int metaslab_claim(spa_t *, const blkptr_t *, uint64_t);
void metaslab_check_free(spa_t *, const blkptr_t *); void metaslab_check_free(spa_t *, const blkptr_t *);
void metaslab_fastwrite_mark(spa_t *, const blkptr_t *); void metaslab_fastwrite_mark(spa_t *, const blkptr_t *);
void metaslab_fastwrite_unmark(spa_t *, const blkptr_t *); void metaslab_fastwrite_unmark(spa_t *, const blkptr_t *);
void metaslab_alloc_trace_init(void);
void metaslab_alloc_trace_fini(void);
void metaslab_trace_init(zio_alloc_list_t *);
void metaslab_trace_fini(zio_alloc_list_t *);
metaslab_class_t *metaslab_class_create(spa_t *, metaslab_ops_t *); metaslab_class_t *metaslab_class_create(spa_t *, metaslab_ops_t *);
void metaslab_class_destroy(metaslab_class_t *); void metaslab_class_destroy(metaslab_class_t *);
int metaslab_class_validate(metaslab_class_t *); int metaslab_class_validate(metaslab_class_t *);

View File

@ -24,7 +24,7 @@
*/ */
/* /*
* Copyright (c) 2011, 2015 by Delphix. All rights reserved. * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
*/ */
#ifndef _SYS_METASLAB_IMPL_H #ifndef _SYS_METASLAB_IMPL_H
@ -41,6 +41,94 @@
extern "C" { extern "C" {
#endif #endif
/*
* Metaslab allocation tracing record.
*/
typedef struct metaslab_alloc_trace {
list_node_t mat_list_node;
metaslab_group_t *mat_mg;
metaslab_t *mat_msp;
uint64_t mat_size;
uint64_t mat_weight;
uint32_t mat_dva_id;
uint64_t mat_offset;
} metaslab_alloc_trace_t;
/*
* Used by the metaslab allocation tracing facility to indicate
* error conditions. These errors are stored to the offset member
* of the metaslab_alloc_trace_t record and displayed by mdb.
*/
typedef enum trace_alloc_type {
TRACE_ALLOC_FAILURE = -1ULL,
TRACE_TOO_SMALL = -2ULL,
TRACE_FORCE_GANG = -3ULL,
TRACE_NOT_ALLOCATABLE = -4ULL,
TRACE_GROUP_FAILURE = -5ULL,
TRACE_ENOSPC = -6ULL,
TRACE_CONDENSING = -7ULL,
TRACE_VDEV_ERROR = -8ULL
} trace_alloc_type_t;
#define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
#define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
#define METASLAB_WEIGHT_TYPE (1ULL << 61)
#define METASLAB_ACTIVE_MASK \
(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
/*
* The metaslab weight is used to encode the amount of free space in a
* metaslab, such that the "best" metaslab appears first when sorting the
* metaslabs by weight. The weight (and therefore the "best" metaslab) can
* be determined in two different ways: by computing a weighted sum of all
* the free space in the metaslab (a space based weight) or by counting only
* the free segments of the largest size (a segment based weight). We prefer
* the segment based weight because it reflects how the free space is
* comprised, but we cannot always use it -- legacy pools do not have the
* space map histogram information necessary to determine the largest
* contiguous regions. Pools that have the space map histogram determine
* the segment weight by looking at each bucket in the histogram and
* determining the free space whose size in bytes is in the range:
* [2^i, 2^(i+1))
* We then encode the largest index, i, that contains regions into the
* segment-weighted value.
*
* Space-based weight:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* |PS1| weighted-free space |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* PS - indicates primary and secondary activation
* space - the fragmentation-weighted space
*
* Segment-based weight:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* |PS0| idx| count of segments in region |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* PS - indicates primary and secondary activation
* idx - index for the highest bucket in the histogram
* count - number of segments in the specified bucket
*/
#define WEIGHT_GET_ACTIVE(weight) BF64_GET((weight), 62, 2)
#define WEIGHT_SET_ACTIVE(weight, x) BF64_SET((weight), 62, 2, x)
#define WEIGHT_IS_SPACEBASED(weight) \
((weight) == 0 || BF64_GET((weight), 61, 1))
#define WEIGHT_SET_SPACEBASED(weight) BF64_SET((weight), 61, 1, 1)
/*
* These macros are only applicable to segment-based weighting.
*/
#define WEIGHT_GET_INDEX(weight) BF64_GET((weight), 55, 6)
#define WEIGHT_SET_INDEX(weight, x) BF64_SET((weight), 55, 6, x)
#define WEIGHT_GET_COUNT(weight) BF64_GET((weight), 0, 55)
#define WEIGHT_SET_COUNT(weight, x) BF64_SET((weight), 0, 55, x)
/* /*
* A metaslab class encompasses a category of allocatable top-level vdevs. * A metaslab class encompasses a category of allocatable top-level vdevs.
* Each top-level vdev is associated with a metaslab group which defines * Each top-level vdev is associated with a metaslab group which defines
@ -220,7 +308,6 @@ struct metaslab {
kmutex_t ms_lock; kmutex_t ms_lock;
kcondvar_t ms_load_cv; kcondvar_t ms_load_cv;
space_map_t *ms_sm; space_map_t *ms_sm;
metaslab_ops_t *ms_ops;
uint64_t ms_id; uint64_t ms_id;
uint64_t ms_start; uint64_t ms_start;
uint64_t ms_size; uint64_t ms_size;
@ -233,12 +320,27 @@ struct metaslab {
boolean_t ms_condensing; /* condensing? */ boolean_t ms_condensing; /* condensing? */
boolean_t ms_condense_wanted; boolean_t ms_condense_wanted;
/*
* We must hold both ms_lock and ms_group->mg_lock in order to
* modify ms_loaded.
*/
boolean_t ms_loaded; boolean_t ms_loaded;
boolean_t ms_loading; boolean_t ms_loading;
int64_t ms_deferspace; /* sum of ms_defermap[] space */ int64_t ms_deferspace; /* sum of ms_defermap[] space */
uint64_t ms_weight; /* weight vs. others in group */ uint64_t ms_weight; /* weight vs. others in group */
uint64_t ms_access_txg; uint64_t ms_activation_weight; /* activation weight */
/*
* Track of whenever a metaslab is selected for loading or allocation.
* We use this value to determine how long the metaslab should
* stay cached.
*/
uint64_t ms_selected_txg;
uint64_t ms_alloc_txg; /* last successful alloc (debug only) */
uint64_t ms_max_size; /* maximum allocatable size */
/* /*
* The metaslab block allocators can optionally use a size-ordered * The metaslab block allocators can optionally use a size-ordered

View File

@ -394,6 +394,7 @@ extern void cv_broadcast(kcondvar_t *cv);
*/ */
extern kstat_t *kstat_create(const char *, int, extern kstat_t *kstat_create(const char *, int,
const char *, const char *, uchar_t, ulong_t, uchar_t); const char *, const char *, uchar_t, ulong_t, uchar_t);
extern void kstat_named_init(kstat_named_t *, const char *, uchar_t);
extern void kstat_install(kstat_t *); extern void kstat_install(kstat_t *);
extern void kstat_delete(kstat_t *); extern void kstat_delete(kstat_t *);
extern void kstat_waitq_enter(kstat_io_t *); extern void kstat_waitq_enter(kstat_io_t *);

View File

@ -42,14 +42,15 @@ extern int zfs_flags;
extern int zfs_recover; extern int zfs_recover;
extern int zfs_free_leak_on_eio; extern int zfs_free_leak_on_eio;
#define ZFS_DEBUG_DPRINTF (1<<0) #define ZFS_DEBUG_DPRINTF (1 << 0)
#define ZFS_DEBUG_DBUF_VERIFY (1<<1) #define ZFS_DEBUG_DBUF_VERIFY (1 << 1)
#define ZFS_DEBUG_DNODE_VERIFY (1<<2) #define ZFS_DEBUG_DNODE_VERIFY (1 << 2)
#define ZFS_DEBUG_SNAPNAMES (1<<3) #define ZFS_DEBUG_SNAPNAMES (1 << 3)
#define ZFS_DEBUG_MODIFY (1<<4) #define ZFS_DEBUG_MODIFY (1 << 4)
#define ZFS_DEBUG_SPA (1<<5) #define ZFS_DEBUG_SPA (1 << 5)
#define ZFS_DEBUG_ZIO_FREE (1<<6) #define ZFS_DEBUG_ZIO_FREE (1 << 6)
#define ZFS_DEBUG_HISTOGRAM_VERIFY (1<<7) #define ZFS_DEBUG_HISTOGRAM_VERIFY (1 << 7)
#define ZFS_DEBUG_METASLAB_VERIFY (1 << 8)
extern void __dprintf(const char *file, const char *func, extern void __dprintf(const char *file, const char *func,
int line, const char *fmt, ...); int line, const char *fmt, ...);

View File

@ -358,6 +358,11 @@ typedef int zio_pipe_stage_t(zio_t *zio);
#define ZIO_REEXECUTE_NOW 0x01 #define ZIO_REEXECUTE_NOW 0x01
#define ZIO_REEXECUTE_SUSPEND 0x02 #define ZIO_REEXECUTE_SUSPEND 0x02
typedef struct zio_alloc_list {
list_t zal_list;
uint64_t zal_size;
} zio_alloc_list_t;
typedef struct zio_link { typedef struct zio_link {
zio_t *zl_parent; zio_t *zl_parent;
zio_t *zl_child; zio_t *zl_child;
@ -417,6 +422,7 @@ struct zio {
avl_node_t io_queue_node; avl_node_t io_queue_node;
avl_node_t io_offset_node; avl_node_t io_offset_node;
avl_node_t io_alloc_node; avl_node_t io_alloc_node;
zio_alloc_list_t io_alloc_list;
/* Internal pipeline state */ /* Internal pipeline state */
enum zio_flag io_flags; enum zio_flag io_flags;

View File

@ -179,6 +179,30 @@ relative to the pool.
Use \fB1\fR for yes (default) and \fB0\fR for no. Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE .RE
.sp
.ne 2
.na
\fBzfs_metaslab_segment_weight_enabled\fR (int)
.ad
.RS 12n
Enable/disable segment-based metaslab selection.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE
.sp
.ne 2
.na
\fBzfs_metaslab_switch_threshold\fR (int)
.ad
.RS 12n
When using segment-based metaslab selection, continue allocating
from the active metaslab until \fBzfs_metaslab_switch_threshold\fR
worth of buckets have been exhausted.
.sp
Default value: \fB2\fR.
.RE
.sp .sp
.ne 2 .ne 2
.na .na

File diff suppressed because it is too large Load Diff

View File

@ -1313,7 +1313,7 @@ spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
static void static void
spa_unload(spa_t *spa) spa_unload(spa_t *spa)
{ {
int i; int i, c;
ASSERT(MUTEX_HELD(&spa_namespace_lock)); ASSERT(MUTEX_HELD(&spa_namespace_lock));
@ -1330,6 +1330,19 @@ spa_unload(spa_t *spa)
spa->spa_sync_on = B_FALSE; spa->spa_sync_on = B_FALSE;
} }
/*
* Even though vdev_free() also calls vdev_metaslab_fini, we need
* to call it earlier, before we wait for async i/o to complete.
* This ensures that there is no async metaslab prefetching, by
* calling taskq_wait(mg_taskq).
*/
if (spa->spa_root_vdev != NULL) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
for (c = 0; c < spa->spa_root_vdev->vdev_children; c++)
vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]);
spa_config_exit(spa, SCL_ALL, FTAG);
}
/* /*
* Wait for any outstanding async I/O to complete. * Wait for any outstanding async I/O to complete.
*/ */

View File

@ -1833,6 +1833,7 @@ spa_init(int mode)
refcount_init(); refcount_init();
unique_init(); unique_init();
range_tree_init(); range_tree_init();
metaslab_alloc_trace_init();
ddt_init(); ddt_init();
zio_init(); zio_init();
dmu_init(); dmu_init();
@ -1861,6 +1862,7 @@ spa_fini(void)
dmu_fini(); dmu_fini();
zio_fini(); zio_fini();
ddt_fini(); ddt_fini();
metaslab_alloc_trace_fini();
range_tree_fini(); range_tree_fini();
unique_fini(); unique_fini();
refcount_fini(); refcount_fini();

View File

@ -173,7 +173,6 @@ space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
dmu_buf_will_dirty(sm->sm_dbuf, tx); dmu_buf_will_dirty(sm->sm_dbuf, tx);
ASSERT(space_map_histogram_verify(sm, rt)); ASSERT(space_map_histogram_verify(sm, rt));
/* /*
* Transfer the content of the range tree histogram to the space * Transfer the content of the range tree histogram to the space
* map histogram. The space map histogram contains 32 buckets ranging * map histogram. The space map histogram contains 32 buckets ranging

View File

@ -596,6 +596,7 @@ zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
offsetof(zio_link_t, zl_parent_node)); offsetof(zio_link_t, zl_parent_node));
list_create(&zio->io_child_list, sizeof (zio_link_t), list_create(&zio->io_child_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_child_node)); offsetof(zio_link_t, zl_child_node));
metaslab_trace_init(&zio->io_alloc_list);
if (vd != NULL) if (vd != NULL)
zio->io_child_type = ZIO_CHILD_VDEV; zio->io_child_type = ZIO_CHILD_VDEV;
@ -657,6 +658,7 @@ zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
static void static void
zio_destroy(zio_t *zio) zio_destroy(zio_t *zio)
{ {
metaslab_trace_fini(&zio->io_alloc_list);
list_destroy(&zio->io_parent_list); list_destroy(&zio->io_parent_list);
list_destroy(&zio->io_child_list); list_destroy(&zio->io_child_list);
mutex_destroy(&zio->io_lock); mutex_destroy(&zio->io_lock);
@ -2299,7 +2301,8 @@ zio_write_gang_block(zio_t *pio)
} }
error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE, error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags, pio); bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
&pio->io_alloc_list, pio);
if (error) { if (error) {
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) { if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE); ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
@ -3011,7 +3014,8 @@ zio_dva_allocate(zio_t *zio)
flags |= METASLAB_ASYNC_ALLOC; flags |= METASLAB_ASYNC_ALLOC;
error = metaslab_alloc(spa, mc, zio->io_size, bp, error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags, zio); zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
&zio->io_alloc_list, zio);
if (error != 0) { if (error != 0) {
spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, " spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, "
@ -3077,18 +3081,24 @@ zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, uint64_t size,
boolean_t use_slog) boolean_t use_slog)
{ {
int error = 1; int error = 1;
zio_alloc_list_t io_alloc_list;
ASSERT(txg > spa_syncing_txg(spa)); ASSERT(txg > spa_syncing_txg(spa));
metaslab_trace_init(&io_alloc_list);
if (use_slog) { if (use_slog) {
error = metaslab_alloc(spa, spa_log_class(spa), size, error = metaslab_alloc(spa, spa_log_class(spa), size,
new_bp, 1, txg, NULL, METASLAB_FASTWRITE, NULL); new_bp, 1, txg, NULL, METASLAB_FASTWRITE,
&io_alloc_list, NULL);
} }
if (error) { if (error) {
error = metaslab_alloc(spa, spa_normal_class(spa), size, error = metaslab_alloc(spa, spa_normal_class(spa), size,
new_bp, 1, txg, NULL, METASLAB_FASTWRITE, NULL); new_bp, 1, txg, NULL, METASLAB_FASTWRITE,
&io_alloc_list, NULL);
} }
metaslab_trace_fini(&io_alloc_list);
if (error == 0) { if (error == 0) {
BP_SET_LSIZE(new_bp, size); BP_SET_LSIZE(new_bp, size);