diff --git a/include/sys/vdev.h b/include/sys/vdev.h
index 365789e524d6..7d64cf6bceab 100644
--- a/include/sys/vdev.h
+++ b/include/sys/vdev.h
@@ -121,8 +121,7 @@ extern void vdev_queue_io_done(zio_t *zio);
 
 extern void vdev_config_dirty(vdev_t *vd);
 extern void vdev_config_clean(vdev_t *vd);
-extern int vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg,
-    boolean_t);
+extern int vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg);
 
 extern void vdev_state_dirty(vdev_t *vd);
 extern void vdev_state_clean(vdev_t *vd);
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 787b03d2991e..ea4eb9f9a820 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -6518,16 +6518,10 @@ spa_sync(spa_t *spa, uint64_t txg)
 			if (svdcount == SPA_DVAS_PER_BP)
 				break;
 		}
-		error = vdev_config_sync(svd, svdcount, txg, B_FALSE);
-		if (error != 0)
-			error = vdev_config_sync(svd, svdcount, txg,
-			    B_TRUE);
+		error = vdev_config_sync(svd, svdcount, txg);
 	} else {
 		error = vdev_config_sync(rvd->vdev_child,
-		    rvd->vdev_children, txg, B_FALSE);
-		if (error != 0)
-			error = vdev_config_sync(rvd->vdev_child,
-			    rvd->vdev_children, txg, B_TRUE);
+		    rvd->vdev_children, txg);
 	}
 
 	if (error == 0)
diff --git a/module/zfs/vdev_label.c b/module/zfs/vdev_label.c
index 419cbc1a0ebe..9d4229ac89ea 100644
--- a/module/zfs/vdev_label.c
+++ b/module/zfs/vdev_label.c
@@ -1187,15 +1187,16 @@ vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
  * at any time, you can just call it again, and it will resume its work.
  */
 int
-vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg, boolean_t tryhard)
+vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg)
 {
 	spa_t *spa = svd[0]->vdev_spa;
 	uberblock_t *ub = &spa->spa_uberblock;
 	vdev_t *vd;
 	zio_t *zio;
-	int error;
+	int error = 0;
 	int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
 
+retry:
 	/*
 	 * Normally, we don't want to try too hard to write every label and
 	 * uberblock.  If there is a flaky disk, we don't want the rest of the
@@ -1203,8 +1204,11 @@ vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg, boolean_t tryhard)
 	 * single label out, we should retry with ZIO_FLAG_TRYHARD before
 	 * bailing out and declaring the pool faulted.
 	 */
-	if (tryhard)
+	if (error != 0) {
+		if ((flags & ZIO_FLAG_TRYHARD) != 0)
+			return (error);
 		flags |= ZIO_FLAG_TRYHARD;
+	}
 
 	ASSERT(ub->ub_txg <= txg);
 
@@ -1248,7 +1252,7 @@ vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg, boolean_t tryhard)
 	 * are committed to stable storage before the uberblock update.
 	 */
 	if ((error = vdev_label_sync_list(spa, 0, txg, flags)) != 0)
-		return (error);
+		goto retry;
 
 	/*
 	 * Sync the uberblocks to all vdevs in svd[].
@@ -1266,7 +1270,7 @@ vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg, boolean_t tryhard)
 	 * to the new uberblocks.
 	 */
 	if ((error = vdev_uberblock_sync_list(svd, svdcount, ub, flags)) != 0)
-		return (error);
+		goto retry;
 
 	/*
 	 * Sync out odd labels for every dirty vdev.  If the system dies
@@ -1278,5 +1282,8 @@ vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg, boolean_t tryhard)
 	 * to disk to ensure that all odd-label updates are committed to
 	 * stable storage before the next transaction group begins.
 	 */
-	return (vdev_label_sync_list(spa, 1, txg, flags));
+	if ((error = vdev_label_sync_list(spa, 1, txg, flags)) != 0)
+		goto retry;
+
+	return (0);
 }
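
For readers who want the retry pattern in isolation, below is a minimal, self-contained sketch (not ZFS code; try_write(), FLAG_CANFAIL, and FLAG_TRYHARD are hypothetical stand-ins) of the control flow this patch folds into vdev_config_sync(): on the first failure, jump back to the top, escalate the flags to the try-hard variant once, and only return the error if the escalated pass also fails.

#include <stdio.h>

#define	FLAG_CANFAIL	0x1	/* hypothetical: first pass may fail fast */
#define	FLAG_TRYHARD	0x2	/* hypothetical: escalated retry pass */

/* Hypothetical I/O step; fails once unless the TRYHARD flag is set. */
static int
try_write(int step, int flags)
{
	if (step == 1 && !(flags & FLAG_TRYHARD))
		return (-1);	/* simulate a transient write failure */
	return (0);
}

static int
config_sync_sketch(void)
{
	int error = 0;
	int flags = FLAG_CANFAIL;

retry:
	/*
	 * On re-entry after a failure, escalate to TRYHARD once; if the
	 * escalated pass also failed, give up and return the error.
	 */
	if (error != 0) {
		if ((flags & FLAG_TRYHARD) != 0)
			return (error);
		flags |= FLAG_TRYHARD;
	}

	if ((error = try_write(0, flags)) != 0)	/* e.g. even labels */
		goto retry;
	if ((error = try_write(1, flags)) != 0)	/* e.g. uberblocks */
		goto retry;
	if ((error = try_write(2, flags)) != 0)	/* e.g. odd labels */
		goto retry;

	return (0);
}

int
main(void)
{
	printf("config_sync_sketch() = %d\n", config_sync_sketch());
	return (0);
}

Restarting from the first step on retry mirrors the patched function: as the comment above vdev_config_sync() notes, the label sync algorithm can be called again at any point and will simply resume its work.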