diff --git a/cmd/zdb/zdb.c b/cmd/zdb/zdb.c index a60f0f255bff..4b07cdb8e0cb 100644 --- a/cmd/zdb/zdb.c +++ b/cmd/zdb/zdb.c @@ -2555,9 +2555,22 @@ dump_uberblock(uberblock_t *ub, const char *header, const char *footer) (void) printf("\tmmp_magic = %016llx\n", (u_longlong_t)ub->ub_mmp_magic); - if (ub->ub_mmp_magic == MMP_MAGIC) + if (MMP_VALID(ub)) { (void) printf("\tmmp_delay = %0llu\n", (u_longlong_t)ub->ub_mmp_delay); + if (MMP_SEQ_VALID(ub)) + (void) printf("\tmmp_seq = %u\n", + (unsigned int) MMP_SEQ(ub)); + if (MMP_FAIL_INT_VALID(ub)) + (void) printf("\tmmp_fail = %u\n", + (unsigned int) MMP_FAIL_INT(ub)); + if (MMP_INTERVAL_VALID(ub)) + (void) printf("\tmmp_write = %u\n", + (unsigned int) MMP_INTERVAL(ub)); + /* After MMP_* to make summarize_uberblock_mmp cleaner */ + (void) printf("\tmmp_valid = %x\n", + (unsigned int) ub->ub_mmp_config & 0xFF); + } if (dump_opt['u'] >= 4) { char blkbuf[BP_SPRINTF_LEN]; diff --git a/include/sys/fs/zfs.h b/include/sys/fs/zfs.h index d25e8b5a5bb5..e49a58f43a28 100644 --- a/include/sys/fs/zfs.h +++ b/include/sys/fs/zfs.h @@ -718,6 +718,7 @@ typedef struct zpool_load_policy { #define ZPOOL_CONFIG_CACHEFILE "cachefile" /* not stored on disk */ #define ZPOOL_CONFIG_MMP_STATE "mmp_state" /* not stored on disk */ #define ZPOOL_CONFIG_MMP_TXG "mmp_txg" /* not stored on disk */ +#define ZPOOL_CONFIG_MMP_SEQ "mmp_seq" /* not stored on disk */ #define ZPOOL_CONFIG_MMP_HOSTNAME "mmp_hostname" /* not stored on disk */ #define ZPOOL_CONFIG_MMP_HOSTID "mmp_hostid" /* not stored on disk */ #define ZPOOL_CONFIG_ALLOCATION_BIAS "alloc_bias" /* not stored on disk */ diff --git a/include/sys/mmp.h b/include/sys/mmp.h index 6f4672a387f4..527e3323b4b9 100644 --- a/include/sys/mmp.h +++ b/include/sys/mmp.h @@ -31,6 +31,11 @@ extern "C" { #define MMP_DEFAULT_INTERVAL 1000 /* ms */ #define MMP_DEFAULT_IMPORT_INTERVALS 20 #define MMP_DEFAULT_FAIL_INTERVALS 10 +#define MMP_MIN_FAIL_INTERVALS 2 /* min if != 0 */ +#define MMP_IMPORT_SAFETY_FACTOR 200 /* pct */ +#define MMP_INTERVAL_OK(interval) MAX(interval, MMP_MIN_INTERVAL) +#define MMP_FAIL_INTVS_OK(fails) (fails == 0 ? 0 : MAX(fails, \ + MMP_MIN_FAIL_INTERVALS)) typedef struct mmp_thread { kmutex_t mmp_thread_lock; /* protect thread mgmt fields */ @@ -46,6 +51,7 @@ typedef struct mmp_thread { int mmp_skip_error; /* reason for last skipped write */ vdev_t *mmp_last_leaf; /* last mmp write sent here */ uint64_t mmp_leaf_last_gen; /* last mmp write sent here */ + uint32_t mmp_seq; /* intra-second update counter */ } mmp_thread_t; diff --git a/include/sys/uberblock_impl.h b/include/sys/uberblock_impl.h index 113df7c61d7d..91699e65131a 100644 --- a/include/sys/uberblock_impl.h +++ b/include/sys/uberblock_impl.h @@ -44,7 +44,36 @@ extern "C" { */ #define UBERBLOCK_MAGIC 0x00bab10c /* oo-ba-bloc! 
*/ #define UBERBLOCK_SHIFT 10 /* up to 1K */ -#define MMP_MAGIC 0xa11cea11 /* all-see-all */ +#define MMP_MAGIC 0xa11cea11 /* all-see-all */ + +#define MMP_INTERVAL_VALID_BIT 0x01 +#define MMP_SEQ_VALID_BIT 0x02 +#define MMP_FAIL_INT_VALID_BIT 0x04 + +#define MMP_VALID(ubp) (ubp->ub_magic == UBERBLOCK_MAGIC && \ + ubp->ub_mmp_magic == MMP_MAGIC) +#define MMP_INTERVAL_VALID(ubp) (MMP_VALID(ubp) && (ubp->ub_mmp_config & \ + MMP_INTERVAL_VALID_BIT)) +#define MMP_SEQ_VALID(ubp) (MMP_VALID(ubp) && (ubp->ub_mmp_config & \ + MMP_SEQ_VALID_BIT)) +#define MMP_FAIL_INT_VALID(ubp) (MMP_VALID(ubp) && (ubp->ub_mmp_config & \ + MMP_FAIL_INT_VALID_BIT)) + +#define MMP_INTERVAL(ubp) ((ubp->ub_mmp_config & 0x00000000FFFFFF00) \ + >> 8) +#define MMP_SEQ(ubp) ((ubp->ub_mmp_config & 0x0000FFFF00000000) \ + >> 32) +#define MMP_FAIL_INT(ubp) ((ubp->ub_mmp_config & 0xFFFF000000000000) \ + >> 48) + +#define MMP_INTERVAL_SET(write) \ + (((uint64_t)(write & 0xFFFFFF) << 8) | MMP_INTERVAL_VALID_BIT) + +#define MMP_SEQ_SET(seq) \ + (((uint64_t)(seq & 0xFFFF) << 32) | MMP_SEQ_VALID_BIT) + +#define MMP_FAIL_INT_SET(fail) \ + (((uint64_t)(fail & 0xFFFF) << 48) | MMP_FAIL_INT_VALID_BIT) struct uberblock { uint64_t ub_magic; /* UBERBLOCK_MAGIC */ @@ -59,8 +88,31 @@ struct uberblock { /* Maybe missing in uberblocks we read, but always written */ uint64_t ub_mmp_magic; /* MMP_MAGIC */ - uint64_t ub_mmp_delay; /* nanosec since last MMP write */ - uint64_t ub_mmp_seq; /* reserved for sequence number */ + /* + * If ub_mmp_delay == 0 and ub_mmp_magic is valid, MMP is off. + * Otherwise, nanosec since last MMP write. + */ + uint64_t ub_mmp_delay; + + /* + * The ub_mmp_config contains the multihost write interval, multihost + * fail intervals, sequence number for sub-second granularity, and + * valid bit mask. This layout is as follows: + * + * 64 56 48 40 32 24 16 8 0 + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * 0 | Fail Intervals| Seq | Write Interval (ms) | VALID | + * +-------+-------+-------+-------+-------+-------+-------+-------+ + * + * This allows a write_interval of (2^24/1000)s, over 4.5 hours + * + * VALID Bits: + * - 0x01 - Write Interval (ms) + * - 0x02 - Sequence number exists + * - 0x04 - Fail Intervals + * - 0xf8 - Reserved + */ + uint64_t ub_mmp_config; /* * ub_checkpoint_txg indicates two things about the current uberblock: diff --git a/man/man5/zfs-module-parameters.5 b/man/man5/zfs-module-parameters.5 index f75f09917445..c1994f340122 100644 --- a/man/man5/zfs-module-parameters.5 +++ b/man/man5/zfs-module-parameters.5 @@ -1849,20 +1849,14 @@ Default value: \fB0\fR. .ad .RS 12n Used to control the frequency of multihost writes which are performed when the -\fBmultihost\fR pool property is on. This is one factor used to determine -the length of the activity check during import. +\fBmultihost\fR pool property is on. This is one factor used to determine the +length of the activity check during import. .sp -The multihost write period is \fBzfs_multihost_interval / leaf-vdevs\fR milliseconds. -This means that on average a multihost write will be issued for each leaf vdev every -\fBzfs_multihost_interval\fR milliseconds. In practice, the observed period can -vary with the I/O load and this observed value is the delay which is stored in -the uberblock. -.sp -On import the activity check waits a minimum amount of time determined by -\fBzfs_multihost_interval * zfs_multihost_import_intervals\fR. 
The activity -check time may be further extended if the value of mmp delay found in the best -uberblock indicates actual multihost updates happened at longer intervals than -\fBzfs_multihost_interval\fR. A minimum value of \fB100ms\fR is enforced. +The multihost write period is \fBzfs_multihost_interval / leaf-vdevs\fR +milliseconds. On average a multihost write will be issued for each leaf vdev +every \fBzfs_multihost_interval\fR milliseconds. In practice, the observed +period can vary with the I/O load and this observed value is the delay which is +stored in the uberblock. .sp Default value: \fB1000\fR. .RE @@ -1876,8 +1870,17 @@ Default value: \fB1000\fR. Used to control the duration of the activity test on import. Smaller values of \fBzfs_multihost_import_intervals\fR will reduce the import time but increase the risk of failing to detect an active pool. The total activity check time is -never allowed to drop below one second. A value of 0 is ignored and treated as -if it was set to 1 +never allowed to drop below one second. +.sp +On import the activity check waits a minimum amount of time determined by +\fBzfs_multihost_interval * zfs_multihost_import_intervals\fR, or the same +product computed on the host which last had the pool imported (whichever is +greater). The activity check time may be further extended if the value of mmp +delay found in the best uberblock indicates actual multihost updates happened +at longer intervals than \fBzfs_multihost_interval\fR. A minimum value of +\fB100ms\fR is enforced. +.sp +A value of 0 is ignored and treated as if it was set to 1. .sp Default value: \fB20\fR. .RE @@ -1888,17 +1891,22 @@ Default value: \fB20\fR. \fBzfs_multihost_fail_intervals\fR (uint) .ad .RS 12n -Controls the behavior of the pool when multihost write failures are detected. +Controls the behavior of the pool when multihost write failures or delays are +detected. .sp -When \fBzfs_multihost_fail_intervals = 0\fR then multihost write failures are ignored. -The failures will still be reported to the ZED which depending on its -configuration may take action such as suspending the pool or offlining a device. +When \fBzfs_multihost_fail_intervals = 0\fR, multihost write failures or delays +are ignored. The failures will still be reported to the ZED which depending on +its configuration may take action such as suspending the pool or offlining a +device. + .sp -When \fBzfs_multihost_fail_intervals > 0\fR then sequential multihost write failures -will cause the pool to be suspended. This occurs when -\fBzfs_multihost_fail_intervals * zfs_multihost_interval\fR milliseconds have -passed since the last successful multihost write. This guarantees the activity test -will see multihost writes if the pool is imported. +When \fBzfs_multihost_fail_intervals > 0\fR, the pool will be suspended if +\fBzfs_multihost_fail_intervals * zfs_multihost_interval\fR milliseconds pass +without a successful mmp write. This guarantees the activity test will see +mmp writes if the pool is imported. A value of 1 is ignored and treated as +if it was set to 2. This is necessary to prevent the pool from being suspended +due to normal, small I/O latency variations. + .sp Default value: \fB10\fR. .RE diff --git a/module/zfs/mmp.c b/module/zfs/mmp.c index 16975dd98eaa..cd5603a1a5cd 100644 --- a/module/zfs/mmp.c +++ b/module/zfs/mmp.c @@ -49,40 +49,101 @@ * Uberblocks written by the txg_sync thread always go into the first * (N-MMP_BLOCKS_PER_LABEL) slots, the remaining slots are reserved for MMP. 
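Review note (not part of the patch): the ub_mmp_config packing introduced in uberblock_impl.h above can be exercised in isolation. This standalone sketch copies the SET macros and field masks from the patch verbatim; the demo values are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define	MMP_INTERVAL_VALID_BIT	0x01
#define	MMP_SEQ_VALID_BIT	0x02
#define	MMP_FAIL_INT_VALID_BIT	0x04

#define	MMP_INTERVAL_SET(write) \
	(((uint64_t)(write & 0xFFFFFF) << 8) | MMP_INTERVAL_VALID_BIT)
#define	MMP_SEQ_SET(seq) \
	(((uint64_t)(seq & 0xFFFF) << 32) | MMP_SEQ_VALID_BIT)
#define	MMP_FAIL_INT_SET(fail) \
	(((uint64_t)(fail & 0xFFFF) << 48) | MMP_FAIL_INT_VALID_BIT)

int
main(void)
{
	/* Defaults: 1000 ms write interval, seq 1, 10 fail intervals. */
	uint64_t config = MMP_INTERVAL_SET(1000) | MMP_SEQ_SET(1) |
	    MMP_FAIL_INT_SET(10);

	/* Decode with the same masks and shifts the GET macros use. */
	printf("config   = %#018llx\n", (unsigned long long)config);
	printf("valid    = %#04llx\n", (unsigned long long)(config & 0xFF));
	printf("interval = %llu ms\n",
	    (unsigned long long)((config & 0x00000000FFFFFF00ULL) >> 8));
	printf("seq      = %llu\n",
	    (unsigned long long)((config & 0x0000FFFF00000000ULL) >> 32));
	printf("fail_int = %llu\n",
	    (unsigned long long)((config & 0xFFFF000000000000ULL) >> 48));
	return (0);
}

This prints valid = 0x07, interval = 1000 ms, seq = 1, and fail_int = 10, i.e. the same values the dump_uberblock() change in zdb.c above reports as mmp_write, mmp_seq, mmp_fail, and mmp_valid.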
* They are used to hold uberblocks which are exactly the same as the last - * synced uberblock except that the ub_timestamp is frequently updated. - * Like all other uberblocks, the slot is written with an embedded checksum, - * and slots with invalid checksums are ignored. This provides the + * synced uberblock except that the ub_timestamp and mmp_config are frequently + * updated. Like all other uberblocks, the slot is written with an embedded + * checksum, and slots with invalid checksums are ignored. This provides the * "heartbeat", with no risk of overwriting good uberblocks that must be * preserved, e.g. previous txgs and associated block pointers. * - * Two optional fields are added to uberblock structure: ub_mmp_magic and - * ub_mmp_delay. The magic field allows zfs to tell whether ub_mmp_delay is - * valid. The delay field is a decaying average of the amount of time between - * completion of successive MMP writes, in nanoseconds. It is used to predict - * how long the import must wait to detect activity in the pool, before - * concluding it is not in use. + * Three optional fields are added to the uberblock structure: ub_mmp_magic, + * ub_mmp_config, and ub_mmp_delay. The ub_mmp_magic value allows zfs to tell + * whether the other ub_mmp_* fields are valid. The ub_mmp_config field tells + * the importing host the settings of zfs_multihost_interval and + * zfs_multihost_fail_intervals on the host which last had (or currently has) + * the pool imported. These determine how long a host must wait to detect + * activity in the pool, before concluding the pool is not in use. The + * mmp_delay field is a decaying average of the amount of time between + * completion of successive MMP writes, in nanoseconds. It indicates whether + * MMP is enabled. * * During import an activity test may now be performed to determine if * the pool is in use. The activity test is typically required if the * ZPOOL_CONFIG_HOSTID does not match the system hostid, the pool state is * POOL_STATE_ACTIVE, and the pool is not a root pool. * - * The activity test finds the "best" uberblock (highest txg & timestamp), - * waits some time, and then finds the "best" uberblock again. If the txg - * and timestamp in both "best" uberblocks do not match, the pool is in use - * by another host and the import fails. Since the granularity of the - * timestamp is in seconds this activity test must take a bare minimum of one - * second. In order to assure the accuracy of the activity test, the default - * values result in an activity test duration of 10x the mmp write interval. + * The activity test finds the "best" uberblock (highest txg, timestamp, and, if + * ub_mmp_magic is valid, sequence number from ub_mmp_config). It then waits + * some time, and finds the "best" uberblock again. If any of the mentioned + * fields have different values in the newly read uberblock, the pool is in use + * by another host and the import fails. In order to ensure the accuracy of the + * activity test, the default values result in an activity test duration of 20x + * the mmp write interval. * - * The "zpool import" activity test can be expected to take a minimum time of - * zfs_multihost_import_intervals * zfs_multihost_interval milliseconds. If the - * "best" uberblock has a valid ub_mmp_delay field, then the duration of the - * test may take longer if MMP writes were occurring less frequently than - * expected. Additionally, the duration is then extended by a random 25% to - * attempt to to detect simultaneous imports. 
For example, if both partner - * hosts are rebooted at the same time and automatically attempt to import the - * pool. + * The duration of the "zpool import" activity test depends on the information + * available in the "best" uberblock: + * + * 1) If uberblock was written by zfs-0.8 or newer and fail_intervals > 0: + * ub_mmp_config.fail_intervals * ub_mmp_config.multihost_interval * 2 + * + * In this case, a weak guarantee is provided. Since the host which last had + * the pool imported will suspend the pool if no mmp writes land within + * fail_intervals * multihost_interval ms, the absence of writes during that + * time means either the pool is not imported, or it is imported but the pool + * is suspended and no further writes will occur. + * + * Note that resuming the suspended pool on the remote host would invalidate + * this guarantee, and so it is not allowed. + * + * The factor of 2 provides a conservative safety factor and derives from + * MMP_IMPORT_SAFETY_FACTOR. + * + * 2) If uberblock was written by zfs-0.8 or newer and fail_intervals == 0: + * (ub_mmp_config.multihost_interval + ub_mmp_delay) * + * zfs_multihost_import_intervals + * + * In this case no guarantee can be provided. However, as long as some devices + * are healthy and connected, it is likely that at least one write will land + * within (multihost_interval + mmp_delay) because multihost_interval is + * enough time for a write to be attempted to each leaf vdev, and mmp_delay + * is enough for one to land, based on past delays. Multiplying by + * zfs_multihost_import_intervals provides a conservative safety factor. + * + * 3) If uberblock was written by zfs-0.7: + * (zfs_multihost_interval + ub_mmp_delay) * zfs_multihost_import_intervals + * + * The same logic as case #2 applies, but we do not know remote tunables. + * + * We use the local value for zfs_multihost_interval because the original MMP + * did not record this value in the uberblock. + * + * ub_mmp_delay >= (zfs_multihost_interval / leaves), so if the other host + * has a much larger zfs_multihost_interval set, ub_mmp_delay will reflect + * that. We will have waited enough time for zfs_multihost_import_intervals + * writes to be issued and all but one to land. + * + * single device pool example delays + * + * import_delay = (1 + 1) * 20 = 40s #defaults, no I/O delay + * import_delay = (1 + 10) * 20 = 220s #defaults, 10s I/O delay + * import_delay = (10 + 10) * 20 = 400s #10s multihost_interval, + * no I/O delay + * 100 device pool example delays + * + * import_delay = (1 + .01) * 20 = 20s #defaults, no I/O delay + * import_delay = (1 + 10) * 20 = 220s #defaults, 10s I/O delay + * import_delay = (10 + .1) * 20 = 202s #10s multihost_interval, + * no I/O delay + * + * 4) Otherwise, this uberblock was written by a pre-MMP zfs: + * zfs_multihost_import_intervals * zfs_multihost_interval + * + * In this case local tunables are used. By default this product = 20s, long + * enough for a pool with any activity at all to write at least one + * uberblock. No guarantee can be provided. + * + * Additionally, the duration is then extended by a random 25% to attempt to + * detect simultaneous imports. For example, if both partner hosts are rebooted + * at the same time and automatically attempt to import the pool. */ /* @@ -90,10 +151,9 @@ * 'multihost' pool property is on. This is one factor used to determine the * length of the activity check during import. * - * The mmp write period is zfs_multihost_interval / leaf-vdevs milliseconds. 
- * This means that on average an mmp write will be issued for each leaf vdev - * every zfs_multihost_interval milliseconds. In practice, the observed period - * can vary with the I/O load and this observed value is the delay which is + * On average an mmp write will be issued for each leaf vdev every + * zfs_multihost_interval milliseconds. In practice, the observed period can + * vary with the I/O load and this observed value is the ub_mmp_delay which is * stored in the uberblock. The minimum allowed value is 100 ms. */ ulong_t zfs_multihost_interval = MMP_DEFAULT_INTERVAL; @@ -108,19 +168,21 @@ ulong_t zfs_multihost_interval = MMP_DEFAULT_INTERVAL; uint_t zfs_multihost_import_intervals = MMP_DEFAULT_IMPORT_INTERVALS; /* - * Controls the behavior of the pool when mmp write failures are detected. + * Controls the behavior of the pool when mmp write failures or delays are + * detected. * - * When zfs_multihost_fail_intervals = 0 then mmp write failures are ignored. - * The failures will still be reported to the ZED which depending on its - * configuration may take action such as suspending the pool or taking a + * When zfs_multihost_fail_intervals = 0, mmp write failures or delays are + * ignored. The failures will still be reported to the ZED which depending on + * its configuration may take action such as suspending the pool or taking a * device offline. * - * When zfs_multihost_fail_intervals > 0 then sequential mmp write failures will - * cause the pool to be suspended. This occurs when - * zfs_multihost_fail_intervals * zfs_multihost_interval milliseconds have - * passed since the last successful mmp write. This guarantees the activity - * test will see mmp writes if the - * pool is imported. + * When zfs_multihost_fail_intervals > 0, the pool will be suspended if + * zfs_multihost_fail_intervals * zfs_multihost_interval milliseconds pass + * without a successful mmp write. This guarantees the activity test will see + * mmp writes if the pool is imported. A value of 1 is ignored and treated as + * if it was set to 2, because a single leaf vdev pool will issue a write once + * per multihost_interval and thus any variation in latency would cause the + * pool to be suspended. */ uint_t zfs_multihost_fail_intervals = MMP_DEFAULT_FAIL_INTERVALS; @@ -136,6 +198,14 @@ mmp_init(spa_t *spa) cv_init(&mmp->mmp_thread_cv, NULL, CV_DEFAULT, NULL); mutex_init(&mmp->mmp_io_lock, NULL, MUTEX_DEFAULT, NULL); mmp->mmp_kstat_id = 1; + + /* + * mmp_write_done() calculates mmp_delay based on prior mmp_delay and + * the elapsed time since the last write. For the first mmp write, + * there is no "last write", so we start with fake non-zero values. 
+ */ + mmp->mmp_last_write = gethrtime(); + mmp->mmp_delay = MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval)); } void @@ -173,10 +243,10 @@ mmp_thread_start(spa_t *spa) if (spa_writeable(spa)) { mutex_enter(&mmp->mmp_thread_lock); if (!mmp->mmp_thread) { - dprintf("mmp_thread_start pool %s\n", - spa->spa_name); mmp->mmp_thread = thread_create(NULL, 0, mmp_thread, spa, 0, &p0, TS_RUN, defclsyspri); + zfs_dbgmsg("MMP thread started pool '%s' " + "gethrtime %llu", spa_name(spa), gethrtime()); } mutex_exit(&mmp->mmp_thread_lock); } @@ -195,6 +265,8 @@ mmp_thread_stop(spa_t *spa) cv_wait(&mmp->mmp_thread_cv, &mmp->mmp_thread_lock); } mutex_exit(&mmp->mmp_thread_lock); + zfs_dbgmsg("MMP thread stopped pool '%s' gethrtime %llu", + spa_name(spa), gethrtime()); ASSERT(mmp->mmp_thread == NULL); mmp->mmp_thread_exiting = 0; @@ -304,7 +376,8 @@ mmp_delay_update(spa_t *spa, boolean_t write_completed) * strictly less than, in case delay was changed above. */ if (delay < mts->mmp_delay) { - hrtime_t min_delay = MSEC2NSEC(zfs_multihost_interval) / + hrtime_t min_delay = + MSEC2NSEC(MMP_INTERVAL_OK(zfs_multihost_interval)) / MAX(1, vdev_count_leaves(spa)); mts->mmp_delay = MAX(((delay + mts->mmp_delay * 127) / 128), min_delay); @@ -348,6 +421,7 @@ mmp_update_uberblock(spa_t *spa, uberblock_t *ub) mutex_enter(&mmp->mmp_io_lock); mmp->mmp_ub = *ub; + mmp->mmp_seq = 1; mmp->mmp_ub.ub_timestamp = gethrestime_sec(); mmp_delay_update(spa, B_TRUE); mutex_exit(&mmp->mmp_io_lock); @@ -372,8 +446,9 @@ mmp_write_uberblock(spa_t *spa) spa_config_enter(spa, SCL_STATE, mmp_tag, RW_READER); lock_acquire_time = gethrtime() - lock_acquire_time; if (lock_acquire_time > (MSEC2NSEC(MMP_MIN_INTERVAL) / 10)) - zfs_dbgmsg("SCL_STATE acquisition took %llu ns\n", - (u_longlong_t)lock_acquire_time); + zfs_dbgmsg("MMP SCL_STATE acquisition pool '%s' took %llu ns " + "gethrtime %llu", spa_name(spa), lock_acquire_time, + gethrtime()); mutex_enter(&mmp->mmp_io_lock); @@ -396,6 +471,9 @@ mmp_write_uberblock(spa_t *spa) spa_mmp_history_add(spa, mmp->mmp_ub.ub_txg, gethrestime_sec(), mmp->mmp_delay, NULL, 0, mmp->mmp_kstat_id++, error); + zfs_dbgmsg("MMP error choosing leaf pool '%s' " + "gethrtime %llu fail_mask %#x", spa_name(spa), + gethrtime(), error); } mutex_exit(&mmp->mmp_io_lock); spa_config_exit(spa, SCL_STATE, mmp_tag); @@ -403,16 +481,34 @@ mmp_write_uberblock(spa_t *spa) } vd = spa->spa_mmp.mmp_last_leaf; - mmp->mmp_skip_error = 0; + if (mmp->mmp_skip_error != 0) { + mmp->mmp_skip_error = 0; + zfs_dbgmsg("MMP write after skipping due to unavailable " + "leaves, pool '%s' gethrtime %llu leaf %#llu", + spa_name(spa), gethrtime(), vd->vdev_guid); + } if (mmp->mmp_zio_root == NULL) mmp->mmp_zio_root = zio_root(spa, NULL, NULL, flags | ZIO_FLAG_GODFATHER); + if (mmp->mmp_ub.ub_timestamp != gethrestime_sec()) { + /* + * Want to reset mmp_seq when timestamp advances because after + * an mmp_seq wrap new values will not be chosen by + * uberblock_compare() as the "best". 
+ */ + mmp->mmp_ub.ub_timestamp = gethrestime_sec(); + mmp->mmp_seq = 1; + } + ub = &mmp->mmp_ub; - ub->ub_timestamp = gethrestime_sec(); ub->ub_mmp_magic = MMP_MAGIC; ub->ub_mmp_delay = mmp->mmp_delay; + ub->ub_mmp_config = MMP_SEQ_SET(mmp->mmp_seq) | + MMP_INTERVAL_SET(MMP_INTERVAL_OK(zfs_multihost_interval)) | + MMP_FAIL_INT_SET(MMP_FAIL_INTVS_OK( + zfs_multihost_fail_intervals)); vd->vdev_mmp_pending = gethrtime(); vd->vdev_mmp_kstat_id = mmp->mmp_kstat_id; @@ -421,6 +517,7 @@ mmp_write_uberblock(spa_t *spa) abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd)); abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t)); + mmp->mmp_seq++; mmp->mmp_kstat_id++; mutex_exit(&mmp->mmp_io_lock); @@ -443,38 +540,75 @@ mmp_thread(void *arg) { spa_t *spa = (spa_t *)arg; mmp_thread_t *mmp = &spa->spa_mmp; - boolean_t last_spa_suspended = spa_suspended(spa); - boolean_t last_spa_multihost = spa_multihost(spa); + boolean_t suspended = spa_suspended(spa); + boolean_t multihost = spa_multihost(spa); + uint64_t mmp_interval = MSEC2NSEC(MMP_INTERVAL_OK( + zfs_multihost_interval)); + uint32_t mmp_fail_intervals = MMP_FAIL_INTVS_OK( + zfs_multihost_fail_intervals); + hrtime_t mmp_fail_ns = mmp_fail_intervals * mmp_interval; + boolean_t last_spa_suspended = suspended; + boolean_t last_spa_multihost = multihost; + uint64_t last_mmp_interval = mmp_interval; + uint32_t last_mmp_fail_intervals = mmp_fail_intervals; + hrtime_t last_mmp_fail_ns = mmp_fail_ns; callb_cpr_t cpr; - hrtime_t max_fail_ns = zfs_multihost_fail_intervals * - MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL)); + int skip_wait = 0; mmp_thread_enter(mmp, &cpr); - /* - * The mmp_write_done() function calculates mmp_delay based on the - * prior value of mmp_delay and the elapsed time since the last write. - * For the first mmp write, there is no "last write", so we start - * with fake, but reasonable, default non-zero values. - */ - mmp->mmp_delay = MSEC2NSEC(MAX(zfs_multihost_interval, - MMP_MIN_INTERVAL)) / MAX(vdev_count_leaves(spa), 1); - mmp->mmp_last_write = gethrtime() - mmp->mmp_delay; - while (!mmp->mmp_thread_exiting) { - uint64_t mmp_fail_intervals = zfs_multihost_fail_intervals; - uint64_t mmp_interval = MSEC2NSEC( - MAX(zfs_multihost_interval, MMP_MIN_INTERVAL)); - boolean_t suspended = spa_suspended(spa); - boolean_t multihost = spa_multihost(spa); - hrtime_t next_time; + hrtime_t next_time = gethrtime() + + MSEC2NSEC(MMP_DEFAULT_INTERVAL); + int leaves = MAX(vdev_count_leaves(spa), 1); + + /* Detect changes in tunables or state */ + + last_spa_suspended = suspended; + last_spa_multihost = multihost; + suspended = spa_suspended(spa); + multihost = spa_multihost(spa); + + last_mmp_interval = mmp_interval; + last_mmp_fail_intervals = mmp_fail_intervals; + last_mmp_fail_ns = mmp_fail_ns; + mmp_interval = MSEC2NSEC(MMP_INTERVAL_OK( + zfs_multihost_interval)); + mmp_fail_intervals = MMP_FAIL_INTVS_OK( + zfs_multihost_fail_intervals); + + /* Smooth so pool is not suspended when reducing tunables */ + if (mmp_fail_intervals * mmp_interval < mmp_fail_ns) { + mmp_fail_ns = (mmp_fail_ns * 31 + + mmp_fail_intervals * mmp_interval) / 32; + } else { + mmp_fail_ns = mmp_fail_intervals * + mmp_interval; + } + + if (mmp_interval != last_mmp_interval || + mmp_fail_intervals != last_mmp_fail_intervals) { + /* + * We want other hosts to see new tunables as quickly as + * possible. Write out at higher frequency than usual. 
*/ + skip_wait += leaves; + } if (multihost) - next_time = gethrtime() + mmp_interval / - MAX(vdev_count_leaves(spa), 1); - else - next_time = gethrtime() + - MSEC2NSEC(MMP_DEFAULT_INTERVAL); + next_time = gethrtime() + mmp_interval / leaves; + + if (mmp_fail_ns != last_mmp_fail_ns) { + zfs_dbgmsg("MMP interval change pool '%s' " + "gethrtime %llu last_mmp_interval %llu " + "mmp_interval %llu last_mmp_fail_intervals %u " + "mmp_fail_intervals %u mmp_fail_ns %llu " + "skip_wait %d leaves %d next_time %llu", + spa_name(spa), gethrtime(), last_mmp_interval, + mmp_interval, last_mmp_fail_intervals, + mmp_fail_intervals, mmp_fail_ns, skip_wait, leaves, + next_time); + } /* * MMP off => on, or suspended => !suspended: @@ -483,8 +617,14 @@ */ if ((!last_spa_multihost && multihost) || (last_spa_suspended && !suspended)) { + zfs_dbgmsg("MMP state change pool '%s': gethrtime %llu " + "last_spa_multihost %u multihost %u " + "last_spa_suspended %u suspended %u", + spa_name(spa), gethrtime(), last_spa_multihost, + multihost, last_spa_suspended, suspended); mutex_enter(&mmp->mmp_io_lock); mmp->mmp_last_write = gethrtime(); + mmp->mmp_delay = mmp_interval; mutex_exit(&mmp->mmp_io_lock); } @@ -497,38 +637,39 @@ mmp_thread(void *arg) mmp->mmp_delay = 0; mutex_exit(&mmp->mmp_io_lock); } - last_spa_multihost = multihost; - last_spa_suspended = suspended; - - /* - * Smooth max_fail_ns when its factors are decreased, because - * making (max_fail_ns < mmp_interval) results in the pool being - * immediately suspended before writes can occur at the new - * higher frequency. - */ - if ((mmp_interval * mmp_fail_intervals) < max_fail_ns) { - max_fail_ns = ((31 * max_fail_ns) + (mmp_interval * - mmp_fail_intervals)) / 32; - } else { - max_fail_ns = mmp_interval * mmp_fail_intervals; - } /* * Suspend the pool if no MMP write has succeeded in over * mmp_interval * mmp_fail_intervals nanoseconds. */ - if (!suspended && mmp_fail_intervals && multihost && - (gethrtime() - mmp->mmp_last_write) > max_fail_ns) { + if (multihost && !suspended && mmp_fail_intervals && + (gethrtime() - mmp->mmp_last_write) > mmp_fail_ns) { + zfs_dbgmsg("MMP suspending pool '%s': gethrtime %llu " + "mmp_last_write %llu mmp_interval %llu " + "mmp_fail_intervals %llu mmp_fail_ns %llu", + spa_name(spa), (u_longlong_t)gethrtime(), + (u_longlong_t)mmp->mmp_last_write, + (u_longlong_t)mmp_interval, + (u_longlong_t)mmp_fail_intervals, + (u_longlong_t)mmp_fail_ns); cmn_err(CE_WARN, "MMP writes to pool '%s' have not " - "succeeded in over %llus; suspending pool", + "succeeded in over %llu ms; suspending pool. 
" + "Hrtime %llu", spa_name(spa), - NSEC2SEC(gethrtime() - mmp->mmp_last_write)); + NSEC2MSEC(gethrtime() - mmp->mmp_last_write), + gethrtime()); zio_suspend(spa, NULL, ZIO_SUSPEND_MMP); } if (multihost && !suspended) mmp_write_uberblock(spa); + if (skip_wait > 0) { + next_time = gethrtime() + MSEC2NSEC(MMP_MIN_INTERVAL) / + leaves; + skip_wait--; + } + CALLB_CPR_SAFE_BEGIN(&cpr); (void) cv_timedwait_sig_hires(&mmp->mmp_thread_cv, &mmp->mmp_thread_lock, next_time, USEC2NSEC(1), diff --git a/module/zfs/spa.c b/module/zfs/spa.c index d95a43001e29..9d798ebac4da 100644 --- a/module/zfs/spa.c +++ b/module/zfs/spa.c @@ -2437,6 +2437,7 @@ spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, uint64_t hostid = 0; uint64_t tryconfig_txg = 0; uint64_t tryconfig_timestamp = 0; + uint16_t tryconfig_mmp_seq = 0; nvlist_t *nvinfo; if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { @@ -2445,6 +2446,8 @@ spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, &tryconfig_txg); (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP, &tryconfig_timestamp); + (void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ, + &tryconfig_mmp_seq); } (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state); @@ -2462,13 +2465,15 @@ spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0) return (B_FALSE); /* - * If the tryconfig_* values are nonzero, they are the results of an - * earlier tryimport. If they match the uberblock we just found, then - * the pool has not changed and we return false so we do not test a - * second time. + * If the tryconfig_ values are nonzero, they are the results of an + * earlier tryimport. If they all match the uberblock we just found, + * then the pool has not changed and we return false so we do not test + * a second time. */ if (tryconfig_txg && tryconfig_txg == ub->ub_txg && - tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp) + tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp && + tryconfig_mmp_seq && tryconfig_mmp_seq == + (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) return (B_FALSE); /* @@ -2491,6 +2496,76 @@ spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, return (B_TRUE); } +/* + * Nanoseconds the activity check must watch for changes on-disk. + */ +static uint64_t +spa_activity_check_duration(spa_t *spa, uberblock_t *ub) +{ + uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1); + uint64_t multihost_interval = MSEC2NSEC( + MMP_INTERVAL_OK(zfs_multihost_interval)); + uint64_t import_delay = MAX(NANOSEC, import_intervals * + multihost_interval); + + /* + * Local tunables determine a minimum duration except for the case + * where we know when the remote host will suspend the pool if MMP + * writes do not land. + * + * See Big Theory comment at the top of mmp.c for the reasoning behind + * these cases and times. 
*/ + + ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100); + + if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) && + MMP_FAIL_INT(ub) > 0) { + + /* MMP on remote host will suspend pool after failed writes */ + import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) * + MMP_IMPORT_SAFETY_FACTOR / 100; + + zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp " + "mmp_fails=%llu ub_mmp mmp_interval=%llu " + "import_intervals=%u", import_delay, MMP_FAIL_INT(ub), + MMP_INTERVAL(ub), import_intervals); + + } else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) && + MMP_FAIL_INT(ub) == 0) { + + /* MMP on remote host will never suspend pool */ + import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) + + ub->ub_mmp_delay) * import_intervals); + + zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp " + "mmp_interval=%llu ub_mmp_delay=%llu " + "import_intervals=%u", import_delay, MMP_INTERVAL(ub), + ub->ub_mmp_delay, import_intervals); + + } else if (MMP_VALID(ub)) { + /* + * zfs-0.7 compatibility case + */ + + import_delay = MAX(import_delay, (multihost_interval + + ub->ub_mmp_delay) * import_intervals); + + zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu " + "import_intervals=%u leaves=%u", import_delay, + ub->ub_mmp_delay, import_intervals, + vdev_count_leaves(spa)); + } else { + /* Using local tunings is the only reasonable option */ + zfs_dbgmsg("pool last imported on non-MMP aware " + "host using import_delay=%llu multihost_interval=%llu " + "import_intervals=%u", import_delay, multihost_interval, + import_intervals); + } + + return (import_delay); +} + /* * Perform the import activity check. If the user canceled the import or * we detected activity then fail. @@ -2498,10 +2573,11 @@ spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label, static int spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config) { - uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1); uint64_t txg = ub->ub_txg; uint64_t timestamp = ub->ub_timestamp; - uint64_t import_delay = NANOSEC; + uint64_t mmp_config = ub->ub_mmp_config; + uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0; + uint64_t import_delay; hrtime_t import_expire; nvlist_t *mmp_label = NULL; vdev_t *rvd = spa->spa_root_vdev; @@ -2518,7 +2594,7 @@ spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config) * during the earlier tryimport. If the txg recorded there is 0 then * the pool is known to be active on another host. * - * Otherwise, the pool might be in use on another node. Check for + * Otherwise, the pool might be in use on another host. Check for * changes in the uberblocks on disk if necessary. */ if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) { @@ -2533,23 +2609,7 @@ spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config) } } - /* - * Preferentially use the zfs_multihost_interval from the node which - * last imported the pool. This value is stored in an MMP uberblock as. - * - * ub_mmp_delay * vdev_count_leaves() == zfs_multihost_interval - */ - if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay) - import_delay = MAX(import_delay, import_intervals * - ub->ub_mmp_delay * MAX(vdev_count_leaves(spa), 1)); - - /* Apply a floor using the local default values. 
*/ - import_delay = MAX(import_delay, import_intervals * - MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL))); - - zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu import_intervals=%u " - "leaves=%u", import_delay, ub->ub_mmp_delay, import_intervals, - vdev_count_leaves(spa)); + import_delay = spa_activity_check_duration(spa, ub); /* Add a small random factor in case of simultaneous imports (0-25%) */ import_expire = gethrtime() + import_delay + @@ -2558,7 +2618,15 @@ spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config) while (gethrtime() < import_expire) { vdev_uberblock_load(rvd, ub, &mmp_label); - if (txg != ub->ub_txg || timestamp != ub->ub_timestamp) { + if (txg != ub->ub_txg || timestamp != ub->ub_timestamp || + mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) { + zfs_dbgmsg("multihost activity detected " + "txg %llu ub_txg %llu " + "timestamp %llu ub_timestamp %llu " + "mmp_config %#llx ub_mmp_config %#llx", + txg, ub->ub_txg, timestamp, ub->ub_timestamp, + mmp_config, ub->ub_mmp_config); + error = SET_ERROR(EREMOTEIO); break; } @@ -2944,6 +3012,9 @@ spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type) ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE); fnvlist_add_uint64(spa->spa_load_info, ZPOOL_CONFIG_MMP_TXG, ub->ub_txg); + fnvlist_add_uint16(spa->spa_load_info, + ZPOOL_CONFIG_MMP_SEQ, + (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)); } /* diff --git a/module/zfs/uberblock.c b/module/zfs/uberblock.c index 3b85260764d0..b8857d74d810 100644 --- a/module/zfs/uberblock.c +++ b/module/zfs/uberblock.c @@ -26,6 +26,7 @@ #include #include #include +#include int uberblock_verify(uberblock_t *ub) @@ -58,8 +59,15 @@ uberblock_update(uberblock_t *ub, vdev_t *rvd, uint64_t txg, uint64_t mmp_delay) ub->ub_timestamp = gethrestime_sec(); ub->ub_software_version = SPA_VERSION; ub->ub_mmp_magic = MMP_MAGIC; - ub->ub_mmp_delay = spa_multihost(rvd->vdev_spa) ? mmp_delay : 0; - ub->ub_mmp_seq = 0; + if (spa_multihost(rvd->vdev_spa)) { + ub->ub_mmp_delay = mmp_delay; + ub->ub_mmp_config = MMP_SEQ_SET(0) | + MMP_INTERVAL_SET(zfs_multihost_interval) | + MMP_FAIL_INT_SET(zfs_multihost_fail_intervals); + } else { + ub->ub_mmp_delay = 0; + ub->ub_mmp_config = 0; + } ub->ub_checkpoint_txg = 0; return (ub->ub_rootbp.blk_birth == txg); diff --git a/module/zfs/vdev_label.c b/module/zfs/vdev_label.c index 65b847d66470..a03722d05e25 100644 --- a/module/zfs/vdev_label.c +++ b/module/zfs/vdev_label.c @@ -1181,10 +1181,35 @@ static int vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2) { int cmp = AVL_CMP(ub1->ub_txg, ub2->ub_txg); + if (likely(cmp)) return (cmp); - return (AVL_CMP(ub1->ub_timestamp, ub2->ub_timestamp)); + cmp = AVL_CMP(ub1->ub_timestamp, ub2->ub_timestamp); + if (likely(cmp)) + return (cmp); + + /* + * If MMP_VALID(ub) && MMP_SEQ_VALID(ub) then the host has an MMP-aware + * ZFS, e.g. zfsonlinux >= 0.7. + * + * If one ub has MMP and the other does not, they were written by + * different hosts, which matters for MMP. So we treat no MMP/no SEQ as + * a 0 value. + * + * Since timestamp and txg are the same if we get this far, either is + * acceptable for importing the pool. 
*/ + unsigned int seq1 = 0; + unsigned int seq2 = 0; + + if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1)) + seq1 = MMP_SEQ(ub1); + + if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2)) + seq2 = MMP_SEQ(ub2); + + return (AVL_CMP(seq1, seq2)); } struct ubl_cbdata { diff --git a/tests/zfs-tests/tests/functional/mmp/cleanup.ksh b/tests/zfs-tests/tests/functional/mmp/cleanup.ksh index 6e438d88dbc2..8146f773a215 100755 --- a/tests/zfs-tests/tests/functional/mmp/cleanup.ksh +++ b/tests/zfs-tests/tests/functional/mmp/cleanup.ksh @@ -23,6 +23,6 @@ verify_runnable "global" -log_must set_tunable64 zfs_multihost_history 0 +log_must set_tunable64 zfs_multihost_history $MMP_HISTORY_OFF log_pass "mmp cleanup passed" diff --git a/tests/zfs-tests/tests/functional/mmp/mmp.cfg b/tests/zfs-tests/tests/functional/mmp/mmp.cfg index 52680c275aae..9f7e76e27018 100644 --- a/tests/zfs-tests/tests/functional/mmp/mmp.cfg +++ b/tests/zfs-tests/tests/functional/mmp/mmp.cfg @@ -38,3 +38,9 @@ export MMP_HISTORY_OFF=0 export MMP_INTERVAL_HOUR=$((60*60*1000)) export MMP_INTERVAL_DEFAULT=1000 export MMP_INTERVAL_MIN=100 + +export MMP_IMPORT_INTERVALS=20 +export MMP_FAIL_INTERVALS_DEFAULT=10 +export MMP_FAIL_INTERVALS_MIN=2 + +export MMP_TEST_DURATION_DEFAULT=$((MMP_IMPORT_INTERVALS*MMP_INTERVAL_DEFAULT/1000)) diff --git a/tests/zfs-tests/tests/functional/mmp/mmp.kshlib b/tests/zfs-tests/tests/functional/mmp/mmp.kshlib index e74f04a5b66d..fda57c002cbf 100644 --- a/tests/zfs-tests/tests/functional/mmp/mmp.kshlib +++ b/tests/zfs-tests/tests/functional/mmp/mmp.kshlib @@ -162,15 +162,42 @@ function mmp_pool_set_hostid # pool hostid return 0 } - # Return the number of seconds the activity check portion of the import process -# will take. Does not include the time to find devices and assemble the -# preliminary pool configuration passed into the kernel. +# will take. Does not include the time to find devices and assemble a config. +# Note that the activity check may be skipped, e.g. if the pool and host +# hostids match, but this will return non-zero because mmp_* are populated. function seconds_mmp_waits_for_activity { + typeset pool=$1 + typeset devpath=$2 + + typeset seconds=0 + typeset devices=${#DISK[@]} typeset import_intervals=$(get_tunable zfs_multihost_import_intervals) - typeset interval=$(get_tunable zfs_multihost_interval) - typeset seconds=$((interval*import_intervals/1000)) + typeset import_interval=$(get_tunable zfs_multihost_interval) + typeset tmpfile=$(mktemp) + typeset mmp_fail + typeset mmp_write + typeset mmp_delay + + log_must zdb -e -p $devpath $pool >$tmpfile 2>/dev/null + mmp_fail=$(awk '/mmp_fail/ {print $NF}' $tmpfile) + mmp_write=$(awk '/mmp_write/ {print $NF}' $tmpfile) + mmp_delay=$(awk '/mmp_delay/ {print $NF}' $tmpfile) + if [ -f $tmpfile ]; then + rm $tmpfile + fi + + # In order of preference: + if [ -n "$mmp_fail" -a -n "$mmp_write" ]; then + seconds=$((2*mmp_fail*mmp_write/1000)) + elif [ -n "$mmp_delay" ]; then + # MMP V0: Based on mmp_delay from the best Uberblock + seconds=$((import_intervals*devices*mmp_delay/1000000000)) + else + # Non-MMP aware: Based on zfs_multihost_interval and import_intervals + seconds=$((import_intervals*import_interval/1000)) + fi echo $seconds } @@ -180,34 +207,33 @@ function import_no_activity_check # pool opts typeset pool=$1 typeset opts=$2 - typeset max_duration=$(seconds_mmp_waits_for_activity) + typeset max_duration=$((MMP_TEST_DURATION_DEFAULT-1)) SECONDS=0 zpool import $opts $pool typeset rc=$? 
if [[ $SECONDS -gt $max_duration ]]; then - log_fail "unexpected activity check (${SECONDS}s gt \ -$max_duration)" + log_fail "ERROR: import_no_activity_check unexpected activity \ +check (${SECONDS}s gt $max_duration)" fi return $rc } -function import_activity_check # pool opts +function import_activity_check # pool opts act_test_duration { typeset pool=$1 typeset opts=$2 - - typeset min_duration=$(seconds_mmp_waits_for_activity) + typeset min_duration=${3:-$MMP_TEST_DURATION_DEFAULT} SECONDS=0 zpool import $opts $pool typeset rc=$? if [[ $SECONDS -le $min_duration ]]; then - log_fail "expected activity check (${SECONDS}s le \ -$min_duration)" + log_fail "ERROR: import_activity_check expected activity check \ +(${SECONDS}s le min_duration $min_duration)" fi return $rc @@ -238,3 +264,70 @@ function count_mmp_writes # pool duration sleep $duration awk 'BEGIN {count=0}; $NF != "-" {count++}; END {print count};' "$hist_path" } + +function summarize_uberblock_mmp # device +{ + typeset device=$1 + + zdb -luuuu $device | awk ' + BEGIN {write_fail_present=0; write_fail_missing=0; uber_invalid=0;} + /Uberblock\[[0-9][0-9]*\]/ {delay=-99; write=-99; fail=-99; total++; if (/invalid/) {uber_invalid++};}; + /mmp_fail/ {fail=$3}; + /mmp_seq/ {seq=$3}; + /mmp_write/ {write=$3}; + /mmp_delay/ {delay=$3; if (delay==0) {delay_zero++};}; + /mmp_valid/ && delay>0 && write>0 && fail>0 {write_fail_present++}; + /mmp_valid/ && delay>0 && (write<=0 || fail<=0) {write_fail_missing++}; + /mmp_valid/ && delay>0 && write<=0 {write_missing++}; + /mmp_valid/ && delay>0 && fail<=0 {fail_missing++}; + /mmp_valid/ && delay>0 && seq>0 {seq_nonzero++}; + END { + print "total_uberblocks " total; + print "delay_zero " delay_zero; + print "write_fail_present " write_fail_present; + print "write_fail_missing " write_fail_missing; + print "write_missing " write_missing; + print "fail_missing " fail_missing; + print "seq_nonzero " seq_nonzero; + print "uberblock_invalid " uber_invalid; + }' +} + +function count_mmp_write_fail_present # device +{ + typeset device=$1 + + summarize_uberblock_mmp $device | awk '/write_fail_present/ {print $NF}' +} + +function count_mmp_write_fail_missing # device +{ + typeset device=$1 + + summarize_uberblock_mmp $device | awk '/write_fail_missing/ {print $NF}' +} + +function verify_mmp_write_fail_present # device +{ + typeset device=$1 + + count=$(count_mmp_write_fail_present $device) + log_note "present count: $count" + if [ $count -eq 0 ]; then + summarize_uberblock_mmp $device + log_note "----- snip -----" + zdb -luuuu $device + log_note "----- snip -----" + log_fail "No Uberblocks contain valid mmp_write and fail values" + fi + + count=$(count_mmp_write_fail_missing $device) + log_note "missing count: $count" + if [ $count -gt 0 ]; then + summarize_uberblock_mmp $device + log_note "----- snip -----" + zdb -luuuu $device + log_note "----- snip -----" + log_fail "Uberblocks missing mmp_write or mmp_fail" + fi +} diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_active_import.ksh b/tests/zfs-tests/tests/functional/mmp/mmp_active_import.ksh index e39c5ab309f9..c4ed894826ad 100755 --- a/tests/zfs-tests/tests/functional/mmp/mmp_active_import.ksh +++ b/tests/zfs-tests/tests/functional/mmp/mmp_active_import.ksh @@ -42,8 +42,19 @@ verify_runnable "both" function cleanup { mmp_pool_destroy $MMP_POOL $MMP_DIR - log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_DEFAULT log_must mmp_clear_hostid + ZTESTPID=$(pgrep ztest) + if [ -n "$ZTESTPID" ]; then + for pid in $ZTESTPID; do + log_must kill 
-9 $pid + done + else + # if ztest not running and log present, ztest crashed + if [ -f $MMP_ZTEST_LOG ]; then + log_note "ztest appears to have crashed. Tail of log:" + tail -n 50 $MMP_ZTEST_LOG + fi + fi } log_assert "multihost=on|off active pool activity checks" log_onexit cleanup mmp_pool_create $MMP_POOL $MMP_DIR # 2. Verify 'zpool import' reports an active pool. log_must mmp_set_hostid $HOSTID2 -log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_MIN log_must is_pool_imported $MMP_POOL "-d $MMP_DIR" # 3. Verify 'zpool import [-f] $MMP_POOL' cannot import the pool. @@ -79,6 +89,9 @@ if [ -n "$ZTESTPID" ]; then log_must kill -9 $ZTESTPID fi log_must wait_pool_imported $MMP_POOL "-d $MMP_DIR" +if [ -f $MMP_ZTEST_LOG ]; then + log_must rm $MMP_ZTEST_LOG +fi # 5. Verify 'zpool import' fails with the expected error message, when # - hostid=0: - configuration error @@ -103,9 +116,6 @@ MMP_IMPORTED_MSG="pool was previously in use from another system." log_must try_pool_import $MMP_POOL "-d $MMP_DIR" "$MMP_IMPORTED_MSG" # 7. Verify 'zpool import -f $MMP_POOL' can now import the pool. -# Default interval results in minimum activity test 10s which -# makes detection of the activity test reliable. -log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_DEFAULT log_must import_activity_check $MMP_POOL "-f -d $MMP_DIR" # 8 Verify pool may be exported/imported without -f argument. diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_inactive_import.ksh b/tests/zfs-tests/tests/functional/mmp/mmp_inactive_import.ksh index c5c66373e0d3..64ed9bf9741d 100755 --- a/tests/zfs-tests/tests/functional/mmp/mmp_inactive_import.ksh +++ b/tests/zfs-tests/tests/functional/mmp/mmp_inactive_import.ksh @@ -28,7 +28,9 @@ # 4. Verify multihost=off and hostid allowed (no activity check) # 5. Verify multihost=on and hostids match (no activity check) # 6. Verify multihost=on and hostids differ (activity check) -# 7. Verify multihost=on and hostid zero fails (no activity check) +# 7. Verify mmp_write and mmp_fail are set correctly +# 8. Verify multihost=on and hostid zero fails (no activity check) +# 9. Verify activity check duration based on mmp_write and mmp_fail # . $STF_SUITE/include/libtest.shlib @@ -41,6 +43,7 @@ function cleanup { default_cleanup_noexit log_must mmp_clear_hostid + log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_DEFAULT } log_assert "multihost=on|off inactive pool activity checks" @@ -87,11 +90,22 @@ log_must mmp_set_hostid $HOSTID2 log_mustnot import_activity_check $TESTPOOL "" log_must import_activity_check $TESTPOOL "-f" -# 7. Verify multihost=on and hostid zero fails (no activity check) +# 7. Verify mmp_write and mmp_fail are set correctly log_must zpool export -F $TESTPOOL +log_must verify_mmp_write_fail_present ${DISK[0]} + +# 8. Verify multihost=on and hostid zero fails (no activity check) log_must mmp_clear_hostid MMP_IMPORTED_MSG="Set a unique system hostid" log_must check_pool_import $TESTPOOL "-f" "action" "$MMP_IMPORTED_MSG" log_mustnot import_no_activity_check $TESTPOOL "-f" +# 9. Verify activity check duration based on mmp_write and mmp_fail +# Set short-test tunables, then import a pool that was last imported while +# the tunables were at their defaults; the activity check should still run +# for the default duration (see the sketch below). 
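Review note (not part of the patch): the duration this step expects comes from case 1 of spa_activity_check_duration() above; the values recorded in the best uberblock, not the local tunables, set the wait. A standalone sketch of that arithmetic, with MMP_IMPORT_SAFETY_FACTOR copied from sys/mmp.h:

#include <stdint.h>
#include <stdio.h>

#define	MMP_IMPORT_SAFETY_FACTOR	200	/* pct, from sys/mmp.h */

/* Case 1: the last importer recorded fail_intervals > 0 in ub_mmp_config. */
static uint64_t
activity_check_ms(uint64_t fail_intervals, uint64_t interval_ms)
{
	return (fail_intervals * interval_ms * MMP_IMPORT_SAFETY_FACTOR / 100);
}

int
main(void)
{
	/* Uberblock written with defaults: 10 * 1000 ms * 200% = 20 s. */
	printf("defaults     : %llu ms\n",
	    (unsigned long long)activity_check_ms(10, 1000));
	/* Uberblock written with the test minimums: 2 * 100 ms * 200%. */
	printf("test minimums: %llu ms\n",
	    (unsigned long long)activity_check_ms(2, 100));
	return (0);
}

Because the pool was last imported with default tunables, the importing host still waits about 20 s (MMP_TEST_DURATION_DEFAULT) even though its local zfs_multihost_interval is set to the minimum.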
+log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_MIN +log_must mmp_clear_hostid +log_must mmp_set_hostid $HOSTID1 +log_must import_activity_check $TESTPOOL "-f" $MMP_TEST_DURATION_DEFAULT + log_pass "multihost=on|off inactive pool activity checks passed" diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_on_uberblocks.ksh b/tests/zfs-tests/tests/functional/mmp/mmp_on_uberblocks.ksh index 0cb38f8899ff..bf1eb54a7389 100755 --- a/tests/zfs-tests/tests/functional/mmp/mmp_on_uberblocks.ksh +++ b/tests/zfs-tests/tests/functional/mmp/mmp_on_uberblocks.ksh @@ -19,7 +19,7 @@ # # DESCRIPTION: -# Ensure that MMP updates uberblocks at the expected intervals. +# Ensure that MMP updates uberblocks with MMP info at expected intervals. # # STRATEGY: # 1. Set zfs_txg_timeout to large value @@ -28,6 +28,7 @@ # 4. Sleep, then collect count of uberblocks written # 5. If number of changes seen is less than min threshold, then fail # 6. If number of changes seen is more than max threshold, then fail +# 7. Sequence number increments when no TXGs are syncing # . $STF_SUITE/include/libtest.shlib @@ -39,12 +40,14 @@ verify_runnable "both" UBER_CHANGES=0 EXPECTED=$(($(echo $DISKS | wc -w) * 10)) FUDGE=$((EXPECTED * 20 / 100)) -MIN=$((EXPECTED - FUDGE)) -MAX=$((EXPECTED + FUDGE)) +MIN_UB_WRITES=$((EXPECTED - FUDGE)) +MAX_UB_WRITES=$((EXPECTED + FUDGE)) +MIN_SEQ_VALUES=7 function cleanup { default_cleanup_noexit + log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_DEFAULT set_tunable64 zfs_txg_timeout $TXG_TIMEOUT_DEFAULT log_must mmp_clear_hostid } @@ -62,12 +65,21 @@ UBER_CHANGES=$(count_mmp_writes $TESTPOOL 10) log_note "Uberblock changed $UBER_CHANGES times" -if [ $UBER_CHANGES -lt $MIN ]; then +if [ $UBER_CHANGES -lt $MIN_UB_WRITES ]; then log_fail "Fewer uberblock writes occured than expected ($EXPECTED)" fi -if [ $UBER_CHANGES -gt $MAX ]; then +if [ $UBER_CHANGES -gt $MAX_UB_WRITES ]; then log_fail "More uberblock writes occured than expected ($EXPECTED)" fi +log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_MIN +SEQ_BEFORE=$(zdb -luuuu ${DISK[0]} | awk '/mmp_seq/ {if ($NF>max) max=$NF}; END {print max}') +sleep 1 +SEQ_AFTER=$(zdb -luuuu ${DISK[0]} | awk '/mmp_seq/ {if ($NF>max) max=$NF}; END {print max}') +if [ $((SEQ_AFTER - SEQ_BEFORE)) -lt $MIN_SEQ_VALUES ]; then + zdb -luuuu ${DISK[0]} + log_fail "ERROR: mmp_seq did not increase by $MIN_SEQ_VALUES; before $SEQ_BEFORE after $SEQ_AFTER" +fi + log_pass "Ensure MMP uberblocks update at the correct interval passed" diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_reset_interval.ksh b/tests/zfs-tests/tests/functional/mmp/mmp_reset_interval.ksh index 3c8f00cde90b..842df284b803 100755 --- a/tests/zfs-tests/tests/functional/mmp/mmp_reset_interval.ksh +++ b/tests/zfs-tests/tests/functional/mmp/mmp_reset_interval.ksh @@ -20,7 +20,8 @@ # DESCRIPTION: # Ensure that the MMP thread is notified when zfs_multihost_interval is -# reduced. +# reduced, and that changes to zfs_multihost_interval and +# zfs_multihost_fail_intervals do not trigger pool suspensions. # # STRATEGY: # 1. Set zfs_multihost_interval to much longer than the test duration @@ -29,6 +30,8 @@ # 4. Set zfs_multihost_interval to 1 second # 5. Sleep briefly # 6. Verify MMP writes began +# 7. Verify mmp_fail and mmp_write in uberblock reflect tunables +# 8. Repeatedly change tunables relating to pool suspension # . 
$STF_SUITE/include/libtest.shlib @@ -41,6 +44,8 @@ function cleanup { default_cleanup_noexit log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_DEFAULT + log_must set_tunable64 zfs_multihost_fail_intervals \ + $MMP_FAIL_INTERVALS_DEFAULT log_must mmp_clear_hostid } @@ -58,7 +63,57 @@ log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_DEFAULT uber_count=$(count_mmp_writes $TESTPOOL 1) if [ $uber_count -eq 0 ]; then - log_fail "mmp writes did not start when zfs_multihost_interval reduced" + log_fail "ERROR: mmp writes did not start when zfs_multihost_interval reduced" fi +# 7. Verify mmp_write and mmp_fail are written +for fails in $(seq $MMP_FAIL_INTERVALS_MIN $((MMP_FAIL_INTERVALS_MIN*2))); do + for interval in $(seq $MMP_INTERVAL_MIN 200 $MMP_INTERVAL_DEFAULT); do + log_must set_tunable64 zfs_multihost_fail_intervals $fails + log_must set_tunable64 zfs_multihost_interval $interval + log_must sync_pool $TESTPOOL + typeset mmp_fail=$(zdb $TESTPOOL 2>/dev/null | + awk '/mmp_fail/ {print $NF}') + if [ $fails -ne $mmp_fail ]; then + log_fail "ERROR: mmp_fail $mmp_fail != $fails" + fi + typeset mmp_write=$(zdb $TESTPOOL 2>/dev/null | + awk '/mmp_write/ {print $NF}') + if [ $interval -ne $mmp_write ]; then + log_fail "ERROR: mmp_write $mmp_write != $interval" + fi + done +done + + +# 8. Repeatedly change zfs_multihost_interval and fail_intervals +for x in $(seq 10); do + typeset new_interval=$(( (RANDOM % 20 + 1) * $MMP_INTERVAL_MIN )) + log_must set_tunable64 zfs_multihost_interval $new_interval + typeset action=$((RANDOM %10)) + if [ $action -eq 0 ]; then + log_must zpool export -a + log_must mmp_clear_hostid + log_must mmp_set_hostid $HOSTID1 + log_must zpool import $TESTPOOL + elif [ $action -eq 1 ]; then + log_must zpool export -F $TESTPOOL + log_must zpool import $TESTPOOL + elif [ $action -eq 2 ]; then + log_must zpool export -F $TESTPOOL + log_must mmp_clear_hostid + log_must mmp_set_hostid $HOSTID2 + log_must zpool import -f $TESTPOOL + elif [ $action -eq 3 ]; then + log_must zpool export -F $TESTPOOL + log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_MIN + log_must zpool import $TESTPOOL + elif [ $action -eq 4 ]; then + log_must set_tunable64 zfs_multihost_fail_intervals \ + $((RANDOM % MMP_FAIL_INTERVALS_DEFAULT)) + fi + sleep 5 +done + + log_pass "mmp threads notified when zfs_multihost_interval reduced" diff --git a/tests/zfs-tests/tests/functional/mmp/setup.ksh b/tests/zfs-tests/tests/functional/mmp/setup.ksh index fde5e3bb7773..c91f61979c59 100755 --- a/tests/zfs-tests/tests/functional/mmp/setup.ksh +++ b/tests/zfs-tests/tests/functional/mmp/setup.ksh @@ -28,5 +28,7 @@ if [ -e $HOSTID_FILE ]; then fi log_must set_tunable64 zfs_multihost_history $MMP_HISTORY +log_must set_tunable64 zfs_multihost_interval $MMP_INTERVAL_DEFAULT +log_must set_tunable64 zfs_multihost_fail_intervals $MMP_FAIL_INTERVALS_DEFAULT log_pass "mmp setup pass"
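Review note (not part of the patch): a trimmed-down restatement of the vdev_uberblock_compare() tie-break added in vdev_label.c above, keeping only the fields the comparison reads. CMP() stands in for the kernel's AVL_CMP(), and the values are illustrative.

#include <stdint.h>
#include <stdio.h>

#define	UBERBLOCK_MAGIC		0x00bab10c
#define	MMP_MAGIC		0xa11cea11
#define	MMP_SEQ_VALID_BIT	0x02
#define	CMP(a, b)		(((a) > (b)) - ((a) < (b)))

typedef struct ub {		/* only the fields the compare reads */
	uint64_t ub_magic;
	uint64_t ub_txg;
	uint64_t ub_timestamp;
	uint64_t ub_mmp_magic;
	uint64_t ub_mmp_config;
} ub_t;

static unsigned int
ub_seq(const ub_t *u)
{
	/* No MMP, or no valid seq: treated as sequence number 0. */
	if (u->ub_magic == UBERBLOCK_MAGIC && u->ub_mmp_magic == MMP_MAGIC &&
	    (u->ub_mmp_config & MMP_SEQ_VALID_BIT))
		return ((unsigned int)
		    ((u->ub_mmp_config & 0x0000FFFF00000000ULL) >> 32));
	return (0);
}

static int
ub_compare(const ub_t *u1, const ub_t *u2)
{
	int cmp = CMP(u1->ub_txg, u2->ub_txg);
	if (cmp)
		return (cmp);
	cmp = CMP(u1->ub_timestamp, u2->ub_timestamp);
	if (cmp)
		return (cmp);
	return (CMP(ub_seq(u1), ub_seq(u2)));	/* new tie-break */
}

int
main(void)
{
	/* Same txg and timestamp; only the intra-second seq differs. */
	ub_t a = { UBERBLOCK_MAGIC, 100, 5000, MMP_MAGIC,
	    ((uint64_t)3 << 32) | MMP_SEQ_VALID_BIT };
	ub_t b = a;
	b.ub_mmp_config = ((uint64_t)7 << 32) | MMP_SEQ_VALID_BIT;

	printf("compare(a, b) = %d\n", ub_compare(&a, &b));	/* -1 */
	return (0);
}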
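Review note (not part of the patch): the 31/32 smoothing mmp_thread() applies to mmp_fail_ns when the tunables shrink, sketched in isolation. It shows why lowering zfs_multihost_interval does not immediately suspend the pool: the failure window decays toward the new product over many loop passes instead of dropping below the current write cadence at once.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t fail_ns = 10000000000ULL;	/* old window: 10 s */
	uint64_t target = 400000000ULL;		/* new: 2 * 200 ms */

	for (int i = 1; i <= 8; i++) {
		/* The same update mmp_thread() performs once per pass. */
		if (target < fail_ns)
			fail_ns = (fail_ns * 31 + target) / 32;
		else
			fail_ns = target;
		printf("pass %d: fail window %llu ms\n", i,
		    (unsigned long long)(fail_ns / 1000000));
	}
	return (0);
}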
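Review note (not part of the patch): the clamps MMP_INTERVAL_OK() and MMP_FAIL_INTVS_OK() from sys/mmp.h, exercised standalone. They implement the man page rules above: intervals are floored at MMP_MIN_INTERVAL, a fail_intervals of 0 still means "never suspend", and 1 is raised to 2 so ordinary latency jitter cannot suspend a single-leaf pool.

#include <stdio.h>

#define	MMP_MIN_INTERVAL	100	/* ms, pre-existing in sys/mmp.h */
#define	MMP_MIN_FAIL_INTERVALS	2	/* min if != 0 */
#define	MAX(a, b)		((a) > (b) ? (a) : (b))
#define	MMP_INTERVAL_OK(interval)	MAX(interval, MMP_MIN_INTERVAL)
#define	MMP_FAIL_INTVS_OK(fails) \
	(fails == 0 ? 0 : MAX(fails, MMP_MIN_FAIL_INTERVALS))

int
main(void)
{
	printf("interval 10  -> %d\n", MMP_INTERVAL_OK(10));	/* 100 */
	printf("interval 500 -> %d\n", MMP_INTERVAL_OK(500));	/* 500 */
	printf("fails 0      -> %d\n", MMP_FAIL_INTVS_OK(0));	/* 0 */
	printf("fails 1      -> %d\n", MMP_FAIL_INTVS_OK(1));	/* 2 */
	printf("fails 10     -> %d\n", MMP_FAIL_INTVS_OK(10));	/* 10 */
	return (0);
}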