MFV r329762: 8961 SPA load/import should tell us why it failed

illumos/illumos-gate@3ee8c80c74

When we fail to open or import a storage pool, we typically don't get any
additional diagnostic information, just "no pool found" or "cannot import".

While there may be no additional user-consumable information, we should at
least make this situation easier to debug/diagnose for developers and support.
For example, we could start by using `zfs_dbgmsg()` to log each thing that we
try when importing, and which things failed. E.g. "tried uberblock of txg X
from label Y of device Z". Also, we could log each of the stages that we go
through in `spa_load_impl()`.

Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Andrew Stormont <andyjstormont@gmail.com>
Approved by: Dan McDonald <danmcd@joyent.com>
Author: Pavel Zakharov <pavel.zakharov@delphix.com>
Committed by: Alexander Motin, 2018-02-22 00:03:14 +00:00
Commit: fa607d017d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=329765
8 changed files with 290 additions and 80 deletions
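
In brief, the diff below adds two printf-like helpers, `spa_load_failed()` and `spa_load_note()`, and calls them on each failing step of `spa_load_impl()` and its helpers so the debug log records what was attempted and why it failed. The following is a minimal, self-contained userland sketch of that pattern, not the patch itself: `struct spa` here is a stand-in for the kernel `spa_t`, and `fprintf(stderr, ...)` stands in for `zfs_dbgmsg()`, which the real patch uses as the message sink.

```c
#include <stdarg.h>
#include <stdio.h>

/* stand-in for the kernel spa_t; only the pool name matters here */
struct spa {
	const char *spa_name;
};

/*
 * printf-like helper in the shape of the patch's spa_load_failed():
 * format into a fixed buffer, then emit one tagged debug message.
 */
static void
spa_load_failed(struct spa *spa, const char *fmt, ...)
{
	va_list adx;
	char buf[256];

	va_start(adx, fmt);
	(void) vsnprintf(buf, sizeof (buf), fmt, adx);
	va_end(adx);

	/* the kernel version calls zfs_dbgmsg() here instead of stderr */
	(void) fprintf(stderr, "spa_load(%s): FAILED: %s\n",
	    spa->spa_name, buf);
}

int
main(void)
{
	struct spa spa = { .spa_name = "tank" };
	int error = 5;	/* pretend a vdev open step returned EIO */

	/* representative call site, mirroring spa_ld_open_vdevs() below */
	if (error != 0)
		spa_load_failed(&spa, "unable to open vdev tree [error=%d]",
		    error);
	return (0);
}
```

The kernel helpers (see the spa_misc.c hunk further down) have the same shape: `vsnprintf()` into a 256-byte buffer, then a single `zfs_dbgmsg()` call prefixed with the pool name so every message is attributable to a specific load or import attempt.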

View File

@ -1296,6 +1296,8 @@ spa_unload(spa_t *spa)
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_load_note(spa, "UNLOADING");
/*
* Stop TRIM thread.
*/
@ -2069,6 +2071,13 @@ spa_load_verify(spa_t *spa)
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
if (spa_load_verify_metadata) {
if (spa->spa_extreme_rewind) {
spa_load_note(spa, "performing a complete scan of the "
"pool since extreme rewind is on. This may take "
"a very long time.\n (spa_load_verify_data=%u, "
"spa_load_verify_metadata=%u)",
spa_load_verify_data, spa_load_verify_metadata);
}
error = traverse_pool(spa, spa->spa_verify_min_txg,
TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA,
spa_load_verify_cb, rio);
@ -2121,10 +2130,17 @@ spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
* Find a value in the pool directory object.
*/
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
{
return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
name, sizeof (uint64_t), 1, val));
int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
name, sizeof (uint64_t), 1, val);
if (error != 0 && (error != ENOENT || log_enoent)) {
spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
"[error=%d]", name, error);
}
return (error);
}
static int
@ -2316,19 +2332,18 @@ vdev_count_verify_zaps(vdev_t *vd)
static int
spa_ld_parse_config(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
spa_load_state_t state, spa_import_type_t type)
spa_import_type_t type)
{
int error = 0;
nvlist_t *nvtree = NULL;
int parse;
vdev_t *rvd;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa->spa_load_state = state;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree))
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
spa_load_failed(spa, "invalid config provided: '%s' missing",
ZPOOL_CONFIG_VDEV_TREE);
return (SET_ERROR(EINVAL));
}
parse = (type == SPA_IMPORT_EXISTING ?
VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
@ -2353,8 +2368,11 @@ spa_ld_parse_config(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0)
if (error != 0) {
spa_load_failed(spa, "unable to parse config [error=%d]",
error);
return (error);
}
ASSERT(spa->spa_root_vdev == rvd);
ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
@ -2375,6 +2393,10 @@ spa_ld_open_vdevs(spa_t *spa)
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_open(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "unable to open vdev tree [error=%d]",
error);
}
return (error);
}
@ -2404,11 +2426,17 @@ spa_ld_validate_vdevs(spa_t *spa, spa_import_type_t type,
error = vdev_validate(rvd, trust_config);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0)
if (error != 0) {
spa_load_failed(spa, "vdev_validate failed [error=%d]",
error);
return (error);
}
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
spa_load_failed(spa, "cannot open vdev tree after "
"invalidating some vdevs");
return (SET_ERROR(ENXIO));
}
}
return (0);
@ -2433,14 +2461,20 @@ spa_ld_select_uberblock(spa_t *spa, nvlist_t *config, spa_import_type_t type,
*/
if (ub->ub_txg == 0) {
nvlist_free(label);
spa_load_failed(spa, "no valid uberblock found");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
}
spa_load_note(spa, "using uberblock with txg=%llu",
(u_longlong_t)ub->ub_txg);
/*
* If the pool has an unsupported version we can't open it.
*/
if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
nvlist_free(label);
spa_load_failed(spa, "version %llu is not supported",
(u_longlong_t)ub->ub_version);
return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
}
@ -2451,9 +2485,17 @@ spa_ld_select_uberblock(spa_t *spa, nvlist_t *config, spa_import_type_t type,
* If we weren't able to find what's necessary for reading the
* MOS in the label, return failure.
*/
if (label == NULL || nvlist_lookup_nvlist(label,
ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
if (label == NULL) {
spa_load_failed(spa, "label config unavailable");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) != 0) {
nvlist_free(label);
spa_load_failed(spa, "invalid label: '%s' missing",
ZPOOL_CONFIG_FEATURES_FOR_READ);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
@ -2492,6 +2534,7 @@ spa_ld_select_uberblock(spa_t *spa, nvlist_t *config, spa_import_type_t type,
VERIFY(nvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
nvlist_free(unsup_feat);
spa_load_failed(spa, "some features are unsupported");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
@ -2508,8 +2551,13 @@ spa_ld_select_uberblock(spa_t *spa, nvlist_t *config, spa_import_type_t type,
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
&children) != 0 && trust_config && type != SPA_IMPORT_ASSEMBLE &&
rvd->vdev_guid_sum != ub->ub_guid_sum)
rvd->vdev_guid_sum != ub->ub_guid_sum) {
spa_load_failed(spa, "guid sum in config doesn't match guid "
"sum in uberblock (%llu != %llu)",
(u_longlong_t)rvd->vdev_guid_sum,
(u_longlong_t)ub->ub_guid_sum);
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
}
if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
@ -2541,8 +2589,11 @@ spa_ld_open_rootbp(spa_t *spa)
vdev_t *rvd = spa->spa_root_vdev;
error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
if (error)
if (error != 0) {
spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
return (0);
@ -2553,7 +2604,8 @@ spa_ld_validate_config(spa_t *spa, spa_import_type_t type)
{
vdev_t *rvd = spa->spa_root_vdev;
if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
!= 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
@ -2565,11 +2617,16 @@ spa_ld_validate_config(spa_t *spa, spa_import_type_t type)
*/
if (type != SPA_IMPORT_ASSEMBLE) {
nvlist_t *mos_config;
if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
if (load_nvlist(spa, spa->spa_config_object, &mos_config)
!= 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (!spa_config_valid(spa, mos_config)) {
nvlist_free(mos_config);
spa_load_failed(spa, "mismatch between config provided "
"and config stored in MOS");
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
ENXIO));
}
@ -2580,8 +2637,10 @@ spa_ld_validate_config(spa_t *spa, spa_import_type_t type)
* root vdev. If it can't be opened, it indicates one or
* more toplevel vdevs are faulted.
*/
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
spa_load_failed(spa, "some top vdevs are unavailable");
return (SET_ERROR(ENXIO));
}
}
return (0);
@ -2597,14 +2656,20 @@ spa_ld_open_indirect_vdev_metadata(spa_t *spa)
* Everything that we read before spa_remove_init() must be stored
* on concreted vdevs. Therefore we do this as early as possible.
*/
if (spa_remove_init(spa) != 0)
error = spa_remove_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_remove_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Retrieve information needed to condense indirect vdev mappings.
*/
error = spa_condense_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_condense_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
@ -2612,8 +2677,7 @@ spa_ld_open_indirect_vdev_metadata(spa_t *spa)
}
static int
spa_ld_check_features(spa_t *spa, spa_load_state_t state,
boolean_t *missing_feat_writep)
spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
@ -2623,17 +2687,17 @@ spa_ld_check_features(spa_t *spa, spa_load_state_t state,
nvlist_t *unsup_feat, *enabled_feat;
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
&spa->spa_feat_for_read_obj) != 0) {
&spa->spa_feat_for_read_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
&spa->spa_feat_for_write_obj) != 0) {
&spa->spa_feat_for_write_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
&spa->spa_feat_desc_obj) != 0) {
&spa->spa_feat_desc_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
@ -2644,7 +2708,8 @@ spa_ld_check_features(spa_t *spa, spa_load_state_t state,
unsup_feat, enabled_feat))
missing_feat_read = B_TRUE;
if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
if (spa_writeable(spa) ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
if (!spa_features_check(spa, B_TRUE,
unsup_feat, enabled_feat)) {
*missing_feat_writep = B_TRUE;
@ -2688,6 +2753,7 @@ spa_ld_check_features(spa_t *spa, spa_load_state_t state,
*/
if (missing_feat_read || (*missing_feat_writep &&
spa_writeable(spa))) {
spa_load_failed(spa, "pool uses unsupported features");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
@ -2707,6 +2773,9 @@ spa_ld_check_features(spa_t *spa, spa_load_state_t state,
spa->spa_feat_refcount_cache[i] =
SPA_FEATURE_DISABLED;
} else {
spa_load_failed(spa, "error getting refcount "
"for feature %s [error=%d]",
spa_feature_table[i].fi_guid, error);
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
@ -2715,7 +2784,7 @@ spa_ld_check_features(spa_t *spa, spa_load_state_t state,
if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
&spa->spa_feat_enabled_txg_obj) != 0)
&spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
@ -2731,8 +2800,10 @@ spa_ld_load_special_directories(spa_t *spa)
spa->spa_is_initializing = B_TRUE;
error = dsl_pool_open(spa->spa_dsl_pool);
spa->spa_is_initializing = B_FALSE;
if (error != 0)
if (error != 0) {
spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
@ -2746,8 +2817,10 @@ spa_ld_prepare_for_reload(spa_t *spa, int orig_mode)
nvlist_t *policy = NULL;
nvlist_t *mos_config;
if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
@ -2808,26 +2881,31 @@ spa_ld_get_props(spa_t *spa)
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
} else if (error != 0) {
spa_load_failed(spa, "unable to retrieve checksum salt from "
"MOS [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
if (error != 0)
if (error != 0) {
spa_load_failed(spa, "error opening deferred-frees bpobj "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Load the bit that tells us to use the new accounting function
* (raid-z deflation). If we have an older pool, this will not
* be present.
*/
error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
&spa->spa_creation_version);
&spa->spa_creation_version, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
@ -2835,12 +2913,13 @@ spa_ld_get_props(spa_t *spa)
* Load the persistent error log. If we have an older pool, this will
* not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
&spa->spa_errlog_scrub);
&spa->spa_errlog_scrub, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
@ -2848,7 +2927,7 @@ spa_ld_get_props(spa_t *spa)
* Load the history object. If we have an older pool, this
* will not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
@ -2861,11 +2940,13 @@ spa_ld_get_props(spa_t *spa)
/* The sentinel is only available in the MOS config. */
nvlist_t *mos_config;
if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
&spa->spa_all_vdev_zaps);
&spa->spa_all_vdev_zaps, B_FALSE);
if (error == ENOENT) {
VERIFY(!nvlist_exists(mos_config,
@ -2891,7 +2972,8 @@ spa_ld_get_props(spa_t *spa)
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
B_FALSE);
if (error && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
@ -2927,14 +3009,17 @@ spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
/*
* Load any hot spares for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
if (load_nvlist(spa, spa->spa_spares.sav_object,
&spa->spa_spares.sav_config) != 0)
&spa->spa_spares.sav_config) != 0) {
spa_load_failed(spa, "error loading spares nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
@ -2947,14 +3032,16 @@ spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
* Load any level 2 ARC devices for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
&spa->spa_l2cache.sav_object);
&spa->spa_l2cache.sav_object, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
if (load_nvlist(spa, spa->spa_l2cache.sav_object,
&spa->spa_l2cache.sav_config) != 0)
&spa->spa_l2cache.sav_config) != 0) {
spa_load_failed(spa, "error loading l2cache nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
@ -2967,7 +3054,7 @@ spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
}
static int
spa_ld_load_vdev_metadata(spa_t *spa, spa_load_state_t state)
spa_ld_load_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
@ -2979,14 +3066,14 @@ spa_ld_load_vdev_metadata(spa_t *spa, spa_load_state_t state)
* unopenable vdevs so that the normal autoreplace handler can take
* over.
*/
if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_check_removed(spa->spa_root_vdev);
/*
* For the import case, this is done in spa_import(), because
* at this point we're using the spare definitions from
* the MOS config, not necessarily from the userland config.
*/
if (state != SPA_LOAD_IMPORT) {
if (spa->spa_load_state != SPA_LOAD_IMPORT) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
@ -2997,6 +3084,7 @@ spa_ld_load_vdev_metadata(spa_t *spa, spa_load_state_t state)
*/
error = vdev_load(rvd);
if (error != 0) {
spa_load_failed(spa, "vdev_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
@ -3017,8 +3105,10 @@ spa_ld_load_dedup_tables(spa_t *spa)
vdev_t *rvd = spa->spa_root_vdev;
error = ddt_load(spa);
if (error != 0)
if (error != 0) {
spa_load_failed(spa, "ddt_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
@ -3028,17 +3118,20 @@ spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, char **ereport)
{
vdev_t *rvd = spa->spa_root_vdev;
if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa) &&
spa_check_logs(spa)) {
*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
boolean_t missing = spa_check_logs(spa);
if (missing) {
*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
spa_load_failed(spa, "spa_check_logs failed");
return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
}
}
return (0);
}
static int
spa_ld_verify_pool_data(spa_t *spa, spa_load_state_t state)
spa_ld_verify_pool_data(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
@ -3047,9 +3140,11 @@ spa_ld_verify_pool_data(spa_t *spa, spa_load_state_t state)
* We've successfully opened the pool, verify that we're ready
* to start pushing transactions.
*/
if (state != SPA_LOAD_TRYIMPORT) {
if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
error = spa_load_verify(spa);
if (error != 0) {
spa_load_failed(spa, "spa_load_verify failed "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
error));
}
@ -3084,8 +3179,7 @@ spa_ld_claim_log_blocks(spa_t *spa)
}
static void
spa_ld_check_for_config_update(spa_t *spa, spa_load_state_t state,
int64_t config_cache_txg)
spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg)
{
vdev_t *rvd = spa->spa_root_vdev;
int need_update = B_FALSE;
@ -3098,8 +3192,8 @@ spa_ld_check_for_config_update(spa_t *spa, spa_load_state_t state,
* in-core spa_config and update the disk labels.
*/
if (config_cache_txg != spa->spa_config_txg ||
state == SPA_LOAD_IMPORT ||
state == SPA_LOAD_RECOVER ||
spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_RECOVER ||
(spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
need_update = B_TRUE;
@ -3131,6 +3225,11 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
int orig_mode = spa->spa_mode;
boolean_t missing_feat_write = B_FALSE;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa->spa_load_state = state;
spa_load_note(spa, "LOADING");
/*
* If this is an untrusted config, first access the pool in read-only
* mode. We will then retrieve a trusted copy of the config from the MOS
@ -3142,7 +3241,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
/*
* Parse the config provided to create a vdev tree.
*/
error = spa_ld_parse_config(spa, pool_guid, config, state, type);
error = spa_ld_parse_config(spa, pool_guid, config, type);
if (error != 0)
return (error);
@ -3210,7 +3309,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
* Retrieve the full list of active features from the MOS and check if
* they are all supported.
*/
error = spa_ld_check_features(spa, state, &missing_feat_write);
error = spa_ld_check_features(spa, &missing_feat_write);
if (error != 0)
return (error);
@ -3230,6 +3329,8 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
error = spa_ld_prepare_for_reload(spa, orig_mode);
if (error != 0)
return (error);
spa_load_note(spa, "RELOADING");
return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
}
@ -3252,7 +3353,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
* Load the metadata for all vdevs. Also check if unopenable devices
* should be autoreplaced.
*/
error = spa_ld_load_vdev_metadata(spa, state);
error = spa_ld_load_vdev_metadata(spa);
if (error != 0)
return (error);
@ -3285,7 +3386,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
* state. When performing an extreme rewind, we verify the whole pool,
* which can take a very long time.
*/
error = spa_ld_verify_pool_data(spa, state);
error = spa_ld_verify_pool_data(spa);
if (error != 0)
return (error);
@ -3342,7 +3443,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
* next sync, we would update the config stored in vdev labels
* and the cachefile (by default /etc/zfs/zpool.cache).
*/
spa_ld_check_for_config_update(spa, state, config_cache_txg);
spa_ld_check_for_config_update(spa, config_cache_txg);
/*
* Check all DTLs to see if anything needs resilvering.
@ -3379,6 +3480,8 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
spa_condense_indirect_restart(spa);
}
spa_load_note(spa, "LOADED");
return (0);
}
@ -3395,6 +3498,9 @@ spa_load_retry(spa_t *spa, spa_load_state_t state, int trust_config)
spa_activate(spa, mode);
spa_async_suspend(spa);
spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
(u_longlong_t)spa->spa_load_max_txg);
return (spa_load(spa, state, SPA_IMPORT_EXISTING, trust_config));
}
@ -3549,6 +3655,7 @@ spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
if (state != SPA_LOAD_RECOVER)
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
zfs_dbgmsg("spa_open_common: opening %s", pool);
error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
policy.zrp_request);
@ -4771,7 +4878,7 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
spa_write_cachefile(spa, B_FALSE, B_TRUE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
zfs_dbgmsg("spa_import: verbatim import of %s", pool);
mutex_exit(&spa_namespace_lock);
return (0);
}
@ -4795,6 +4902,8 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
if (state != SPA_LOAD_RECOVER)
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
zfs_dbgmsg("spa_import: importing %s%s", pool,
(state == SPA_LOAD_RECOVER) ? " (RECOVERY MODE)" : "");
error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
policy.zrp_request);
@ -4937,6 +5046,8 @@ spa_tryimport(nvlist_t *tryconfig)
spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
spa_activate(spa, FREAD);
zfs_dbgmsg("spa_tryimport: importing %s", poolname);
/*
* Pass off the heavy lifting to spa_load().
* Pass TRUE for trust_config because the user-supplied config

View File

@ -431,6 +431,34 @@ SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, spa_min_slop, CTLFLAG_RWTUN,
&spa_min_slop, 0,
"Minimal value of reserved space");
/*PRINTFLIKE2*/
void
spa_load_failed(spa_t *spa, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("spa_load(%s): FAILED: %s", spa->spa_name, buf);
}
/*PRINTFLIKE2*/
void
spa_load_note(spa_t *spa, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
zfs_dbgmsg("spa_load(%s): %s", spa->spa_name, buf);
}
/*
* ==========================================================================
* SPA config locking

View File

@ -827,6 +827,8 @@ extern objset_t *spa_meta_objset(spa_t *spa);
extern uint64_t spa_deadman_synctime(spa_t *spa);
/* Miscellaneous support routines */
extern void spa_load_failed(spa_t *spa, const char *fmt, ...);
extern void spa_load_note(spa_t *spa, const char *fmt, ...);
extern void spa_activate_mos_feature(spa_t *spa, const char *feature,
dmu_tx_t *tx);
extern void spa_deactivate_mos_feature(spa_t *spa, const char *feature);

View File

@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
* Copyright (c) 2011, 2016 by Delphix. All rights reserved.
*/
#ifndef _SYS_VDEV_H
@ -48,6 +48,7 @@ typedef enum vdev_dtl_type {
extern boolean_t zfs_nocacheflush;
extern boolean_t zfs_trim_enabled;
extern void vdev_dbgmsg(vdev_t *vd, const char *fmt, ...);
extern int vdev_open(vdev_t *);
extern void vdev_open_children(vdev_t *);
extern boolean_t vdev_uses_zvols(vdev_t *);

View File

@ -171,6 +171,28 @@ SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, metaslabs_per_vdev, CTLFLAG_RDTUN,
&metaslabs_per_vdev, 0,
"When a vdev is added, how many metaslabs the vdev should be divided into");
/*PRINTFLIKE2*/
void
vdev_dbgmsg(vdev_t *vd, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
if (vd->vdev_path != NULL) {
zfs_dbgmsg("%s vdev '%s': %s", vd->vdev_ops->vdev_op_type,
vd->vdev_path, buf);
} else {
zfs_dbgmsg("%s-%llu vdev (guid %llu): %s",
vd->vdev_ops->vdev_op_type,
(u_longlong_t)vd->vdev_id,
(u_longlong_t)vd->vdev_guid, buf);
}
}
/*
* Given a vdev type, return the appropriate ops vector.
*/
@ -1030,14 +1052,20 @@ vdev_metaslab_init(vdev_t *vd, uint64_t txg)
error = dmu_read(mos, vd->vdev_ms_array,
m * sizeof (uint64_t), sizeof (uint64_t), &object,
DMU_READ_PREFETCH);
if (error)
if (error != 0) {
vdev_dbgmsg(vd, "unable to read the metaslab "
"array [error=%d]", error);
return (error);
}
}
error = metaslab_init(vd->vdev_mg, m, object, txg,
&(vd->vdev_ms[m]));
if (error)
if (error != 0) {
vdev_dbgmsg(vd, "metaslab_init failed [error=%d]",
error);
return (error);
}
}
if (txg == 0)
@ -1119,8 +1147,7 @@ vdev_probe_done(zio_t *zio)
zio->io_error = 0;
} else {
ASSERT(zio->io_error != 0);
zfs_dbgmsg("failed probe on vdev %llu",
(longlong_t)vd->vdev_id);
vdev_dbgmsg(vd, "failed probe");
zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
spa, vd, NULL, 0, 0);
zio->io_error = SET_ERROR(ENXIO);
@ -1576,6 +1603,7 @@ vdev_validate(vdev_t *vd, boolean_t strict)
if ((label = vdev_label_read_config(vd, txg)) == NULL) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
vdev_dbgmsg(vd, "vdev_validate: failed reading config");
return (0);
}
@ -1588,6 +1616,8 @@ vdev_validate(vdev_t *vd, boolean_t strict)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_SPLIT_POOL);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev split into other "
"pool");
return (0);
}
@ -1597,6 +1627,10 @@ vdev_validate(vdev_t *vd, boolean_t strict)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: vdev label pool_guid "
"doesn't match config (%llu != %llu)",
(u_longlong_t)guid,
(u_longlong_t)spa_guid(spa));
return (0);
}
@ -1626,6 +1660,9 @@ vdev_validate(vdev_t *vd, boolean_t strict)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: config guid doesn't "
"match label guid (%llu != %llu)",
(u_longlong_t)vd->vdev_guid, (u_longlong_t)guid);
return (0);
}
@ -1634,6 +1671,8 @@ vdev_validate(vdev_t *vd, boolean_t strict)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
vdev_dbgmsg(vd, "vdev_validate: '%s' missing",
ZPOOL_CONFIG_POOL_STATE);
return (0);
}
@ -1645,8 +1684,12 @@ vdev_validate(vdev_t *vd, boolean_t strict)
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
spa_load_state(spa) == SPA_LOAD_OPEN &&
state != POOL_STATE_ACTIVE)
state != POOL_STATE_ACTIVE) {
vdev_dbgmsg(vd, "vdev_validate: invalid pool state "
"(%llu) for spa %s", (u_longlong_t)state,
spa->spa_name);
return (SET_ERROR(EBADF));
}
/*
* If we were able to open and validate a vdev that was
@ -2294,9 +2337,10 @@ vdev_dtl_sync(vdev_t *vd, uint64_t txg)
* the top level so that we update the config.
*/
if (object != space_map_object(vd->vdev_dtl_sm)) {
zfs_dbgmsg("txg %llu, spa %s, DTL old object %llu, "
"new object %llu", txg, spa_name(spa), object,
space_map_object(vd->vdev_dtl_sm));
vdev_dbgmsg(vd, "txg %llu, spa %s, DTL old object %llu, "
"new object %llu", (u_longlong_t)txg, spa_name(spa),
(u_longlong_t)object,
(u_longlong_t)space_map_object(vd->vdev_dtl_sm));
vdev_config_dirty(vd->vdev_top);
}
@ -2404,8 +2448,13 @@ vdev_load(vdev_t *vd)
if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: invalid size. ashift=%llu, "
"asize=%llu", (u_longlong_t)vd->vdev_ashift,
(u_longlong_t)vd->vdev_asize);
return (SET_ERROR(ENXIO));
} else if ((error = vdev_metaslab_init(vd, 0)) != 0) {
vdev_dbgmsg(vd, "vdev_load: metaslab_init failed "
"[error=%d]", error);
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (error);
@ -2418,6 +2467,8 @@ vdev_load(vdev_t *vd)
if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: vdev_dtl_load failed "
"[error=%d]", error);
return (error);
}
@ -2431,6 +2482,9 @@ vdev_load(vdev_t *vd)
obsolete_sm_object, 0, vd->vdev_asize, 0))) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
vdev_dbgmsg(vd, "vdev_load: space_map_open failed for "
"obsolete spacemap (obj %llu) [error=%d]",
(u_longlong_t)obsolete_sm_object, error);
return (error);
}
space_map_update(vd->vdev_obsolete_sm);
@ -3805,9 +3859,9 @@ vdev_deadman(vdev_t *vd)
fio = avl_first(&vq->vq_active_tree);
delta = gethrtime() - fio->io_timestamp;
if (delta > spa_deadman_synctime(spa)) {
zfs_dbgmsg("SLOW IO: zio timestamp %lluns, "
"delta %lluns, last io %lluns",
fio->io_timestamp, delta,
vdev_dbgmsg(vd, "SLOW IO: zio timestamp "
"%lluns, delta %lluns, last io %lluns",
fio->io_timestamp, (u_longlong_t)delta,
vq->vq_io_complete_ts);
fm_panic("I/O to pool '%s' appears to be "
"hung on vdev guid %llu at '%s'.",

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2012, 2016 by Delphix. All rights reserved.
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013 Joyent, Inc. All rights reserved.
*/
@ -325,6 +325,8 @@ vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
if (ddi_devid_str_decode(vd->vdev_devid, &dvd->vd_devid,
&dvd->vd_minor) != 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
vdev_dbgmsg(vd, "vdev_disk_open: invalid "
"vdev_devid '%s'", vd->vdev_devid);
return (SET_ERROR(EINVAL));
}
}
@ -417,6 +419,8 @@ vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
if (error) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
vdev_dbgmsg(vd, "vdev_disk_open: failed to open [error=%d]",
error);
return (error);
}
@ -430,8 +434,8 @@ vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
char *vd_devid;
vd_devid = ddi_devid_str_encode(devid, dvd->vd_minor);
zfs_dbgmsg("vdev %s: update devid from %s, "
"to %s", vd->vdev_path, vd->vdev_devid, vd_devid);
vdev_dbgmsg(vd, "vdev_disk_open: update devid from "
"'%s' to '%s'", vd->vdev_devid, vd_devid);
spa_strfree(vd->vdev_devid);
vd->vdev_devid = spa_strdup(vd_devid);
ddi_devid_str_free(vd_devid);
@ -491,6 +495,7 @@ vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
*/
if (ldi_get_size(dvd->vd_lh, psize) != 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
vdev_dbgmsg(vd, "vdev_disk_open: failed to get size");
return (SET_ERROR(EINVAL));
}

View File

@ -905,6 +905,8 @@ vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
PICKUP_GIANT();
if (cp == NULL) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
vdev_dbgmsg(vd, "vdev_geom_open: failed to open [error=%d]",
error);
return (error);
}
skip_open:

View File

@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2012, 2016 by Delphix. All rights reserved.
*/
/*
@ -1112,8 +1112,15 @@ vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
* Search all labels on this vdev to find the configuration that
* matches the txg for our uberblock.
*/
if (cb.ubl_vd != NULL)
if (cb.ubl_vd != NULL) {
vdev_dbgmsg(cb.ubl_vd, "best uberblock found for spa %s. "
"txg %llu", spa->spa_name, (u_longlong_t)ub->ub_txg);
*config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg);
if (*config == NULL) {
vdev_dbgmsg(cb.ubl_vd, "failed to read label config");
}
}
spa_config_exit(spa, SCL_ALL, FTAG);
}