Correct reporting of missing leaf vdevs so that the GUID required to
perform pool actions is always displayed.

cddl/contrib/opensolaris/cmd/zpool/zpool_main.c:
	The "zpool status" command reports the "last seen at"
	device node path when the vdev name is being reported
	by GUID.  Augment this code to assume a GUID is reported
	when a device goes missing after initial boot, in
	addition to the previous behavior of doing so for
	devices that are not seen at boot.
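
	For reference, the resulting check in print_status_config()
	condenses to the sketch below.  It is lifted from the
	zpool_main.c hunk further down; the surrounding function and
	variables are unchanged, and the comment is explanatory rather
	than part of the change itself.

		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
		    &notpresent) == 0 ||
		    vs->vs_state <= VDEV_STATE_CANT_OPEN) {
			char *path;

			/* The old path may not be recorded; print it only if known. */
			if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0)
				(void) printf(" was %s", path);
		}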

cddl/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c:
	In zpool_vdev_name(), report recently missing devices
	by GUID.  There is no guarantee they will return at
	their previous location.
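
	A condensed sketch of the resulting zpool_vdev_name() logic,
	taken from the libzfs_pool.c hunk below.  The have_stats and
	have_path locals simply cache the nvlist lookups; the comment
	wording and the elided else branch are explanatory rather than
	verbatim.

		have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &vsc) == 0;
		have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0;

		/*
		 * A device that is not currently present may never reappear
		 * at its old device path, so name it by GUID instead.
		 */
		if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
		    (have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN)) {
			verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) == 0);
			(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
			path = buf;
		} else if (have_path) {
			/* ... existing path/devid handling, unchanged ... */
		}
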
Justin T. Gibbs 2011-07-18 03:00:59 +00:00
parent 6c1942802d
commit d7a00114ea
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=224170
2 changed files with 18 additions and 8 deletions

--- a/cddl/contrib/opensolaris/cmd/zpool/zpool_main.c
+++ b/cddl/contrib/opensolaris/cmd/zpool/zpool_main.c

@@ -1084,10 +1084,11 @@ print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
 	}
 	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
-	    &notpresent) == 0) {
+	    &notpresent) == 0 ||
+	    vs->vs_state <= VDEV_STATE_CANT_OPEN) {
 		char *path;
-		verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
-		(void) printf(" was %s", path);
+		if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0)
+			(void) printf(" was %s", path);
 	} else if (vs->vs_aux != 0) {
 		(void) printf(" ");

--- a/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c
+++ b/cddl/contrib/opensolaris/lib/libzfs/common/libzfs_pool.c

@@ -3110,15 +3110,25 @@ zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
 	char buf[64];
 	vdev_stat_t *vs;
 	uint_t vsc;
+	int have_stats;
+	int have_path;
 
-	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
-	    &value) == 0) {
+	have_stats = nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
+	    (uint64_t **)&vs, &vsc) == 0;
+	have_path = nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0;
+
+	/*
+	 * If the device is not currently present, assume it will not
+	 * come back at the same device path.  Display the device by GUID.
+	 */
+	if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
+	    have_path && have_stats && vs->vs_state <= VDEV_STATE_CANT_OPEN) {
 		verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
 		    &value) == 0);
 		(void) snprintf(buf, sizeof (buf), "%llu",
 		    (u_longlong_t)value);
 		path = buf;
-	} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
+	} else if (have_path) {
 
 		/*
 		 * If the device is dead (faulted, offline, etc) then don't
@@ -3126,8 +3136,7 @@ zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
 		 * open a misbehaving device, which can have undesirable
 		 * effects.
 		 */
-		if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
-		    (uint64_t **)&vs, &vsc) != 0 ||
+		if ((have_stats == 0 ||
 		    vs->vs_state >= VDEV_STATE_DEGRADED) &&
 		    zhp != NULL &&
 		    nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {