/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
*/
/*
* Pool import support functions.
*
* To import a pool, we rely on reading the configuration information from the
* ZFS label of each device. If we successfully read the label, then we
* organize the configuration information in the following hierarchy:
*
* pool guid -> toplevel vdev guid -> label txg
*
* Duplicate entries matching this same tuple will be discarded. Once we have
* examined every device, we pick the best label txg config for each toplevel
* vdev. We then arrange these toplevel vdevs into a complete pool config, and
* update any paths that have changed. Finally, we attempt to import the pool
* using our derived config, and record the results.
*/
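/*
 * For example, if labels read from two different device paths both name
 * pool guid P and top-level vdev guid V, add_config() files them under a
 * single (P, V) pair: configs with distinct txgs become separate
 * config_entry_t nodes, while a config whose txg is already present is
 * discarded. get_configs() then keeps only the config with the highest
 * txg for each top-level vdev.
 */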
#include <ctype.h>
#include <devid.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/vtoc.h>
#include <sys/dktp/fdisk.h>
#include <sys/efi_partition.h>
#include <sys/vdev_impl.h>
#ifdef HAVE_LIBBLKID
#include <blkid/blkid.h>
#endif
#include "libzfs.h"
#include "libzfs_impl.h"
/*
* Intermediate structures used to gather configuration information.
*/
typedef struct config_entry {
uint64_t ce_txg;
nvlist_t *ce_config;
struct config_entry *ce_next;
} config_entry_t;
typedef struct vdev_entry {
uint64_t ve_guid;
config_entry_t *ve_configs;
struct vdev_entry *ve_next;
} vdev_entry_t;
typedef struct pool_entry {
uint64_t pe_guid;
vdev_entry_t *pe_vdevs;
struct pool_entry *pe_next;
} pool_entry_t;
typedef struct name_entry {
char *ne_name;
uint64_t ne_guid;
uint64_t ne_order;
struct name_entry *ne_next;
} name_entry_t;
typedef struct pool_list {
pool_entry_t *pools;
name_entry_t *names;
} pool_list_t;
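/*
 * Return the devid string for the given device path, or NULL if one
 * cannot be derived. The returned string must be freed by the caller
 * with devid_str_free().
 */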
static char *
get_devid(const char *path)
{
int fd;
ddi_devid_t devid;
char *minor, *ret;
if ((fd = open(path, O_RDONLY)) < 0)
return (NULL);
minor = NULL;
ret = NULL;
if (devid_get(fd, &devid) == 0) {
if (devid_get_minor_name(fd, &minor) == 0)
ret = devid_str_encode(devid, minor);
if (minor != NULL)
devid_str_free(minor);
devid_free(devid);
}
(void) close(fd);
return (ret);
}
/*
* Go through and fix up any path and/or devid information for the given vdev
* configuration.
*/
static int
fix_paths(nvlist_t *nv, name_entry_t *names)
{
nvlist_t **child;
uint_t c, children;
uint64_t guid;
name_entry_t *ne, *best;
char *path, *devid;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if (fix_paths(child[c], names) != 0)
return (-1);
return (0);
}
/*
* This is a leaf (file or disk) vdev. In either case, go through
* the name list and see if we find a matching guid. If so, replace
* the path and see if we can calculate a new devid.
*
* There may be multiple names associated with a particular guid, in
* which case we have overlapping partitions or multiple paths to the
* same disk. In this case we prefer to use the path name which
* matches the ZPOOL_CONFIG_PATH. If no matching entry is found we
* use the lowest order device which corresponds to the first match
* while traversing the ZPOOL_IMPORT_PATH search path.
*/
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
path = NULL;
best = NULL;
for (ne = names; ne != NULL; ne = ne->ne_next) {
if (ne->ne_guid == guid) {
if (path == NULL) {
best = ne;
break;
}
if ((strlen(path) == strlen(ne->ne_name)) &&
!strncmp(path, ne->ne_name, strlen(path))) {
best = ne;
break;
}
if (best == NULL || ne->ne_order < best->ne_order)
best = ne;
}
}
if (best == NULL)
return (0);
if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
return (-1);
if ((devid = get_devid(best->ne_name)) == NULL) {
(void) nvlist_remove_all(nv, ZPOOL_CONFIG_DEVID);
} else {
if (nvlist_add_string(nv, ZPOOL_CONFIG_DEVID, devid) != 0)
return (-1);
devid_str_free(devid);
}
return (0);
}
/*
* Add the given configuration to the list of known devices.
*/
static int
add_config(libzfs_handle_t *hdl, pool_list_t *pl, const char *path,
int order, nvlist_t *config)
{
uint64_t pool_guid, vdev_guid, top_guid, txg, state;
pool_entry_t *pe;
vdev_entry_t *ve;
config_entry_t *ce;
name_entry_t *ne;
/*
* If this is a hot spare not currently in use or level 2 cache
* device, add it to the list of names to translate, but don't do
* anything else.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&state) == 0 &&
(state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
return (-1);
if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
free(ne);
return (-1);
}
ne->ne_guid = vdev_guid;
ne->ne_order = order;
ne->ne_next = pl->names;
pl->names = ne;
return (0);
}
/*
* If we have a valid config but cannot read any of these fields, then
* it means we have a half-initialized label. In vdev_label_init()
* we write a label with txg == 0 so that we can identify the device
* in case the user refers to the same disk later on. If we fail to
* create the pool, we'll be left with a label in this state
* which should not be considered part of a valid pool.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) != 0 ||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
&vdev_guid) != 0 ||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
&top_guid) != 0 ||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0) {
nvlist_free(config);
return (0);
}
/*
* First, see if we know about this pool. If not, then add it to the
* list of known pools.
*/
for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
if (pe->pe_guid == pool_guid)
break;
}
if (pe == NULL) {
if ((pe = zfs_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
nvlist_free(config);
return (-1);
}
pe->pe_guid = pool_guid;
pe->pe_next = pl->pools;
pl->pools = pe;
}
/*
	 * Second, see if we know about this toplevel vdev. Add it if it's
	 * missing.
*/
for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
if (ve->ve_guid == top_guid)
break;
}
if (ve == NULL) {
if ((ve = zfs_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
nvlist_free(config);
return (-1);
}
ve->ve_guid = top_guid;
ve->ve_next = pe->pe_vdevs;
pe->pe_vdevs = ve;
}
/*
* Third, see if we have a config with a matching transaction group. If
* so, then we do nothing. Otherwise, add it to the list of known
* configs.
*/
for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
if (ce->ce_txg == txg)
break;
}
if (ce == NULL) {
if ((ce = zfs_alloc(hdl, sizeof (config_entry_t))) == NULL) {
nvlist_free(config);
return (-1);
}
ce->ce_txg = txg;
ce->ce_config = config;
ce->ce_next = ve->ve_configs;
ve->ve_configs = ce;
} else {
nvlist_free(config);
}
/*
* At this point we've successfully added our config to the list of
* known configs. The last thing to do is add the vdev guid -> path
* mappings so that we can fix up the configuration as necessary before
* doing the import.
*/
if ((ne = zfs_alloc(hdl, sizeof (name_entry_t))) == NULL)
return (-1);
if ((ne->ne_name = zfs_strdup(hdl, path)) == NULL) {
free(ne);
return (-1);
}
ne->ne_guid = vdev_guid;
ne->ne_order = order;
ne->ne_next = pl->names;
pl->names = ne;
return (0);
}
/*
* Returns true if the named pool matches the given GUID.
*/
static int
pool_active(libzfs_handle_t *hdl, const char *name, uint64_t guid,
boolean_t *isactive)
{
zpool_handle_t *zhp;
uint64_t theguid;
if (zpool_open_silent(hdl, name, &zhp) != 0)
return (-1);
if (zhp == NULL) {
*isactive = B_FALSE;
return (0);
}
verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_POOL_GUID,
&theguid) == 0);
zpool_close(zhp);
*isactive = (theguid == guid);
return (0);
}
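/*
 * Ask the kernel to attempt a trial import (ZFS_IOC_POOL_TRYIMPORT) of the
 * given configuration and return the refreshed config it reports, or NULL
 * on failure. The caller must free the returned nvlist.
 */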
static nvlist_t *
refresh_config(libzfs_handle_t *hdl, nvlist_t *config)
{
nvlist_t *nvl;
zfs_cmd_t zc = {"\0"};
int err;
if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0)
return (NULL);
if (zcmd_alloc_dst_nvlist(hdl, &zc,
zc.zc_nvlist_conf_size * 2) != 0) {
zcmd_free_nvlists(&zc);
return (NULL);
}
while ((err = ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_TRYIMPORT,
&zc)) != 0 && errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
zcmd_free_nvlists(&zc);
return (NULL);
}
}
if (err) {
zcmd_free_nvlists(&zc);
return (NULL);
}
if (zcmd_read_dst_nvlist(hdl, &zc, &nvl) != 0) {
zcmd_free_nvlists(&zc);
return (NULL);
}
zcmd_free_nvlists(&zc);
return (nvl);
}
/*
* Determine if the vdev id is a hole in the namespace.
*/
boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
int c;
for (c = 0; c < holes; c++) {
/* Top-level is a hole */
if (hole_array[c] == id)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Convert our list of pools into the definitive set of configurations. We
* start by picking the best config for each toplevel vdev. Once that's done,
* we assemble the toplevel vdevs into a full config for the pool. We make a
* pass to fix up any incorrect paths, and then add it to the main list to
* return to the user.
*/
static nvlist_t *
get_configs(libzfs_handle_t *hdl, pool_list_t *pl, boolean_t active_ok)
{
pool_entry_t *pe;
vdev_entry_t *ve;
config_entry_t *ce;
nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
nvlist_t **spares, **l2cache;
uint_t i, nspares, nl2cache;
boolean_t config_seen;
uint64_t best_txg;
char *name, *hostname = NULL;
uint64_t guid;
uint_t children = 0;
nvlist_t **child = NULL;
uint_t holes;
uint64_t *hole_array, max_id;
uint_t c;
boolean_t isactive;
uint64_t hostid;
nvlist_t *nvl;
boolean_t found_one = B_FALSE;
boolean_t valid_top_config = B_FALSE;
if (nvlist_alloc(&ret, 0, 0) != 0)
goto nomem;
for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
uint64_t id, max_txg = 0;
if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
goto nomem;
config_seen = B_FALSE;
/*
* Iterate over all toplevel vdevs. Grab the pool configuration
* from the first one we find, and then go through the rest and
* add them as necessary to the 'vdevs' member of the config.
*/
for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
/*
* Determine the best configuration for this vdev by
* selecting the config with the latest transaction
* group.
*/
best_txg = 0;
for (ce = ve->ve_configs; ce != NULL;
ce = ce->ce_next) {
if (ce->ce_txg > best_txg) {
tmp = ce->ce_config;
best_txg = ce->ce_txg;
}
}
/*
* We rely on the fact that the max txg for the
* pool will contain the most up-to-date information
* about the valid top-levels in the vdev namespace.
*/
if (best_txg > max_txg) {
(void) nvlist_remove(config,
ZPOOL_CONFIG_VDEV_CHILDREN,
DATA_TYPE_UINT64);
(void) nvlist_remove(config,
ZPOOL_CONFIG_HOLE_ARRAY,
DATA_TYPE_UINT64_ARRAY);
max_txg = best_txg;
hole_array = NULL;
holes = 0;
max_id = 0;
valid_top_config = B_FALSE;
if (nvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
verify(nvlist_add_uint64(config,
ZPOOL_CONFIG_VDEV_CHILDREN,
max_id) == 0);
valid_top_config = B_TRUE;
}
if (nvlist_lookup_uint64_array(tmp,
ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
&holes) == 0) {
verify(nvlist_add_uint64_array(config,
ZPOOL_CONFIG_HOLE_ARRAY,
hole_array, holes) == 0);
}
}
if (!config_seen) {
/*
* Copy the relevant pieces of data to the pool
* configuration:
*
* version
* pool guid
* name
* comment (if available)
* pool state
* hostid (if available)
* hostname (if available)
*/
uint64_t state, version;
char *comment = NULL;
version = fnvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_VERSION);
fnvlist_add_uint64(config,
ZPOOL_CONFIG_VERSION, version);
guid = fnvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_POOL_GUID);
fnvlist_add_uint64(config,
ZPOOL_CONFIG_POOL_GUID, guid);
name = fnvlist_lookup_string(tmp,
ZPOOL_CONFIG_POOL_NAME);
fnvlist_add_string(config,
ZPOOL_CONFIG_POOL_NAME, name);
if (nvlist_lookup_string(tmp,
ZPOOL_CONFIG_COMMENT, &comment) == 0)
fnvlist_add_string(config,
ZPOOL_CONFIG_COMMENT, comment);
state = fnvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_POOL_STATE);
fnvlist_add_uint64(config,
ZPOOL_CONFIG_POOL_STATE, state);
hostid = 0;
if (nvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
fnvlist_add_uint64(config,
ZPOOL_CONFIG_HOSTID, hostid);
hostname = fnvlist_lookup_string(tmp,
ZPOOL_CONFIG_HOSTNAME);
fnvlist_add_string(config,
ZPOOL_CONFIG_HOSTNAME, hostname);
}
config_seen = B_TRUE;
}
/*
* Add this top-level vdev to the child array.
*/
verify(nvlist_lookup_nvlist(tmp,
ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
&id) == 0);
if (id >= children) {
nvlist_t **newchild;
newchild = zfs_alloc(hdl, (id + 1) *
sizeof (nvlist_t *));
if (newchild == NULL)
goto nomem;
for (c = 0; c < children; c++)
newchild[c] = child[c];
free(child);
child = newchild;
children = id + 1;
}
if (nvlist_dup(nvtop, &child[id], 0) != 0)
goto nomem;
}
/*
* If we have information about all the top-levels then
* clean up the nvlist which we've constructed. This
* means removing any extraneous devices that are
* beyond the valid range or adding devices to the end
* of our array which appear to be missing.
*/
if (valid_top_config) {
if (max_id < children) {
for (c = max_id; c < children; c++)
nvlist_free(child[c]);
children = max_id;
} else if (max_id > children) {
nvlist_t **newchild;
newchild = zfs_alloc(hdl, (max_id) *
sizeof (nvlist_t *));
if (newchild == NULL)
goto nomem;
for (c = 0; c < children; c++)
newchild[c] = child[c];
free(child);
child = newchild;
children = max_id;
}
}
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
/*
* The vdev namespace may contain holes as a result of
* device removal. We must add them back into the vdev
* tree before we process any missing devices.
*/
if (holes > 0) {
ASSERT(valid_top_config);
for (c = 0; c < children; c++) {
nvlist_t *holey;
if (child[c] != NULL ||
!vdev_is_hole(hole_array, holes, c))
continue;
if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
0) != 0)
goto nomem;
/*
* Holes in the namespace are treated as
* "hole" top-level vdevs and have a
* special flag set on them.
*/
if (nvlist_add_string(holey,
ZPOOL_CONFIG_TYPE,
VDEV_TYPE_HOLE) != 0 ||
nvlist_add_uint64(holey,
ZPOOL_CONFIG_ID, c) != 0 ||
nvlist_add_uint64(holey,
ZPOOL_CONFIG_GUID, 0ULL) != 0)
goto nomem;
child[c] = holey;
}
}
/*
* Look for any missing top-level vdevs. If this is the case,
* create a faked up 'missing' vdev as a placeholder. We cannot
* simply compress the child array, because the kernel performs
* certain checks to make sure the vdev IDs match their location
* in the configuration.
*/
for (c = 0; c < children; c++) {
if (child[c] == NULL) {
nvlist_t *missing;
if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
0) != 0)
goto nomem;
if (nvlist_add_string(missing,
ZPOOL_CONFIG_TYPE,
VDEV_TYPE_MISSING) != 0 ||
nvlist_add_uint64(missing,
ZPOOL_CONFIG_ID, c) != 0 ||
nvlist_add_uint64(missing,
ZPOOL_CONFIG_GUID, 0ULL) != 0) {
nvlist_free(missing);
goto nomem;
}
child[c] = missing;
}
}
/*
* Put all of this pool's top-level vdevs into a root vdev.
*/
if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
goto nomem;
if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT) != 0 ||
nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
child, children) != 0) {
nvlist_free(nvroot);
goto nomem;
}
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
children = 0;
child = NULL;
/*
* Go through and fix up any paths and/or devids based on our
* known list of vdev GUID -> path mappings.
*/
if (fix_paths(nvroot, pl->names) != 0) {
nvlist_free(nvroot);
goto nomem;
}
/*
* Add the root vdev to this pool's configuration.
*/
if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
nvroot) != 0) {
nvlist_free(nvroot);
goto nomem;
}
nvlist_free(nvroot);
/*
* zdb uses this path to report on active pools that were
* imported or created using -R.
*/
if (active_ok)
goto add_pool;
/*
* Determine if this pool is currently active, in which case we
* can't actually import it.
*/
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
if (pool_active(hdl, name, guid, &isactive) != 0)
goto error;
if (isactive) {
nvlist_free(config);
config = NULL;
continue;
}
if ((nvl = refresh_config(hdl, config)) == NULL) {
nvlist_free(config);
config = NULL;
continue;
}
nvlist_free(config);
config = nvl;
/*
* Go through and update the paths for spares, now that we have
* them.
*/
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
for (i = 0; i < nspares; i++) {
if (fix_paths(spares[i], pl->names) != 0)
goto nomem;
}
}
/*
* Update the paths for l2cache devices.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
for (i = 0; i < nl2cache; i++) {
if (fix_paths(l2cache[i], pl->names) != 0)
goto nomem;
}
}
/*
* Restore the original information read from the actual label.
*/
(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
DATA_TYPE_UINT64);
(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
DATA_TYPE_STRING);
if (hostid != 0) {
verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
hostid) == 0);
verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
hostname) == 0);
}
add_pool:
/*
* Add this pool to the list of configs.
*/
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
if (nvlist_add_nvlist(ret, name, config) != 0)
goto nomem;
found_one = B_TRUE;
nvlist_free(config);
config = NULL;
}
if (!found_one) {
nvlist_free(ret);
ret = NULL;
}
return (ret);
nomem:
(void) no_memory(hdl);
error:
nvlist_free(config);
nvlist_free(ret);
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
return (NULL);
}
/*
* Return the offset of the given label.
*/
static uint64_t
label_offset(uint64_t size, int l)
{
ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
/*
* Given a file descriptor, read the label information and return an nvlist
* describing the configuration, if there is one.
*/
int
zpool_read_label(int fd, nvlist_t **config)
{
struct stat64 statbuf;
int l;
vdev_label_t *label;
uint64_t state, txg, size;
*config = NULL;
if (fstat64_blk(fd, &statbuf) == -1)
return (0);
size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
if ((label = malloc(sizeof (vdev_label_t))) == NULL)
return (-1);
for (l = 0; l < VDEV_LABELS; l++) {
if (pread64(fd, label, sizeof (vdev_label_t),
label_offset(size, l)) != sizeof (vdev_label_t))
continue;
if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0)
continue;
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 || state > POOL_STATE_L2CACHE) {
nvlist_free(*config);
continue;
}
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0)) {
nvlist_free(*config);
continue;
}
free(label);
return (0);
}
free(label);
*config = NULL;
return (0);
}
/*
* Given a file descriptor, clear (zero) the label information. This function
* is used in the appliance stack as part of the ZFS sysevent module and
* to implement the "zpool labelclear" command.
*/
int
zpool_clear_label(int fd)
{
struct stat64 statbuf;
int l;
vdev_label_t *label;
uint64_t size;
if (fstat64_blk(fd, &statbuf) == -1)
return (0);
size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
if ((label = calloc(sizeof (vdev_label_t), 1)) == NULL)
return (-1);
for (l = 0; l < VDEV_LABELS; l++) {
		if (pwrite64(fd, label, sizeof (vdev_label_t),
		    label_offset(size, l)) != sizeof (vdev_label_t)) {
			free(label);
			return (-1);
		}
	}
free(label);
return (0);
}
#ifdef HAVE_LIBBLKID
/*
* Use libblkid to quickly search for zfs devices
*/
static int
zpool_find_import_blkid(libzfs_handle_t *hdl, pool_list_t *pools)
{
blkid_cache cache;
blkid_dev_iterate iter;
blkid_dev dev;
const char *devname;
nvlist_t *config;
int fd, err;
err = blkid_get_cache(&cache, NULL);
if (err != 0) {
(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN, "blkid_get_cache() %d"), err);
goto err_blkid1;
}
err = blkid_probe_all(cache);
if (err != 0) {
(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN, "blkid_probe_all() %d"), err);
goto err_blkid2;
}
iter = blkid_dev_iterate_begin(cache);
if (iter == NULL) {
(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN, "blkid_dev_iterate_begin()"));
goto err_blkid2;
}
err = blkid_dev_set_search(iter, "TYPE", "zfs_member");
if (err != 0) {
(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN, "blkid_dev_set_search() %d"), err);
goto err_blkid3;
}
while (blkid_dev_next(iter, &dev) == 0) {
devname = blkid_dev_devname(dev);
if ((fd = open64(devname, O_RDONLY)) < 0)
continue;
err = zpool_read_label(fd, &config);
(void) close(fd);
if (err != 0) {
(void) no_memory(hdl);
goto err_blkid3;
}
if (config != NULL) {
err = add_config(hdl, pools, devname, 0, config);
if (err != 0)
goto err_blkid3;
}
}
err_blkid3:
blkid_dev_iterate_end(iter);
err_blkid2:
blkid_put_cache(cache);
err_blkid1:
	return (err);
}
#endif /* HAVE_LIBBLKID */
char *
zpool_default_import_path[DEFAULT_IMPORT_PATH_SIZE] = {
"/dev/disk/by-vdev", /* Custom rules, use first if they exist */
"/dev/mapper", /* Use multipath devices before components */
"/dev/disk/by-uuid", /* Single unique entry and persistent */
"/dev/disk/by-id", /* May be multiple entries and persistent */
"/dev/disk/by-path", /* Encodes physical location and persistent */
"/dev/disk/by-label", /* Custom persistent labels */
"/dev" /* UNSAFE device names will change */
};
/*
* Given a list of directories to search, find all pools stored on disk. This
 * includes partial pools which are not available to import. If no search
 * paths are given (iarg->paths is 0), libblkid is used when available,
 * falling back to scanning the zpool_default_import_path directories.
* poolname or guid (but not both) are provided by the caller when trying
* to import a specific pool.
*/
static nvlist_t *
zpool_find_import_impl(libzfs_handle_t *hdl, importargs_t *iarg)
{
int i, dirs = iarg->paths;
DIR *dirp = NULL;
struct dirent64 *dp;
char path[MAXPATHLEN];
char *end, **dir = iarg->path;
size_t pathleft;
struct stat64 statbuf;
nvlist_t *ret = NULL, *config;
int fd;
pool_list_t pools = { 0 };
pool_entry_t *pe, *penext;
vdev_entry_t *ve, *venext;
config_entry_t *ce, *cenext;
name_entry_t *ne, *nenext;
verify(iarg->poolname == NULL || iarg->guid == 0);
if (dirs == 0) {
#ifdef HAVE_LIBBLKID
		/* Use libblkid to scan all devices for their type */
if (zpool_find_import_blkid(hdl, &pools) == 0)
goto skip_scanning;
(void) zfs_error_fmt(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN, "blkid failure falling back "
"to manual probing"));
#endif /* HAVE_LIBBLKID */
dir = zpool_default_import_path;
dirs = DEFAULT_IMPORT_PATH_SIZE;
}
/*
* Go through and read the label configuration information from every
* possible device, organizing the information according to pool GUID
* and toplevel GUID.
*/
for (i = 0; i < dirs; i++) {
char *rdsk;
int dfd;
/* use realpath to normalize the path */
if (realpath(dir[i], path) == 0) {
/* it is safe to skip missing search paths */
if (errno == ENOENT)
continue;
zfs_error_aux(hdl, strerror(errno));
(void) zfs_error_fmt(hdl, EZFS_BADPATH,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), dir[i]);
goto error;
}
end = &path[strlen(path)];
*end++ = '/';
*end = 0;
pathleft = &path[sizeof (path)] - end;
/*
* Using raw devices instead of block devices when we're
* reading the labels skips a bunch of slow operations during
* close(2) processing, so we replace /dev/dsk with /dev/rdsk.
*/
if (strcmp(path, "/dev/dsk/") == 0)
rdsk = "/dev/rdsk/";
else
rdsk = path;
if ((dfd = open64(rdsk, O_RDONLY)) < 0 ||
(dirp = fdopendir(dfd)) == NULL) {
zfs_error_aux(hdl, strerror(errno));
(void) zfs_error_fmt(hdl, EZFS_BADPATH,
dgettext(TEXT_DOMAIN, "cannot open '%s'"),
rdsk);
goto error;
}
/*
* This is not MT-safe, but we have no MT consumers of libzfs
*/
while ((dp = readdir64(dirp)) != NULL) {
const char *name = dp->d_name;
if (name[0] == '.' &&
(name[1] == 0 || (name[1] == '.' && name[2] == 0)))
continue;
/*
* Skip checking devices with well known prefixes:
* watchdog - A special close is required to avoid
* triggering it and resetting the system.
* fuse - Fuse control device.
* ppp - Generic PPP driver.
* tty* - Generic serial interface.
* vcs* - Virtual console memory.
* parport* - Parallel port interface.
* lp* - Printer interface.
* fd* - Floppy interface.
* hpet - High Precision Event Timer, crashes qemu
* when accessed from a virtual machine.
* core - Symlink to /proc/kcore, causes a crash
			 * when accessed from Xen dom0.
*/
if ((strncmp(name, "watchdog", 8) == 0) ||
(strncmp(name, "fuse", 4) == 0) ||
(strncmp(name, "ppp", 3) == 0) ||
(strncmp(name, "tty", 3) == 0) ||
(strncmp(name, "vcs", 3) == 0) ||
(strncmp(name, "parport", 7) == 0) ||
(strncmp(name, "lp", 2) == 0) ||
(strncmp(name, "fd", 2) == 0) ||
(strncmp(name, "hpet", 4) == 0) ||
(strncmp(name, "core", 4) == 0))
continue;
/*
* Ignore failed stats. We only want regular
* files and block devices.
*/
if ((fstatat64(dfd, name, &statbuf, 0) != 0) ||
(!S_ISREG(statbuf.st_mode) &&
!S_ISBLK(statbuf.st_mode)))
continue;
if ((fd = openat64(dfd, name, O_RDONLY)) < 0)
continue;
if ((zpool_read_label(fd, &config)) != 0) {
(void) close(fd);
(void) no_memory(hdl);
goto error;
}
(void) close(fd);
if (config != NULL) {
boolean_t matched = B_TRUE;
char *pname;
if ((iarg->poolname != NULL) &&
(nvlist_lookup_string(config,
ZPOOL_CONFIG_POOL_NAME, &pname) == 0)) {
if (strcmp(iarg->poolname, pname))
matched = B_FALSE;
} else if (iarg->guid != 0) {
uint64_t this_guid;
matched = nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID,
&this_guid) == 0 &&
iarg->guid == this_guid;
}
if (!matched) {
nvlist_free(config);
config = NULL;
continue;
}
/* use the non-raw path for the config */
(void) strlcpy(end, name, pathleft);
if (add_config(hdl, &pools, path, i+1, config))
goto error;
}
}
(void) closedir(dirp);
dirp = NULL;
}
#ifdef HAVE_LIBBLKID
skip_scanning:
#endif
ret = get_configs(hdl, &pools, iarg->can_be_active);
error:
for (pe = pools.pools; pe != NULL; pe = penext) {
penext = pe->pe_next;
for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
venext = ve->ve_next;
for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
cenext = ce->ce_next;
if (ce->ce_config)
nvlist_free(ce->ce_config);
free(ce);
}
free(ve);
}
free(pe);
}
for (ne = pools.names; ne != NULL; ne = nenext) {
nenext = ne->ne_next;
if (ne->ne_name)
free(ne->ne_name);
free(ne);
}
if (dirp)
(void) closedir(dirp);
return (ret);
}
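/*
 * Given a list of directories to search (argv), find all pools stored on
 * disk. This is a thin convenience wrapper around zpool_find_import_impl().
 */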
nvlist_t *
zpool_find_import(libzfs_handle_t *hdl, int argc, char **argv)
{
importargs_t iarg = { 0 };
iarg.paths = argc;
iarg.path = argv;
return (zpool_find_import_impl(hdl, &iarg));
}
/*
* Given a cache file, return the contents as a list of importable pools.
* poolname or guid (but not both) are provided by the caller when trying
* to import a specific pool.
*/
nvlist_t *
zpool_find_import_cached(libzfs_handle_t *hdl, const char *cachefile,
char *poolname, uint64_t guid)
{
char *buf;
int fd;
struct stat64 statbuf;
nvlist_t *raw, *src, *dst;
nvlist_t *pools;
nvpair_t *elem;
char *name;
uint64_t this_guid;
boolean_t active;
verify(poolname == NULL || guid == 0);
if ((fd = open(cachefile, O_RDONLY)) < 0) {
zfs_error_aux(hdl, "%s", strerror(errno));
(void) zfs_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN, "failed to open cache file"));
return (NULL);
}
if (fstat64(fd, &statbuf) != 0) {
zfs_error_aux(hdl, "%s", strerror(errno));
(void) close(fd);
(void) zfs_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
return (NULL);
}
if ((buf = zfs_alloc(hdl, statbuf.st_size)) == NULL) {
(void) close(fd);
return (NULL);
}
if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
(void) close(fd);
free(buf);
(void) zfs_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN,
"failed to read cache file contents"));
return (NULL);
}
(void) close(fd);
if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
free(buf);
(void) zfs_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN,
"invalid or corrupt cache file contents"));
return (NULL);
}
free(buf);
/*
* Go through and get the current state of the pools and refresh their
* state.
*/
if (nvlist_alloc(&pools, 0, 0) != 0) {
(void) no_memory(hdl);
nvlist_free(raw);
return (NULL);
}
elem = NULL;
while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
verify(nvpair_value_nvlist(elem, &src) == 0);
verify(nvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
if (poolname != NULL && strcmp(poolname, name) != 0)
continue;
verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
&this_guid) == 0);
if (guid != 0) {
verify(nvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID,
&this_guid) == 0);
if (guid != this_guid)
continue;
}
if (pool_active(hdl, name, this_guid, &active) != 0) {
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
if (active)
continue;
if ((dst = refresh_config(hdl, src)) == NULL) {
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
(void) no_memory(hdl);
nvlist_free(dst);
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
nvlist_free(dst);
}
nvlist_free(raw);
return (pools);
}
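/*
 * zpool_iter() callback: returns nonzero if the pool matches the name or
 * guid requested in the importargs_t passed via 'data'.
 */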
static int
name_or_guid_exists(zpool_handle_t *zhp, void *data)
{
importargs_t *import = data;
int found = 0;
if (import->poolname != NULL) {
char *pool_name;
verify(nvlist_lookup_string(zhp->zpool_config,
ZPOOL_CONFIG_POOL_NAME, &pool_name) == 0);
if (strcmp(pool_name, import->poolname) == 0)
found = 1;
} else {
uint64_t pool_guid;
verify(nvlist_lookup_uint64(zhp->zpool_config,
ZPOOL_CONFIG_POOL_GUID, &pool_guid) == 0);
if (pool_guid == import->guid)
found = 1;
}
zpool_close(zhp);
return (found);
}
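/*
 * Top-level entry point for pool searches. Optionally records whether a
 * pool with the requested name or guid is already imported (import->unique),
 * then searches either the cache file or the on-disk labels for importable
 * pools.
 */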
nvlist_t *
zpool_search_import(libzfs_handle_t *hdl, importargs_t *import)
{
verify(import->poolname == NULL || import->guid == 0);
if (import->unique)
import->exists = zpool_iter(hdl, name_or_guid_exists, import);
if (import->cachefile != NULL)
return (zpool_find_import_cached(hdl, import->cachefile,
import->poolname, import->guid));
return (zpool_find_import_impl(hdl, import));
}
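/*
 * Returns B_TRUE if a vdev with the given guid appears anywhere in the
 * given vdev tree, searching all children recursively.
 */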
boolean_t
find_guid(nvlist_t *nv, uint64_t guid)
{
uint64_t tmp;
nvlist_t **child;
uint_t c, children;
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &tmp) == 0);
if (tmp == guid)
return (B_TRUE);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if (find_guid(child[c], guid))
return (B_TRUE);
}
return (B_FALSE);
}
typedef struct aux_cbdata {
const char *cb_type;
uint64_t cb_guid;
zpool_handle_t *cb_zhp;
} aux_cbdata_t;
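/*
 * zpool_iter() callback: searches the pool's spare or l2cache vdev list
 * (cb_type) for a vdev with guid cb_guid. On a match the pool handle is
 * left open in cb_zhp and iteration stops.
 */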
static int
find_aux(zpool_handle_t *zhp, void *data)
{
aux_cbdata_t *cbp = data;
nvlist_t **list;
uint_t i, count;
uint64_t guid;
nvlist_t *nvroot;
verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (nvlist_lookup_nvlist_array(nvroot, cbp->cb_type,
&list, &count) == 0) {
for (i = 0; i < count; i++) {
verify(nvlist_lookup_uint64(list[i],
ZPOOL_CONFIG_GUID, &guid) == 0);
if (guid == cbp->cb_guid) {
cbp->cb_zhp = zhp;
return (1);
}
}
}
zpool_close(zhp);
return (0);
}
/*
* Determines if the pool is in use. If so, it returns true and the state of
 * the pool as well as the name of the pool. The name string is allocated
 * and must be freed by the caller.
*/
int
zpool_in_use(libzfs_handle_t *hdl, int fd, pool_state_t *state, char **namestr,
boolean_t *inuse)
{
nvlist_t *config;
char *name;
boolean_t ret;
uint64_t guid, vdev_guid;
zpool_handle_t *zhp;
nvlist_t *pool_config;
uint64_t stateval, isspare;
aux_cbdata_t cb = { 0 };
boolean_t isactive;
*inuse = B_FALSE;
if (zpool_read_label(fd, &config) != 0) {
(void) no_memory(hdl);
return (-1);
}
if (config == NULL)
return (0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&stateval) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
&vdev_guid) == 0);
if (stateval != POOL_STATE_SPARE && stateval != POOL_STATE_L2CACHE) {
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
}
switch (stateval) {
case POOL_STATE_EXPORTED:
/*
* A pool with an exported state may in fact be imported
* read-only, so check the in-core state to see if it's
* active and imported read-only. If it is, set
* its state to active.
*/
if (pool_active(hdl, name, guid, &isactive) == 0 && isactive &&
(zhp = zpool_open_canfail(hdl, name)) != NULL &&
zpool_get_prop_int(zhp, ZPOOL_PROP_READONLY, NULL))
stateval = POOL_STATE_ACTIVE;
ret = B_TRUE;
break;
case POOL_STATE_ACTIVE:
/*
* For an active pool, we have to determine if it's really part
* of a currently active pool (in which case the pool will exist
* and the guid will be the same), or whether it's part of an
* active pool that was disconnected without being explicitly
* exported.
*/
if (pool_active(hdl, name, guid, &isactive) != 0) {
nvlist_free(config);
return (-1);
}
if (isactive) {
/*
* Because the device may have been removed while
* offlined, we only report it as active if the vdev is
* still present in the config. Otherwise, pretend like
* it's not in use.
*/
if ((zhp = zpool_open_canfail(hdl, name)) != NULL &&
(pool_config = zpool_get_config(zhp, NULL))
!= NULL) {
nvlist_t *nvroot;
verify(nvlist_lookup_nvlist(pool_config,
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
ret = find_guid(nvroot, vdev_guid);
} else {
ret = B_FALSE;
}
/*
* If this is an active spare within another pool, we
* treat it like an unused hot spare. This allows the
			 * user to create a pool with a hot spare that is currently
* in use within another pool. Since we return B_TRUE,
* libdiskmgt will continue to prevent generic consumers
* from using the device.
*/
if (ret && nvlist_lookup_uint64(config,
ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
stateval = POOL_STATE_SPARE;
if (zhp != NULL)
zpool_close(zhp);
} else {
stateval = POOL_STATE_POTENTIALLY_ACTIVE;
ret = B_TRUE;
}
break;
case POOL_STATE_SPARE:
/*
* For a hot spare, it can be either definitively in use, or
* potentially active. To determine if it's in use, we iterate
* over all pools in the system and search for one with a spare
* with a matching guid.
*
* Due to the shared nature of spares, we don't actually report
* the potentially active case as in use. This means the user
* can freely create pools on the hot spares of exported pools,
* but to do otherwise makes the resulting code complicated, and
* we end up having to deal with this case anyway.
*/
cb.cb_zhp = NULL;
cb.cb_guid = vdev_guid;
cb.cb_type = ZPOOL_CONFIG_SPARES;
if (zpool_iter(hdl, find_aux, &cb) == 1) {
name = (char *)zpool_get_name(cb.cb_zhp);
ret = TRUE;
} else {
ret = FALSE;
}
break;
case POOL_STATE_L2CACHE:
/*
* Check if any pool is currently using this l2cache device.
*/
cb.cb_zhp = NULL;
cb.cb_guid = vdev_guid;
cb.cb_type = ZPOOL_CONFIG_L2CACHE;
if (zpool_iter(hdl, find_aux, &cb) == 1) {
name = (char *)zpool_get_name(cb.cb_zhp);
ret = TRUE;
} else {
ret = FALSE;
}
break;
default:
ret = B_FALSE;
}
if (ret) {
if ((*namestr = zfs_strdup(hdl, name)) == NULL) {
if (cb.cb_zhp)
zpool_close(cb.cb_zhp);
nvlist_free(config);
return (-1);
}
*state = (pool_state_t)stateval;
}
if (cb.cb_zhp)
zpool_close(cb.cb_zhp);
nvlist_free(config);
*inuse = ret;
return (0);
}