freebsd-dev/lib/libzpool/util.c
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 by Delphix. All rights reserved.
* Copyright 2017 Jason King
* Copyright (c) 2017, Intel Corporation.
*/
#include <assert.h>
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/spa.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_refcount.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>
#include <libzutil.h>
/*
* Routines needed by more than one client of libzpool.
*/
static void
show_vdev_stats(const char *desc, const char *ctype, nvlist_t *nv, int indent)
{
vdev_stat_t *vs;
vdev_stat_t *v0 = NULL;
uint64_t sec;
uint64_t is_log = 0;
nvlist_t **child;
uint_t c, children;
char used[6], avail[6];
char rops[6], wops[6], rbytes[6], wbytes[6], rerr[6], werr[6], cerr[6];
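/* v0 supplies all-zero stats for any vdev that has no stats array. */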
v0 = umem_zalloc(sizeof (*v0), UMEM_NOFAIL);
if (indent == 0 && desc != NULL) {
(void) printf(" "
2008-11-20 20:01:55 +00:00
" capacity operations bandwidth ---- errors ----\n");
(void) printf("description "
2008-11-20 20:01:55 +00:00
"used avail read write read write read write cksum\n");
}
if (desc != NULL) {
char *suffix = "", *bias = NULL;
char bias_suffix[32];
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &is_log);
(void) nvlist_lookup_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
&bias);
if (nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) != 0)
vs = v0;
if (bias != NULL) {
(void) snprintf(bias_suffix, sizeof (bias_suffix),
" (%s)", bias);
suffix = bias_suffix;
} else if (is_log) {
suffix = " (log)";
}
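/*
 * vs_timestamp is the time since vdev load in nanoseconds; clamp to
 * one second so the per-second averages below never divide by zero.
 */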
sec = MAX(1, vs->vs_timestamp / NANOSEC);
nicenum(vs->vs_alloc, used, sizeof (used));
nicenum(vs->vs_space - vs->vs_alloc, avail, sizeof (avail));
nicenum(vs->vs_ops[ZIO_TYPE_READ] / sec, rops, sizeof (rops));
nicenum(vs->vs_ops[ZIO_TYPE_WRITE] / sec, wops, sizeof (wops));
nicenum(vs->vs_bytes[ZIO_TYPE_READ] / sec, rbytes,
sizeof (rbytes));
nicenum(vs->vs_bytes[ZIO_TYPE_WRITE] / sec, wbytes,
sizeof (wbytes));
nicenum(vs->vs_read_errors, rerr, sizeof (rerr));
nicenum(vs->vs_write_errors, werr, sizeof (werr));
nicenum(vs->vs_checksum_errors, cerr, sizeof (cerr));
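/*
 * The computed field width pads the suffix so the stat columns stay
 * aligned at any indent; spares and caches (vs_space == 0) print no
 * capacity columns.
 */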
(void) printf("%*s%s%*s%*s%*s %5s %5s %5s %5s %5s %5s %5s\n",
indent, "",
desc,
(int)(indent+strlen(desc)-25-(vs->vs_space ? 0 : 12)),
suffix,
vs->vs_space ? 6 : 0, vs->vs_space ? used : "",
vs->vs_space ? 6 : 0, vs->vs_space ? avail : "",
rops, wops, rbytes, wbytes, rerr, werr, cerr);
}
umem_free(v0, sizeof (*v0));
if (nvlist_lookup_nvlist_array(nv, ctype, &child, &children) != 0)
return;
for (c = 0; c < children; c++) {
nvlist_t *cnv = child[c];
char *cname = NULL, *tname;
uint64_t np;
int len;
if (nvlist_lookup_string(cnv, ZPOOL_CONFIG_PATH, &cname) &&
nvlist_lookup_string(cnv, ZPOOL_CONFIG_TYPE, &cname))
cname = "<unknown>";
len = strlen(cname) + 2;
tname = umem_zalloc(len, UMEM_NOFAIL);
(void) strlcpy(tname, cname, len);
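/* Append the parity count so a raidz vdev prints as e.g. "raidz2". */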
if (nvlist_lookup_uint64(cnv, ZPOOL_CONFIG_NPARITY, &np) == 0)
tname[strlen(tname)] = '0' + np;
show_vdev_stats(tname, ctype, cnv, indent + 2);
umem_free(tname, len);
}
}

void
show_pool_stats(spa_t *spa)
{
nvlist_t *config, *nvroot;
char *name;
VERIFY(spa_get_stats(spa_name(spa), &config, NULL, 0) == 0);
VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
show_vdev_stats(name, ZPOOL_CONFIG_CHILDREN, nvroot, 0);
show_vdev_stats(NULL, ZPOOL_CONFIG_L2CACHE, nvroot, 0);
show_vdev_stats(NULL, ZPOOL_CONFIG_SPARES, nvroot, 0);
nvlist_free(config);
}

/* *k_out must be freed by the caller */
static int
set_global_var_parse_kv(const char *arg, char **k_out, u_longlong_t *v_out)
{
int err;
VERIFY(arg);
char *d = strdup(arg);
char *save = NULL;
char *k = strtok_r(d, "=", &save);
char *v_str = strtok_r(NULL, "=", &save);
char *follow = strtok_r(NULL, "=", &save);
if (k == NULL || v_str == NULL || follow != NULL) {
err = EINVAL;
goto err_free;
}
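/* Base 0 lets the value be decimal, hex ("0x..."), or octal ("0..."). */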
u_longlong_t val = strtoull(v_str, NULL, 0);
if (val > UINT32_MAX) {
fprintf(stderr, "Value for global variable '%s' must "
"be a 32-bit unsigned integer, got '%s'\n", k, v_str);
err = EOVERFLOW;
goto err_free;
}
*k_out = strdup(k);
*v_out = val;
free(d);
return (0);
err_free:
free(d);
return (err);
}

/*
 * Sets the given libzpool global variable to the given unsigned 32-bit
 * value.
 * arg: "<variable>=<value>"
 */
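/*
 * Example (illustrative only; assumes libzpool.so exports a 32-bit
 * global named zfs_recover):
 *	(void) set_global_var("zfs_recover=1");
 */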
int
set_global_var(char const *arg)
{
void *zpoolhdl;
char *varname;
u_longlong_t val;
int ret;
#ifndef _ZFS_LITTLE_ENDIAN
/*
* On big endian systems changing a 64-bit variable would set the high
* 32 bits instead of the low 32 bits, which could cause unexpected
* results.
*/
fprintf(stderr, "Setting global variables is only supported on "
"little-endian systems\n");
ret = ENOTSUP;
goto out_ret;
#endif
if ((ret = set_global_var_parse_kv(arg, &varname, &val)) != 0) {
goto out_ret;
}
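/*
 * libzpool.so is normally linked into the calling process already, so
 * dlopen() hands back a handle we can pass to dlsym() to locate the
 * variable.
 */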
zpoolhdl = dlopen("libzpool.so", RTLD_LAZY);
if (zpoolhdl != NULL) {
uint32_t *var;
var = dlsym(zpoolhdl, varname);
if (var == NULL) {
fprintf(stderr, "Global variable '%s' does not exist "
"in libzpool.so\n", varname);
ret = EINVAL;
goto out_dlclose;
}
*var = (uint32_t)val;
} else {
fprintf(stderr, "Failed to open libzpool.so to set global "
"variable\n");
ret = EIO;
goto out_free;
}
ret = 0;
out_dlclose:
dlclose(zpoolhdl);
out_free:
free(varname);
out_ret:
return (ret);
}

static nvlist_t *
refresh_config(void *unused, nvlist_t *tryconfig)
{
(void) unused;
return (spa_tryimport(tryconfig));
}

#if defined(__FreeBSD__)
#include <sys/param.h>
#include <sys/sysctl.h>
#include <os/freebsd/zfs/sys/zfs_ioctl_compat.h>
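
/*
 * zfs_iocparm_t, zfs_cmd_legacy_t, and the ZFS_IOCVER_* constants all
 * come from FreeBSD's ioctl compatibility header above.
 */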
static int
pool_active(void *unused, const char *name, uint64_t guid, boolean_t *isactive)
{
(void) unused, (void) guid;
zfs_iocparm_t zp;
zfs_cmd_t *zc = NULL;
zfs_cmd_legacy_t *zcl = NULL;
unsigned long request;
int ret;
int fd = open(ZFS_DEV, O_RDWR | O_CLOEXEC);
if (fd < 0)
return (-1);
/*
* Use ZFS_IOC_POOL_STATS to check if the pool is active. We want to
* avoid adding a dependency on libzfs_core solely for this ioctl(),
* therefore we manually craft the stats command. Note that the command
* ID is identical between the openzfs and legacy ioctl() formats.
*/
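/* Ask the kernel which ioctl ABI it implements. */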
int ver = ZFS_IOCVER_NONE;
size_t ver_size = sizeof (ver);
sysctlbyname("vfs.zfs.version.ioctl", &ver, &ver_size, NULL, 0);
switch (ver) {
case ZFS_IOCVER_OZFS:
zc = umem_zalloc(sizeof (zfs_cmd_t), UMEM_NOFAIL);
(void) strlcpy(zc->zc_name, name, sizeof (zc->zc_name));
zp.zfs_cmd = (uint64_t)(uintptr_t)zc;
zp.zfs_cmd_size = sizeof (zfs_cmd_t);
zp.zfs_ioctl_version = ZFS_IOCVER_OZFS;
request = _IOWR('Z', ZFS_IOC_POOL_STATS, zfs_iocparm_t);
ret = ioctl(fd, request, &zp);
free((void *)(uintptr_t)zc->zc_nvlist_dst);
umem_free(zc, sizeof (zfs_cmd_t));
break;
case ZFS_IOCVER_LEGACY:
zcl = umem_zalloc(sizeof (zfs_cmd_legacy_t), UMEM_NOFAIL);
(void) strlcpy(zcl->zc_name, name, sizeof (zcl->zc_name));
zp.zfs_cmd = (uint64_t)(uintptr_t)zcl;
zp.zfs_cmd_size = sizeof (zfs_cmd_legacy_t);
zp.zfs_ioctl_version = ZFS_IOCVER_LEGACY;
request = _IOWR('Z', ZFS_IOC_POOL_STATS, zfs_iocparm_t);
ret = ioctl(fd, request, &zp);
free((void *)(uintptr_t)zcl->zc_nvlist_dst);
umem_free(zcl, sizeof (zfs_cmd_legacy_t));
break;
default:
fprintf(stderr, "unrecognized zfs ioctl version %d", ver);
exit(1);
}
(void) close(fd);
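/* The ioctl only succeeds for a pool the kernel currently has imported. */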
*isactive = (ret == 0);
return (0);
}

#else

static int
pool_active(void *unused, const char *name, uint64_t guid,
boolean_t *isactive)
{
(void) unused, (void) guid;
int fd = open(ZFS_DEV, O_RDWR | O_CLOEXEC);
if (fd < 0)
return (-1);
/*
* Use ZFS_IOC_POOL_STATS to check if a pool is active.
*/
zfs_cmd_t *zcp = umem_zalloc(sizeof (zfs_cmd_t), UMEM_NOFAIL);
(void) strlcpy(zcp->zc_name, name, sizeof (zcp->zc_name));
int ret = ioctl(fd, ZFS_IOC_POOL_STATS, zcp);
free((void *)(uintptr_t)zcp->zc_nvlist_dst);
umem_free(zcp, sizeof (zfs_cmd_t));
(void) close(fd);
*isactive = (ret == 0);
return (0);
}
#endif
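
/*
 * These ops are handed to libzutil's pool-import code (as done by e.g.
 * zdb and ztest) so that config refresh and activity checks run against
 * the in-process libzpool rather than requiring libzfs.
 */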
const pool_config_ops_t libzpool_config_ops = {
.pco_refresh_config = refresh_config,
.pco_pool_active = pool_active,
};