Move properties, parameters, events, and concepts around manual sections

The pages moved as follows:
  zpool-features.{5 => 7}
  spl{-module-parameters.5 => .4}
  zfs{-module-parameters.5 => .4}
  zfs-events.5 => into zpool-events.8
  zfsconcepts.{8 => 7}
  zfsprops.{8 => 7}
  zpoolconcepts.{8 => 7}
  zpoolprops.{8 => 7}

Reviewed-by: Richard Laager <rlaager@wiktel.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Co-authored-by: Daniel Ebdrup Jensen <debdrup@FreeBSD.org>
Closes #12149
Closes #12212
наб 2021-06-04 22:29:26 +02:00 committed by Brian Behlendorf
parent b0f3e8a6eb
commit 2badb3457a
50 changed files with 543 additions and 582 deletions
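In practical terms, the mapping above only changes which manual section each page is consulted from. A minimal sketch of the equivalent man(1) invocations after this commit (illustrative only, not part of the diff):

  man 7 zpool-features   # was: man 5 zpool-features
  man 4 spl              # was: man 5 spl-module-parameters
  man 4 zfs              # was: man 5 zfs-module-parameters
  man 8 zpool-events     # zfs-events(5) content was folded in here
  man 7 zfsconcepts      # was: man 8 zfsconcepts
  man 7 zfsprops         # was: man 8 zfsprops
  man 7 zpoolconcepts    # was: man 8 zpoolconcepts
  man 7 zpoolprops       # was: man 8 zpoolprops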


@ -533,7 +533,7 @@ usage(boolean_t requested)
(void) fprintf(fp, "YES disabled | enabled | active\n");
(void) fprintf(fp, gettext("\nThe feature@ properties must be "
"appended with a feature name.\nSee zpool-features(5).\n"));
"appended with a feature name.\nSee zpool-features(7).\n"));
}
/*
@ -8248,7 +8248,7 @@ status_callback(zpool_handle_t *zhp, void *data)
printf_color(ANSI_YELLOW, gettext("Enable all features using "
"'zpool upgrade'. Once this is done,\n\tthe pool may no "
"longer be accessible by software that does not support\n\t"
"the features. See zpool-features(5) for details.\n"));
"the features. See zpool-features(7) for details.\n"));
break;
case ZPOOL_STATUS_COMPATIBILITY_ERR:
@ -8951,7 +8951,7 @@ upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
"pool may become incompatible with "
"software\nthat does not support "
"the feature. See "
"zpool-features(5) for "
"zpool-features(7) for "
"details.\n\n"
"Note that the pool "
"'compatibility' feature can be "


@ -1360,7 +1360,7 @@
"type": "row"
},
{
"content": "I/O requests that are satisfied by accessing pool devices are managed by the ZIO scheduler.\nThe total latency is measured from the start of the I/O to completion by the disk.\nLatency through each queue is shown prior to its submission to the disk queue.\n\nThis view is useful for observing the effects of tuning the ZIO scheduler min and max values\n(see zfs-module-parameters(5) and [ZFS on Linux Module Parameters](https://openzfs.github.io/openzfs-docs/Performance%20and%20tuning/ZFS%20on%20Linux%20Module%20Parameters.html)):\n+ *zfs_vdev_max_active* controls the ZIO scheduler's disk queue depth (do not confuse with the block device's nr_requests)\n+ *zfs_vdev_sync_read_min_active* and *zfs_vdev_sync_read_max_active* control the synchronous queue for reads: most reads are sync\n+ *zfs_vdev_sync_write_min_active* and *zfs_vdev_sync_write_max_active* control the synchronous queue for writes: \nusually metadata or user data depending on the \"sync\" property setting or I/Os that are requested to be flushed\n+ *zfs_vdev_async_read_min_active* and *zfs_vdev_async_read_max_active* control the asynchronous queue for reads: usually prefetches\n+ *zfs_vdev_async_write_min_active* and *zfs_vdev_async_write_max_active* control the asynchronous queue for writes: \nusually the bulk of all writes at transaction group (txg) commit\n+ *zfs_vdev_scrub_min_active* and *zfs_vdev_scrub_max_active* controls the scan reads: usually scrub or resilver\n\n",
"content": "I/O requests that are satisfied by accessing pool devices are managed by the ZIO scheduler.\nThe total latency is measured from the start of the I/O to completion by the disk.\nLatency through each queue is shown prior to its submission to the disk queue.\n\nThis view is useful for observing the effects of tuning the ZIO scheduler min and max values\n(see zfs(4) and [ZFS on Linux Module Parameters](https://openzfs.github.io/openzfs-docs/Performance%20and%20tuning/ZFS%20on%20Linux%20Module%20Parameters.html)):\n+ *zfs_vdev_max_active* controls the ZIO scheduler's disk queue depth (do not confuse with the block device's nr_requests)\n+ *zfs_vdev_sync_read_min_active* and *zfs_vdev_sync_read_max_active* control the synchronous queue for reads: most reads are sync\n+ *zfs_vdev_sync_write_min_active* and *zfs_vdev_sync_write_max_active* control the synchronous queue for writes: \nusually metadata or user data depending on the \"sync\" property setting or I/Os that are requested to be flushed\n+ *zfs_vdev_async_read_min_active* and *zfs_vdev_async_read_max_active* control the asynchronous queue for reads: usually prefetches\n+ *zfs_vdev_async_write_min_active* and *zfs_vdev_async_write_max_active* control the asynchronous queue for writes: \nusually the bulk of all writes at transaction group (txg) commit\n+ *zfs_vdev_scrub_min_active* and *zfs_vdev_scrub_max_active* controls the scan reads: usually scrub or resilver\n\n",
"datasource": "${DS_MACBOOK-INFLUX}",
"fieldConfig": {
"defaults": {

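The dashboard text above refers to the ZIO scheduler min/max tunables now documented in zfs(4). As a rough sketch of how such tuning is typically inspected and adjusted on Linux (assumes the standard sysfs module-parameter layout; not part of this commit, run as root, and prefer persistent modprobe options for permanent changes):

  # Inspect a few of the ZIO scheduler queue-depth parameters.
  grep . /sys/module/zfs/parameters/zfs_vdev_max_active \
         /sys/module/zfs/parameters/zfs_vdev_sync_read_max_active \
         /sys/module/zfs/parameters/zfs_vdev_async_write_max_active

  # Adjust the async write queue depth for the current boot only.
  echo 10 > /sys/module/zfs/parameters/zfs_vdev_async_write_max_active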

@ -11,18 +11,21 @@ dist_man_MANS = \
man1/arcstat.1 \
\
man5/vdev_id.conf.5 \
man5/zpool-features.5 \
man5/spl-module-parameters.5 \
man5/zfs-module-parameters.5 \
man5/zfs-events.5 \
\
man4/spl.4 \
man4/zfs.4 \
\
man7/zpool-features.7 \
man7/zfsconcepts.7 \
man7/zfsprops.7 \
man7/zpoolconcepts.7 \
man7/zpoolprops.7 \
\
man8/fsck.zfs.8 \
man8/mount.zfs.8 \
man8/vdev_id.8 \
man8/zdb.8 \
man8/zfs.8 \
man8/zfsconcepts.8 \
man8/zfsprops.8 \
man8/zfs-allow.8 \
man8/zfs-bookmark.8 \
man8/zfs-change-key.8 \
@ -63,8 +66,6 @@ dist_man_MANS = \
man8/zgenhostid.8 \
man8/zinject.8 \
man8/zpool.8 \
man8/zpoolconcepts.8 \
man8/zpoolprops.8 \
man8/zpool-add.8 \
man8/zpool-attach.8 \
man8/zpool-checkpoint.8 \


@ -138,5 +138,5 @@ descriptions_obj:
.
.Sh SEE ALSO
.Xr ztest 1 ,
.Xr zpool-features 5 ,
.Xr zpool-features 7 ,
.Xr zfs 8


@ -230,4 +230,4 @@ By default the stack size is limited to
.Xr zdb 1 ,
.Xr zfs 1 ,
.Xr zpool 1 ,
.Xr spl-module-parameters 5
.Xr spl 4


@ -15,11 +15,11 @@
.\" Copyright 2013 Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
.\"
.Dd August 24, 2020
.Dt SPL-MODULE-PARAMETERS 5
.Dt SPL 4
.Os
.
.Sh NAME
.Nm spl-module-parameters
.Nm spl
.Nd parameters of the SPL kernel module
.
.Sh DESCRIPTION
@ -192,5 +192,4 @@ The proc file will walk the lists with lock held,
reading it could cause a lock-up if the list grow too large
without limiting the output.
"(truncated)" will be shown if the list is larger than the limit.
.
.El


@ -16,14 +16,15 @@
.\" Portions Copyright [yyyy] [name of copyright owner]
.\"
.Dd June 1, 2021
.Dt ZFS-MODULE-PARAMETERS 5
.Dt ZFS 4
.Os
.
.Sh NAME
.Nm zfs-module-parameters
.Nd parameters of the ZFS kernel module
.Nm zfs
.Nd tuning of the ZFS kernel module
.
.Sh DESCRIPTION
The ZFS module supports these parameters:
.Bl -tag -width Ds
.It Sy dbuf_cache_max_bytes Ns = Ns Sy ULONG_MAX Ns B Pq ulong
Maximum size in bytes of the dbuf cache.


@ -1,448 +0,0 @@
.\"
.\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
.\" Portions Copyright 2018 by Richard Elling
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
.\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
.\"
.\" See the License for the specific language governing permissions and
.\" limitations under the License. When distributing Covered Code, include this
.\" CDDL HEADER in each file and include the License file at
.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.\"
.Dd May 26, 2021
.Dt ZFS-EVENTS 5
.Os
.
.Sh NAME
.Nm zfs-events
.Nd Events created by the ZFS filesystem
.Sh DESCRIPTION
Description of the different events generated by the ZFS stack.
.Pp
Most of these don't have any description.
The events generated by ZFS have never been publicly documented.
What is here is intended as a starting point to provide documentation
for all possible events.
.Pp
To view all events created since the loading of the ZFS infrastructure
(i.e, "the module"), run
.Dl Nm zpool Cm events
to get a short list, and
.Dl Nm zpool Cm events Fl v
to get a full detail of the events and what information
is available about it.
.Pp
This manual page lists the different subclasses that are issued
in the case of an event.
The full event name would be
.Sy ereport.fs.zfs.\& Ns Em SUBCLASS ,
but we only list the last part here.
.
.Sh EVENTS (SUBCLASS)
.Bl -tag -compact -width "vdev.bad_guid_sum"
.It Sy checksum
Issued when a checksum error has been detected.
.It Sy io
Issued when there is an I/O error in a vdev in the pool.
.It Sy data
Issued when there have been data errors in the pool.
.It Sy deadman
Issued when an I/O request is determined to be "hung", this can be caused
by lost completion events due to flaky hardware or drivers.
See
.Sy zfs_deadman_failmode
in
.Xr zfs-module-parameters 5
for additional information regarding "hung" I/O detection and configuration.
.It Sy delay
Issued when a completed I/O request exceeds the maximum allowed time
specified by the
.Sy zio_slow_io_ms
module parameter.
This can be an indicator of problems with the underlying storage device.
The number of delay events is ratelimited by the
.Sy zfs_slow_io_events_per_second
module parameter.
.It Sy config
Issued every time a vdev change have been done to the pool.
.It Sy zpool
Issued when a pool cannot be imported.
.It Sy zpool.destroy
Issued when a pool is destroyed.
.It Sy zpool.export
Issued when a pool is exported.
.It Sy zpool.import
Issued when a pool is imported.
.It Sy zpool.reguid
Issued when a REGUID (new unique identifier for the pool have been regenerated) have been detected.
.It Sy vdev.unknown
Issued when the vdev is unknown.
Such as trying to clear device errors on a vdev that have failed/been kicked
from the system/pool and is no longer available.
.It Sy vdev.open_failed
Issued when a vdev could not be opened (because it didn't exist for example).
.It Sy vdev.corrupt_data
Issued when corrupt data have been detected on a vdev.
.It Sy vdev.no_replicas
Issued when there are no more replicas to sustain the pool.
This would lead to the pool being
.Em DEGRADED .
.It Sy vdev.bad_guid_sum
Issued when a missing device in the pool have been detected.
.It Sy vdev.too_small
Issued when the system (kernel) have removed a device, and ZFS
notices that the device isn't there any more.
This is usually followed by a
.Sy probe_failure
event.
.It Sy vdev.bad_label
Issued when the label is OK but invalid.
.It Sy vdev.bad_ashift
Issued when the ashift alignment requirement has increased.
.It Sy vdev.remove
Issued when a vdev is detached from a mirror (or a spare detached from a
vdev where it have been used to replace a failed drive - only works if
the original drive have been readded).
.It Sy vdev.clear
Issued when clearing device errors in a pool.
Such as running
.Nm zpool Cm clear
on a device in the pool.
.It Sy vdev.check
Issued when a check to see if a given vdev could be opened is started.
.It Sy vdev.spare
Issued when a spare have kicked in to replace a failed device.
.It Sy vdev.autoexpand
Issued when a vdev can be automatically expanded.
.It Sy io_failure
Issued when there is an I/O failure in a vdev in the pool.
.It Sy probe_failure
Issued when a probe fails on a vdev.
This would occur if a vdev
have been kicked from the system outside of ZFS (such as the kernel
have removed the device).
.It Sy log_replay
Issued when the intent log cannot be replayed.
The can occur in the case of a missing or damaged log device.
.It Sy resilver.start
Issued when a resilver is started.
.It Sy resilver.finish
Issued when the running resilver have finished.
.It Sy scrub.start
Issued when a scrub is started on a pool.
.It Sy scrub.finish
Issued when a pool has finished scrubbing.
.It Sy scrub.abort
Issued when a scrub is aborted on a pool.
.It Sy scrub.resume
Issued when a scrub is resumed on a pool.
.It Sy scrub.paused
Issued when a scrub is paused on a pool.
.It Sy bootfs.vdev.attach
.El
.
.Sh PAYLOADS
This is the payload (data, information) that accompanies an
event.
.Pp
For
.Xr zed 8 ,
these are set to uppercase and prefixed with
.Sy ZEVENT_ .
.Bl -tag -compact -width "vdev_cksum_errors"
.It Sy pool
Pool name.
.It Sy pool_failmode
Failmode -
.Sy wait ,
.Sy continue ,
or
.Sy panic .
See the
.Sy failmode
property in
.Xr zpoolprops 8
for more information.
.It Sy pool_guid
The GUID of the pool.
.It Sy pool_context
The load state for the pool (0=none, 1=open, 2=import, 3=tryimport, 4=recover
5=error).
.It Sy vdev_guid
The GUID of the vdev in question (the vdev failing or operated upon with
.Nm zpool Cm clear ,
etc.).
.It Sy vdev_type
Type of vdev -
.Sy disk ,
.Sy file ,
.Sy mirror ,
etc.
See the
.Sy Virtual Devices
section of
.Xr zpoolconcepts 8
for more information on possible values.
.It Sy vdev_path
Full path of the vdev, including any
.Em -partX .
.It Sy vdev_devid
ID of vdev (if any).
.It Sy vdev_fru
Physical FRU location.
.It Sy vdev_state
State of vdev (0=uninitialized, 1=closed, 2=offline, 3=removed, 4=failed to open, 5=faulted, 6=degraded, 7=healthy).
.It Sy vdev_ashift
The ashift value of the vdev.
.It Sy vdev_complete_ts
The time the last I/O request completed for the specified vdev.
.It Sy vdev_delta_ts
The time since the last I/O request completed for the specified vdev.
.It Sy vdev_spare_paths
List of spares, including full path and any
.Em -partX .
.It Sy vdev_spare_guids
GUID(s) of spares.
.It Sy vdev_read_errors
How many read errors that have been detected on the vdev.
.It Sy vdev_write_errors
How many write errors that have been detected on the vdev.
.It Sy vdev_cksum_errors
How many checksum errors that have been detected on the vdev.
.It Sy parent_guid
GUID of the vdev parent.
.It Sy parent_type
Type of parent.
See
.Sy vdev_type .
.It Sy parent_path
Path of the vdev parent (if any).
.It Sy parent_devid
ID of the vdev parent (if any).
.It Sy zio_objset
The object set number for a given I/O request.
.It Sy zio_object
The object number for a given I/O request.
.It Sy zio_level
The indirect level for the block.
Level 0 is the lowest level and includes data blocks.
Values > 0 indicate metadata blocks at the appropriate level.
.It Sy zio_blkid
The block ID for a given I/O request.
.It Sy zio_err
The error number for a failure when handling a given I/O request,
compatible with
.Xr errno 3
with the value of
.Sy EBADE
used to indicate a ZFS checksum error.
.It Sy zio_offset
The offset in bytes of where to write the I/O request for the specified vdev.
.It Sy zio_size
The size in bytes of the I/O request.
.It Sy zio_flags
The current flags describing how the I/O request should be handled.
See the
.Sy I/O FLAGS
section for the full list of I/O flags.
.It Sy zio_stage
The current stage of the I/O in the pipeline.
See the
.Sy I/O STAGES
section for a full list of all the I/O stages.
.It Sy zio_pipeline
The valid pipeline stages for the I/O.
See the
.Sy I/O STAGES
section for a full list of all the I/O stages.
.It Sy zio_delay
The time elapsed (in nanoseconds) waiting for the block layer to complete the
I/O request.
Unlike
.Sy zio_delta ,
this does not include any vdev queuing time and is
therefore solely a measure of the block layer performance.
.It Sy zio_timestamp
The time when a given I/O request was submitted.
.It Sy zio_delta
The time required to service a given I/O request.
.It Sy prev_state
The previous state of the vdev.
.It Sy cksum_expected
The expected checksum value for the block.
.It Sy cksum_actual
The actual checksum value for an errant block.
.It Sy cksum_algorithm
Checksum algorithm used.
See
.Xr zfsprops 8
for more information on the available checksum algorithms.
.It Sy cksum_byteswap
Whether or not the data is byteswapped.
.It Sy bad_ranges
.No [\& Ns Ar start , end )
pairs of corruption offsets.
Offsets are always aligned on a 64-bit boundary,
and can include some gaps of non-corruption.
(See
.Sy bad_ranges_min_gap )
.It Sy bad_ranges_min_gap
In order to bound the size of the
.Sy bad_ranges
array, gaps of non-corruption
less than or equal to
.Sy bad_ranges_min_gap
bytes have been merged with
adjacent corruption.
Always at least 8 bytes, since corruption is detected on a 64-bit word basis.
.It Sy bad_range_sets
This array has one element per range in
.Sy bad_ranges .
Each element contains
the count of bits in that range which were clear in the good data and set
in the bad data.
.It Sy bad_range_clears
This array has one element per range in
.Sy bad_ranges .
Each element contains
the count of bits for that range which were set in the good data and clear in
the bad data.
.It Sy bad_set_bits
If this field exists, it is an array of
.Pq Ar bad data No & ~( Ns Ar good data ) ;
that is, the bits set in the bad data which are cleared in the good data.
Each element corresponds a byte whose offset is in a range in
.Sy bad_ranges ,
and the array is ordered by offset.
Thus, the first element is the first byte in the first
.Sy bad_ranges
range, and the last element is the last byte in the last
.Sy bad_ranges
range.
.It Sy bad_cleared_bits
Like
.Sy bad_set_bits ,
but contains
.Pq Ar good data No & ~( Ns Ar bad data ) ;
that is, the bits set in the good data which are cleared in the bad data.
.It Sy bad_set_histogram
If this field exists, it is an array of counters.
Each entry counts bits set in a particular bit of a big-endian uint64 type.
The first entry counts bits
set in the high-order bit of the first byte, the 9th byte, etc, and the last
entry counts bits set of the low-order bit of the 8th byte, the 16th byte, etc.
This information is useful for observing a stuck bit in a parallel data path,
such as IDE or parallel SCSI.
.It Sy bad_cleared_histogram
If this field exists, it is an array of counters.
Each entry counts bit clears in a particular bit of a big-endian uint64 type.
The first entry counts bits
clears of the high-order bit of the first byte, the 9th byte, etc, and the
last entry counts clears of the low-order bit of the 8th byte, the 16th byte, etc.
This information is useful for observing a stuck bit in a parallel data
path, such as IDE or parallel SCSI.
.El
.
.Sh I/O STAGES
The ZFS I/O pipeline is comprised of various stages which are defined below.
The individual stages are used to construct these basic I/O
operations: Read, Write, Free, Claim, and Ioctl.
These stages may be
set on an event to describe the life cycle of a given I/O request.
.Pp
.TS
tab(:);
l l l .
Stage:Bit Mask:Operations
_:_:_
ZIO_STAGE_OPEN:0x00000001:RWFCI
ZIO_STAGE_READ_BP_INIT:0x00000002:R----
ZIO_STAGE_WRITE_BP_INIT:0x00000004:-W---
ZIO_STAGE_FREE_BP_INIT:0x00000008:--F--
ZIO_STAGE_ISSUE_ASYNC:0x00000010:RWF--
ZIO_STAGE_WRITE_COMPRESS:0x00000020:-W---
ZIO_STAGE_ENCRYPT:0x00000040:-W---
ZIO_STAGE_CHECKSUM_GENERATE:0x00000080:-W---
ZIO_STAGE_NOP_WRITE:0x00000100:-W---
ZIO_STAGE_DDT_READ_START:0x00000200:R----
ZIO_STAGE_DDT_READ_DONE:0x00000400:R----
ZIO_STAGE_DDT_WRITE:0x00000800:-W---
ZIO_STAGE_DDT_FREE:0x00001000:--F--
ZIO_STAGE_GANG_ASSEMBLE:0x00002000:RWFC-
ZIO_STAGE_GANG_ISSUE:0x00004000:RWFC-
ZIO_STAGE_DVA_THROTTLE:0x00008000:-W---
ZIO_STAGE_DVA_ALLOCATE:0x00010000:-W---
ZIO_STAGE_DVA_FREE:0x00020000:--F--
ZIO_STAGE_DVA_CLAIM:0x00040000:---C-
ZIO_STAGE_READY:0x00080000:RWFCI
ZIO_STAGE_VDEV_IO_START:0x00100000:RW--I
ZIO_STAGE_VDEV_IO_DONE:0x00200000:RW--I
ZIO_STAGE_VDEV_IO_ASSESS:0x00400000:RW--I
ZIO_STAGE_CHECKSUM_VERIFY:0x00800000:R----
ZIO_STAGE_DONE:0x01000000:RWFCI
.TE
.
.Sh I/O FLAGS
Every I/O request in the pipeline contains a set of flags which describe its
function and are used to govern its behavior.
These flags will be set in an event as a
.Sy zio_flags
payload entry.
.Pp
.TS
tab(:);
l l .
Flag:Bit Mask
_:_
ZIO_FLAG_DONT_AGGREGATE:0x00000001
ZIO_FLAG_IO_REPAIR:0x00000002
ZIO_FLAG_SELF_HEAL:0x00000004
ZIO_FLAG_RESILVER:0x00000008
ZIO_FLAG_SCRUB:0x00000010
ZIO_FLAG_SCAN_THREAD:0x00000020
ZIO_FLAG_PHYSICAL:0x00000040
ZIO_FLAG_CANFAIL:0x00000080
ZIO_FLAG_SPECULATIVE:0x00000100
ZIO_FLAG_CONFIG_WRITER:0x00000200
ZIO_FLAG_DONT_RETRY:0x00000400
ZIO_FLAG_DONT_CACHE:0x00000800
ZIO_FLAG_NODATA:0x00001000
ZIO_FLAG_INDUCE_DAMAGE:0x00002000
ZIO_FLAG_IO_ALLOCATING:0x00004000
ZIO_FLAG_IO_RETRY:0x00008000
ZIO_FLAG_PROBE:0x00010000
ZIO_FLAG_TRYHARD:0x00020000
ZIO_FLAG_OPTIONAL:0x00040000
ZIO_FLAG_DONT_QUEUE:0x00080000
ZIO_FLAG_DONT_PROPAGATE:0x00100000
ZIO_FLAG_IO_BYPASS:0x00200000
ZIO_FLAG_IO_REWRITE:0x00400000
ZIO_FLAG_RAW_COMPRESS:0x00800000
ZIO_FLAG_RAW_ENCRYPT:0x01000000
ZIO_FLAG_GANG_CHILD:0x02000000
ZIO_FLAG_DDT_CHILD:0x04000000
ZIO_FLAG_GODFATHER:0x08000000
ZIO_FLAG_NOPWRITE:0x10000000
ZIO_FLAG_REEXECUTED:0x20000000
ZIO_FLAG_DELEGATED:0x40000000
ZIO_FLAG_FASTWRITE:0x80000000
.TE


@ -30,7 +30,7 @@
.\" Copyright 2019 Joyent, Inc.
.\"
.Dd June 30, 2019
.Dt ZFSCONCEPTS 8
.Dt ZFSCONCEPTS 7
.Os
.
.Sh NAME


@ -38,7 +38,7 @@
.\" Copyright (c) 2019, Kjeld Schouten-Lebbing
.\"
.Dd May 24, 2021
.Dt ZFSPROPS 8
.Dt ZFSPROPS 7
.Os
.
.Sh NAME
@ -304,7 +304,7 @@ The used space of a snapshot
.Po see the
.Sx Snapshots
section of
.Xr zfsconcepts 8
.Xr zfsconcepts 7
.Pc
is space that is referenced exclusively by this snapshot.
If this snapshot is destroyed, the amount of
@ -777,7 +777,7 @@ does not support the
algorithm.
.Pp
Please see
.Xr zpool-features 5
.Xr zpool-features 7
for more information on these algorithms.
.Pp
Changing this property affects only newly-written data.
@ -818,7 +818,7 @@ but can only be used on pools with the
feature set to
.Sy enabled .
See
.Xr zpool-features 5
.Xr zpool-features 7
for details on ZFS feature flags and the
.Sy lz4_compress
feature.
@ -1019,7 +1019,7 @@ be enabled on a system.
See the
.Sx Deduplication
section of
.Xr zfsconcepts 8 .
.Xr zfsconcepts 7 .
.It Xo
.Sy dnodesize Ns = Ns Sy legacy Ns | Ns Sy auto Ns | Ns Sy 1k Ns | Ns
.Sy 2k Ns | Ns Sy 4k Ns | Ns Sy 8k Ns | Ns Sy 16k
@ -1192,7 +1192,7 @@ does not override the ancestor's
but rather imposes an additional limit.
This feature must be enabled to be used
.Po see
.Xr zpool-features 5
.Xr zpool-features 7
.Pc .
.It Sy special_small_blocks Ns = Ns Ar size
This value represents the threshold block size for including small file
@ -1207,14 +1207,14 @@ will be allocated in the special class.
Before setting this property, a special class vdev must be added to the
pool.
See
.Xr zpoolconcepts 8
.Xr zpoolconcepts 7
for more details on the special allocation class.
.It Sy mountpoint Ns = Ns Pa path Ns | Ns Sy none Ns | Ns Sy legacy
Controls the mount point used for this file system.
See the
.Sx Mount Points
section of
.Xr zfsconcepts 8
.Xr zfsconcepts 7
for more information on how this property is used.
.Pp
When the
@ -1292,7 +1292,7 @@ For example, this means that recursive snapshots taken from the global zone are
counted against each delegated dataset within a zone.
This feature must be enabled to be used
.Po see
.Xr zpool-features 5
.Xr zpool-features 7
.Pc .
.It Sy userquota@ Ns Ar user Ns = Ns Ar size Ns | Ns Sy none
Limits the amount of space consumed by the specified user.
@ -1435,7 +1435,7 @@ If the
feature is enabled on the pool, the size may be up to
.Ar 1MB .
See
.Xr zpool-features 5
.Xr zpool-features 7
for details on ZFS feature flags.
.Pp
Changing the file system's
@ -1686,7 +1686,7 @@ directory is hidden or visible in the root of the file system as discussed in
the
.Sx Snapshots
section of
.Xr zfsconcepts 8 .
.Xr zfsconcepts 7 .
The default value is
.Sy hidden .
.It Sy sync Ns = Ns Sy standard Ns | Ns Sy always Ns | Ns Sy disabled


@ -19,7 +19,7 @@
.\" Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
.\"
.Dd May 31, 2021
.Dt ZPOOL-FEATURES 5
.Dt ZPOOL-FEATURES 7
.Os
.
.Sh NAME
@ -521,7 +521,7 @@ will not match the source.
Its use by
.Nm zfs Cm send Fl i
has been disabled by default
.Pq see Sy send_holes_without_birth_time No in Xr zfs-module-parameters 5 .
.Pq see Sy send_holes_without_birth_time No in Xr zfs 4 .
.Pp
This feature improves performance of incremental sends
.Pq Nm zfs Cm send Fl i


@ -27,7 +27,7 @@
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd June 2, 2021
.Dt ZPOOLCONCEPTS 8
.Dt ZPOOLCONCEPTS 7
.Os
.
.Sh NAME
@ -508,5 +508,5 @@ in the special class by setting the
.Sy special_small_blocks
property to nonzero.
See
.Xr zfsprops 8
.Xr zfsprops 7
for more info on this property.


@ -28,7 +28,7 @@
.\" Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
.\"
.Dd May 27, 2021
.Dt ZPOOLPROPS 8
.Dt ZPOOLPROPS 7
.Os
.
.Sh NAME
@ -89,7 +89,7 @@ This discrepancy is due to several factors, including raidz parity;
zfs reservation, quota, refreservation, and refquota properties; and space set aside by
.Sy spa_slop_shift
(see
.Xr zfs-module-parameters 5
.Xr zfs 4
for more information).
.It Sy freeing
After a file system or snapshot is destroyed, the space it was using is
@ -121,7 +121,7 @@ Total size of the storage pool.
.It Sy unsupported@ Ns Em guid
Information about unsupported features that are enabled on the pool.
See
.Xr zpool-features 5
.Xr zpool-features 7
for details.
.El
.Pp
@ -316,7 +316,7 @@ whitespace and/or commas.
Only features present in all files may be enabled.
.Pp
See
.Xr zpool-features 5 ,
.Xr zpool-features 7 ,
.Xr zpool-create 8
and
.Xr zpool-upgrade 8
@ -357,7 +357,7 @@ which moves
.Ar feature_name
to the enabled state.
See
.Xr zpool-features 5
.Xr zpool-features 7
for details on feature states.
.It Sy listsnapshots Ns = Ns Sy on Ns | Ns Sy off
Controls whether information about snapshots associated with this pool is
@ -390,13 +390,13 @@ in use.
See
.Sy zfs_multihost_interval
in the
.Xr zfs-module-parameters 5
.Xr zfs 4
manual page.
In order to enable this property each host must set a unique hostid.
See
.Xr genhostid 1
.Xr zgenhostid 8
.Xr spl-module-parameters 5
.Xr spl 4
for additional details.
The default value is
.Sy off .
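As an aside to the multihost description above, enabling it boils down to giving each host a unique hostid and then turning the property on. A minimal sketch ('tank' is a hypothetical pool name; not part of this diff):

  zgenhostid                    # store a unique hostid in /etc/hostid (see zgenhostid(8))
  zpool set multihost=on tank   # enable multihost (MMP) protection on the pool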


@ -56,7 +56,7 @@ in most cases.
are handled according to the
.Em Temporary Mount Point Properties
section in
.Xr zfsprops 8 ,
.Xr zfsprops 7 ,
except for those described below.
.Pp
If


@ -237,7 +237,6 @@ Terminate the daemon.
.El
.
.Sh SEE ALSO
.Xr zfs-events 5 ,
.Xr zfs 8 ,
.Xr zpool 8 ,
.Xr zpool-events 8


@ -56,7 +56,7 @@ a redaction bookmark.
.Pp
This feature must be enabled to be used.
See
.Xr zpool-features 5
.Xr zpool-features 7
for details on ZFS feature flags and the
.Sy bookmarks
feature.


@ -47,7 +47,7 @@
See the
.Sx Clones
section of
.Xr zfsconcepts 8
.Xr zfsconcepts 7
for details.
The target dataset can be located anywhere in the ZFS hierarchy,
and is created as the same type as the original.


@ -184,7 +184,7 @@ See
in the
.Em Native Properties
section of
.Xr zfsprops 8
.Xr zfsprops 7
for more information about sparse volumes.
.It Fl n
Do a dry-run


@ -119,5 +119,5 @@ or name
.Ar jailname .
.El
.Sh SEE ALSO
.Xr jail 8 ,
.Xr zfsprops 8
.Xr zfsprops 7 ,
.Xr jail 8


@ -90,7 +90,7 @@ The property must be:
One of the properties described in the
.Sx Native Properties
section of
.Xr zfsprops 8
.Xr zfsprops 7
.It
A user property
.It
@ -118,7 +118,7 @@ value of the property.
The property must be one of the properties described in the
.Sx Properties
section of
.Xr zfsprops 8
.Xr zfsprops 7
or the value
.Sy name
to sort by the dataset name.
@ -158,5 +158,5 @@ displays only snapshots.
.El
.
.Sh SEE ALSO
.Xr zfs-get 8 ,
.Xr zfsprops 8
.Xr zfsprops 7 ,
.Xr zfs-get 8


@ -296,6 +296,6 @@ Deduplication with encryption will leak information about which blocks
are equivalent in a dataset and will incur an extra CPU cost for each block written.
.
.Sh SEE ALSO
.Xr zfsprops 7 ,
.Xr zfs-create 8 ,
.Xr zfs-set 8 ,
.Xr zfsprops 8
.Xr zfs-set 8


@ -186,7 +186,7 @@ to re-run all generators:
.Xr systemd.mount 5 ,
.Xr systemd.target 5 ,
.Xr zfs 5 ,
.Xr zfs-events 5 ,
.Xr systemd.generator 7 ,
.Xr systemd.special 7 ,
.Xr zed 8
.Xr zed 8 ,
.Xr zpool-events 8


@ -91,7 +91,7 @@ duration of the mount.
See the
.Em Temporary Mount Point Properties
section of
.Xr zfsprops 8
.Xr zfsprops 7
for details.
.It Fl l
Load keys for encrypted filesystems as they are being mounted.


@ -357,7 +357,7 @@ To use this flag, the storage pool must have the
.Sy extensible_dataset
feature enabled.
See
.Xr zpool-features 5
.Xr zpool-features 7
for details on ZFS feature flags.
.It Fl u
File system that is associated with the received stream is not mounted.


@ -110,7 +110,7 @@ The receiving system must have the
.Sy large_blocks
pool feature enabled as well.
See
.Xr zpool-features 5
.Xr zpool-features 7
for details on ZFS feature flags and the
.Sy large_blocks
feature.
@ -161,7 +161,7 @@ received as an encrypted dataset, since encrypted datasets cannot use the
.Sy embedded_data
feature.
See
.Xr zpool-features 5
.Xr zpool-features 7
for details on ZFS feature flags and the
.Sy embedded_data
feature.
@ -308,7 +308,7 @@ The receiving system must have the
.Sy large_blocks
pool feature enabled as well.
See
.Xr zpool-features 5
.Xr zpool-features 7
for details on ZFS feature flags and the
.Sy large_blocks
feature.
@ -372,7 +372,7 @@ since encrypted datasets cannot use the
.Sy embedded_data
feature.
See
.Xr zpool-features 5
.Xr zpool-features 7
for details on ZFS feature flags and the
.Sy embedded_data
feature.


@ -65,7 +65,7 @@
.Xc
Only some properties can be edited.
See
.Xr zfsprops 8
.Xr zfsprops 7
for more information on what properties can be set and acceptable
values.
Numeric values can be specified as exact values, or in a human-readable form
@ -78,7 +78,7 @@ User properties can be set on snapshots.
For more information, see the
.Em User Properties
section of
.Xr zfsprops 8 .
.Xr zfsprops 7 .
.It Xo
.Nm zfs
.Cm get
@ -114,7 +114,7 @@ This command takes a comma-separated list of properties as described in the
and
.Sx User Properties
sections of
.Xr zfsprops 8 .
.Xr zfsprops 7 .
.Pp
The value
.Sy all
@ -163,7 +163,7 @@ restored to default if no ancestor has the property set, or with the
.Fl S
option reverted to the received value if one exists.
See
.Xr zfsprops 8
.Xr zfsprops 7
for a listing of default values, and details on which properties can be
inherited.
.Bl -tag -width "-r"
@ -178,5 +178,5 @@ option was not specified.
.El
.
.Sh SEE ALSO
.Xr zfs-list 8 ,
.Xr zfsprops 8
.Xr zfsprops 7 ,
.Xr zfs-list 8


@ -87,4 +87,4 @@ The command can also be given a path to a ZFS file system shared on the system.
.Sh SEE ALSO
.Xr exports 5 ,
.Xr smb.conf 5 ,
.Xr zfsprops 8
.Xr zfsprops 7


@ -54,7 +54,7 @@ can be used as an alias for
See the
.Sx Snapshots
section of
.Xr zfsconcepts 8
.Xr zfsconcepts 7
for details.
.Bl -tag -width "-o"
.It Fl o Ar property Ns = Ns Ar value


@ -77,7 +77,7 @@ systems running older versions of ZFS.
.Pp
In general, the file system version is independent of the pool version.
See
.Xr zpool-features 5
.Xr zpool-features 7
for information on features of ZFS storage pools.
.Pp
In some cases, the file system version and the pool version are interrelated and


@ -183,5 +183,5 @@ for types.
.El
.
.Sh SEE ALSO
.Xr zfs-set 8 ,
.Xr zfsprops 8
.Xr zfsprops 7 ,
.Xr zfs-set 8


@ -96,7 +96,7 @@ or
.El
.Pp
See
.Xr zfsconcepts 8
.Xr zfsconcepts 7
for details.
.
.Ss Properties
@ -108,7 +108,7 @@ In addition, native properties are either editable or read-only.
User properties have no effect on ZFS behavior, but you can use them to annotate
datasets in a way that is meaningful in your environment.
For more information about properties, see
.Xr zfsprops 8 .
.Xr zfsprops 7 .
.
.Ss Encryption
Enabling the
@ -354,7 +354,7 @@ Snapshots are displayed if
The default is
.Sy off .
See
.Xr zpoolprops 8
.Xr zpoolprops 7
for more information on pool properties.
.Bd -literal -compact -offset Ds
.No # Nm zfs Cm list
@ -728,6 +728,8 @@ This option is provided for backwards compatibility with older ZFS versions.
.Xr acl 5 ,
.Xr attributes 5 ,
.Xr exports 5 ,
.Xr zfsconcepts 7 ,
.Xr zfsprops 7 ,
.Xr exportfs 8 ,
.Xr mount 8 ,
.Xr net 8 ,
@ -768,6 +770,4 @@ This option is provided for backwards compatibility with older ZFS versions.
.Xr zfs-upgrade 8 ,
.Xr zfs-userspace 8 ,
.Xr zfs-wait 8 ,
.Xr zfsconcepts 8 ,
.Xr zfsprops 8 ,
.Xr zpool 8


@ -83,7 +83,7 @@ digits long, optionally prefixed by
.Xr genhostid 1 ,
.Xr hostid 1 ,
.Xr sethostid 3 ,
.Xr spl-module-parameters 5
.Xr spl 4
.Sh HISTORY
.Nm
emulates the


@ -46,7 +46,7 @@ The
specification is described in the
.Em Virtual Devices
section of
.Xr zpoolconcepts 8 .
.Xr zpoolconcepts 7 .
The behavior of the
.Fl f
option, and the device checks performed are described in the
@ -87,7 +87,7 @@ flag.
.It Fl o Ar property Ns = Ns Ar value
Sets the given pool properties.
See the
.Xr zpoolprops 8
.Xr zpoolprops 7
manual page for a list of valid properties that can be set.
The only property supported at the moment is
.Sy ashift .


@ -71,7 +71,7 @@ Not all devices can be overridden in this manner.
.It Fl o Ar property Ns = Ns Ar value
Sets the given pool properties.
See the
.Xr zpoolprops 8
.Xr zpoolprops 7
manual page for a list of valid properties that can be set.
The only property supported at the moment is
.Sy ashift .


@ -80,7 +80,7 @@ The
specification is described in the
.Sx Virtual Devices
section of
.Xr zpoolconcepts 8 .
.Xr zpoolconcepts 7 .
.Pp
The command attempts to verify that each device specified is accessible and not
currently in use by another subsystem.
@ -139,7 +139,7 @@ Individual features can be enabled by setting their corresponding properties to
with
.Fl o .
See
.Xr zpool-features 5
.Xr zpool-features 7
for details about feature properties.
.It Fl f
Forces use of
@ -160,7 +160,7 @@ The mount point must be an absolute path,
or
.Sy none .
For more information on dataset mount points, see
.Xr zfsprops 8 .
.Xr zfsprops 7 .
.It Fl n
Displays the configuration that would be used without actually creating the
pool.
@ -169,23 +169,23 @@ device sharing.
.It Fl o Ar property Ns = Ns Ar value
Sets the given pool properties.
See
.Xr zpoolprops 8
.Xr zpoolprops 7
for a list of valid properties that can be set.
.It Fl o Ar compatibility Ns = Ns Sy off Ns | Ns Sy legacy Ns | Ns Ar file Ns Oo , Ns Ar file Oc Ns
Specifies compatibility feature sets.
See
.Xr zpool-features 5
.Xr zpool-features 7
for more information about compatibility feature sets.
.It Fl o Sy feature@ Ns Ar feature Ns = Ns Ar value
Sets the given pool feature.
See the
.Xr zpool-features 5
.Xr zpool-features 7
section for a list of valid features that can be set.
Value can be either disabled or enabled.
.It Fl O Ar file-system-property Ns = Ns Ar value
Sets the given file system properties in the root file system of the pool.
See
.Xr zfsprops 8
.Xr zfsprops 7
for a list of valid properties that can be set.
.It Fl R Ar root
Equivalent to


@ -49,10 +49,11 @@ These events are consumed by the
and used to automate administrative tasks such as replacing a failed device
with a hot spare.
For more information about the subclasses and event payloads
that can be generated see the
.Xr zfs-events 5
man page.
.Pp
that can be generated see
.Sx EVENTS
and the following sections.
.
.Sh OPTIONS
.Bl -tag -compact -width Ds
.It Fl c
Clear all previous events.
@ -66,8 +67,417 @@ single tab instead of arbitrary space.
Print the entire payload for each event.
.El
.
.Sh EVENTS
These are the different event subclasses.
The full event name would be
.Sy ereport.fs.zfs.\& Ns Em SUBCLASS ,
but only the last part is listed here.
.Pp
.Bl -tag -compact -width "vdev.bad_guid_sum"
.It Sy checksum
Issued when a checksum error has been detected.
.It Sy io
Issued when there is an I/O error in a vdev in the pool.
.It Sy data
Issued when there have been data errors in the pool.
.It Sy deadman
Issued when an I/O request is determined to be "hung"; this can be caused
by lost completion events due to flaky hardware or drivers.
See
.Sy zfs_deadman_failmode
in
.Xr zfs 4
for additional information regarding "hung" I/O detection and configuration.
.It Sy delay
Issued when a completed I/O request exceeds the maximum allowed time
specified by the
.Sy zio_slow_io_ms
module parameter.
This can be an indicator of problems with the underlying storage device.
The number of delay events is ratelimited by the
.Sy zfs_slow_io_events_per_second
module parameter.
.It Sy config
Issued every time a vdev change has been made to the pool.
.It Sy zpool
Issued when a pool cannot be imported.
.It Sy zpool.destroy
Issued when a pool is destroyed.
.It Sy zpool.export
Issued when a pool is exported.
.It Sy zpool.import
Issued when a pool is imported.
.It Sy zpool.reguid
Issued when a REGUID (a new unique identifier for the pool) has been generated.
.It Sy vdev.unknown
Issued when the vdev is unknown.
For example, trying to clear device errors on a vdev that has failed or been
removed from the system or pool and is no longer available.
.It Sy vdev.open_failed
Issued when a vdev could not be opened (for example, because it did not exist).
.It Sy vdev.corrupt_data
Issued when corrupt data has been detected on a vdev.
.It Sy vdev.no_replicas
Issued when there are no more replicas to sustain the pool.
This would lead to the pool being
.Em DEGRADED .
.It Sy vdev.bad_guid_sum
Issued when a missing device in the pool has been detected.
.It Sy vdev.too_small
Issued when the system (kernel) has removed a device, and ZFS
notices that the device is no longer there.
This is usually followed by a
.Sy probe_failure
event.
.It Sy vdev.bad_label
Issued when the label is OK but invalid.
.It Sy vdev.bad_ashift
Issued when the ashift alignment requirement has increased.
.It Sy vdev.remove
Issued when a vdev is detached from a mirror (or a spare detached from a
vdev where it has been used to replace a failed drive; this only works if
the original drive has been re-added).
.It Sy vdev.clear
Issued when clearing device errors in a pool,
such as by running
.Nm zpool Cm clear
on a device in the pool.
.It Sy vdev.check
Issued when a check to see if a given vdev could be opened is started.
.It Sy vdev.spare
Issued when a spare has kicked in to replace a failed device.
.It Sy vdev.autoexpand
Issued when a vdev can be automatically expanded.
.It Sy io_failure
Issued when there is an I/O failure in a vdev in the pool.
.It Sy probe_failure
Issued when a probe fails on a vdev.
This would occur if a vdev
has been removed from the system outside of ZFS (for example, if the kernel
has removed the device).
.It Sy log_replay
Issued when the intent log cannot be replayed.
This can occur in the case of a missing or damaged log device.
.It Sy resilver.start
Issued when a resilver is started.
.It Sy resilver.finish
Issued when the running resilver has finished.
.It Sy scrub.start
Issued when a scrub is started on a pool.
.It Sy scrub.finish
Issued when a pool has finished scrubbing.
.It Sy scrub.abort
Issued when a scrub is aborted on a pool.
.It Sy scrub.resume
Issued when a scrub is resumed on a pool.
.It Sy scrub.paused
Issued when a scrub is paused on a pool.
.It Sy bootfs.vdev.attach
.El
.
.Sh PAYLOADS
This is the payload (data, information) that accompanies an
event.
.Pp
For
.Xr zed 8 ,
these are set to uppercase and prefixed with
.Sy ZEVENT_ .
.Pp
.Bl -tag -compact -width "vdev_cksum_errors"
.It Sy pool
Pool name.
.It Sy pool_failmode
Failmode -
.Sy wait ,
.Sy continue ,
or
.Sy panic .
See the
.Sy failmode
property in
.Xr zpoolprops 7
for more information.
.It Sy pool_guid
The GUID of the pool.
.It Sy pool_context
The load state for the pool (0=none, 1=open, 2=import, 3=tryimport, 4=recover,
5=error).
.It Sy vdev_guid
The GUID of the vdev in question (the vdev failing or operated upon with
.Nm zpool Cm clear ,
etc.).
.It Sy vdev_type
Type of vdev -
.Sy disk ,
.Sy file ,
.Sy mirror ,
etc.
See the
.Sy Virtual Devices
section of
.Xr zpoolconcepts 7
for more information on possible values.
.It Sy vdev_path
Full path of the vdev, including any
.Em -partX .
.It Sy vdev_devid
ID of vdev (if any).
.It Sy vdev_fru
Physical FRU location.
.It Sy vdev_state
State of vdev (0=uninitialized, 1=closed, 2=offline, 3=removed, 4=failed to open, 5=faulted, 6=degraded, 7=healthy).
.It Sy vdev_ashift
The ashift value of the vdev.
.It Sy vdev_complete_ts
The time the last I/O request completed for the specified vdev.
.It Sy vdev_delta_ts
The time since the last I/O request completed for the specified vdev.
.It Sy vdev_spare_paths
List of spares, including full path and any
.Em -partX .
.It Sy vdev_spare_guids
GUID(s) of spares.
.It Sy vdev_read_errors
The number of read errors detected on the vdev.
.It Sy vdev_write_errors
The number of write errors detected on the vdev.
.It Sy vdev_cksum_errors
The number of checksum errors detected on the vdev.
.It Sy parent_guid
GUID of the vdev parent.
.It Sy parent_type
Type of parent.
See
.Sy vdev_type .
.It Sy parent_path
Path of the vdev parent (if any).
.It Sy parent_devid
ID of the vdev parent (if any).
.It Sy zio_objset
The object set number for a given I/O request.
.It Sy zio_object
The object number for a given I/O request.
.It Sy zio_level
The indirect level for the block.
Level 0 is the lowest level and includes data blocks.
Values > 0 indicate metadata blocks at the appropriate level.
.It Sy zio_blkid
The block ID for a given I/O request.
.It Sy zio_err
The error number for a failure when handling a given I/O request,
compatible with
.Xr errno 3
with the value of
.Sy EBADE
used to indicate a ZFS checksum error.
.It Sy zio_offset
The offset in bytes of where to write the I/O request for the specified vdev.
.It Sy zio_size
The size in bytes of the I/O request.
.It Sy zio_flags
The current flags describing how the I/O request should be handled.
See the
.Sy I/O FLAGS
section for the full list of I/O flags.
.It Sy zio_stage
The current stage of the I/O in the pipeline.
See the
.Sy I/O STAGES
section for a full list of all the I/O stages.
.It Sy zio_pipeline
The valid pipeline stages for the I/O.
See the
.Sy I/O STAGES
section for a full list of all the I/O stages.
.It Sy zio_delay
The time elapsed (in nanoseconds) waiting for the block layer to complete the
I/O request.
Unlike
.Sy zio_delta ,
this does not include any vdev queuing time and is
therefore solely a measure of the block layer performance.
.It Sy zio_timestamp
The time when a given I/O request was submitted.
.It Sy zio_delta
The time required to service a given I/O request.
.It Sy prev_state
The previous state of the vdev.
.It Sy cksum_expected
The expected checksum value for the block.
.It Sy cksum_actual
The actual checksum value for an errant block.
.It Sy cksum_algorithm
Checksum algorithm used.
See
.Xr zfsprops 7
for more information on the available checksum algorithms.
.It Sy cksum_byteswap
Whether or not the data is byteswapped.
.It Sy bad_ranges
.No [\& Ns Ar start , end )
pairs of corruption offsets.
Offsets are always aligned on a 64-bit boundary,
and can include some gaps of non-corruption.
(See
.Sy bad_ranges_min_gap )
.It Sy bad_ranges_min_gap
In order to bound the size of the
.Sy bad_ranges
array, gaps of non-corruption
less than or equal to
.Sy bad_ranges_min_gap
bytes have been merged with
adjacent corruption.
Always at least 8 bytes, since corruption is detected on a 64-bit word basis.
.It Sy bad_range_sets
This array has one element per range in
.Sy bad_ranges .
Each element contains
the count of bits in that range which were clear in the good data and set
in the bad data.
.It Sy bad_range_clears
This array has one element per range in
.Sy bad_ranges .
Each element contains
the count of bits for that range which were set in the good data and clear in
the bad data.
.It Sy bad_set_bits
If this field exists, it is an array of
.Pq Ar bad data No & ~( Ns Ar good data ) ;
that is, the bits set in the bad data which are cleared in the good data.
Each element corresponds to a byte whose offset is in a range in
.Sy bad_ranges ,
and the array is ordered by offset.
Thus, the first element is the first byte in the first
.Sy bad_ranges
range, and the last element is the last byte in the last
.Sy bad_ranges
range.
.It Sy bad_cleared_bits
Like
.Sy bad_set_bits ,
but contains
.Pq Ar good data No & ~( Ns Ar bad data ) ;
that is, the bits set in the good data which are cleared in the bad data.
.It Sy bad_set_histogram
If this field exists, it is an array of counters.
Each entry counts bits set in a particular bit of a big-endian uint64 type.
The first entry counts bits
set in the high-order bit of the first byte, the 9th byte, etc., and the last
entry counts bits set in the low-order bit of the 8th byte, the 16th byte, etc.
This information is useful for observing a stuck bit in a parallel data path,
such as IDE or parallel SCSI.
.It Sy bad_cleared_histogram
If this field exists, it is an array of counters.
Each entry counts bit clears in a particular bit of a big-endian uint64 type.
The first entry counts bit
clears of the high-order bit of the first byte, the 9th byte, etc., and the
last entry counts clears of the low-order bit of the 8th byte, the 16th byte, etc.
This information is useful for observing a stuck bit in a parallel data
path, such as IDE or parallel SCSI.
.El
.
.Sh I/O STAGES
The ZFS I/O pipeline is comprised of various stages which are defined below.
The individual stages are used to construct these basic I/O
operations: Read, Write, Free, Claim, and Ioctl.
These stages may be
set on an event to describe the life cycle of a given I/O request.
.Pp
.TS
tab(:);
l l l .
Stage:Bit Mask:Operations
_:_:_
ZIO_STAGE_OPEN:0x00000001:RWFCI
ZIO_STAGE_READ_BP_INIT:0x00000002:R----
ZIO_STAGE_WRITE_BP_INIT:0x00000004:-W---
ZIO_STAGE_FREE_BP_INIT:0x00000008:--F--
ZIO_STAGE_ISSUE_ASYNC:0x00000010:RWF--
ZIO_STAGE_WRITE_COMPRESS:0x00000020:-W---
ZIO_STAGE_ENCRYPT:0x00000040:-W---
ZIO_STAGE_CHECKSUM_GENERATE:0x00000080:-W---
ZIO_STAGE_NOP_WRITE:0x00000100:-W---
ZIO_STAGE_DDT_READ_START:0x00000200:R----
ZIO_STAGE_DDT_READ_DONE:0x00000400:R----
ZIO_STAGE_DDT_WRITE:0x00000800:-W---
ZIO_STAGE_DDT_FREE:0x00001000:--F--
ZIO_STAGE_GANG_ASSEMBLE:0x00002000:RWFC-
ZIO_STAGE_GANG_ISSUE:0x00004000:RWFC-
ZIO_STAGE_DVA_THROTTLE:0x00008000:-W---
ZIO_STAGE_DVA_ALLOCATE:0x00010000:-W---
ZIO_STAGE_DVA_FREE:0x00020000:--F--
ZIO_STAGE_DVA_CLAIM:0x00040000:---C-
ZIO_STAGE_READY:0x00080000:RWFCI
ZIO_STAGE_VDEV_IO_START:0x00100000:RW--I
ZIO_STAGE_VDEV_IO_DONE:0x00200000:RW--I
ZIO_STAGE_VDEV_IO_ASSESS:0x00400000:RW--I
ZIO_STAGE_CHECKSUM_VERIFY:0x00800000:R----
ZIO_STAGE_DONE:0x01000000:RWFCI
.TE
.
.Sh I/O FLAGS
Every I/O request in the pipeline contains a set of flags which describe its
function and are used to govern its behavior.
These flags will be set in an event as a
.Sy zio_flags
payload entry.
.Pp
.TS
tab(:);
l l .
Flag:Bit Mask
_:_
ZIO_FLAG_DONT_AGGREGATE:0x00000001
ZIO_FLAG_IO_REPAIR:0x00000002
ZIO_FLAG_SELF_HEAL:0x00000004
ZIO_FLAG_RESILVER:0x00000008
ZIO_FLAG_SCRUB:0x00000010
ZIO_FLAG_SCAN_THREAD:0x00000020
ZIO_FLAG_PHYSICAL:0x00000040
ZIO_FLAG_CANFAIL:0x00000080
ZIO_FLAG_SPECULATIVE:0x00000100
ZIO_FLAG_CONFIG_WRITER:0x00000200
ZIO_FLAG_DONT_RETRY:0x00000400
ZIO_FLAG_DONT_CACHE:0x00000800
ZIO_FLAG_NODATA:0x00001000
ZIO_FLAG_INDUCE_DAMAGE:0x00002000
ZIO_FLAG_IO_ALLOCATING:0x00004000
ZIO_FLAG_IO_RETRY:0x00008000
ZIO_FLAG_PROBE:0x00010000
ZIO_FLAG_TRYHARD:0x00020000
ZIO_FLAG_OPTIONAL:0x00040000
ZIO_FLAG_DONT_QUEUE:0x00080000
ZIO_FLAG_DONT_PROPAGATE:0x00100000
ZIO_FLAG_IO_BYPASS:0x00200000
ZIO_FLAG_IO_REWRITE:0x00400000
ZIO_FLAG_RAW_COMPRESS:0x00800000
ZIO_FLAG_RAW_ENCRYPT:0x01000000
ZIO_FLAG_GANG_CHILD:0x02000000
ZIO_FLAG_DDT_CHILD:0x04000000
ZIO_FLAG_GODFATHER:0x08000000
ZIO_FLAG_NOPWRITE:0x10000000
ZIO_FLAG_REEXECUTED:0x20000000
ZIO_FLAG_DELEGATED:0x40000000
ZIO_FLAG_FASTWRITE:0x80000000
.TE
.
.Sh SEE ALSO
.Xr zfs-events 5 ,
.Xr zfs-module-parameters 5 ,
.Xr zfs 4 ,
.Xr zed 8 ,
.Xr zpool-wait 8
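Since the event catalogue now lives in zpool-events.8 itself, a brief usage sketch of the command it documents (flags per the OPTIONS section above; output omitted):

  zpool events        # short list of events since the ZFS module was loaded
  zpool events -v     # full payload for each event (fields listed under PAYLOADS)
  zpool events -fH    # follow new events, with script-friendly tab-separated output
  zpool events -c     # clear all previous events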


@ -76,7 +76,7 @@ Property source, either
.El
.Pp
See the
.Xr zpoolprops 8
.Xr zpoolprops 7
manual page for more information on the available pool properties.
.Bl -tag -compact -offset Ds -width "-o field"
.It Fl H
@ -97,12 +97,12 @@ Display numbers in parsable (exact) values.
.Xc
Sets the given property on the specified pool.
See the
.Xr zpoolprops 8
.Xr zpoolprops 7
manual page for more information on what properties can be set and acceptable
values.
.El
.
.Sh SEE ALSO
.Xr zpool-features 5 ,
.Xr zpool-list 8 ,
.Xr zpoolprops 8
.Xr zpool-features 7 ,
.Xr zpoolprops 7 ,
.Xr zpool-list 8


@ -201,7 +201,7 @@ for a description of dataset properties and mount options.
.It Fl o Ar property Ns = Ns Ar value
Sets the specified property on the imported pool.
See the
.Xr zpoolprops 8
.Xr zpoolprops 7
manual page for more information on the available pool properties.
.It Fl R Ar root
Sets the
@ -347,7 +347,7 @@ for a description of dataset properties and mount options.
.It Fl o Ar property Ns = Ns Ar value
Sets the specified property on the imported pool.
See the
.Xr zpoolprops 8
.Xr zpoolprops 7
manual page for more information on the available pool properties.
.It Fl R Ar root
Sets the


@ -69,7 +69,7 @@ space.
.It Fl o Ar property
Comma-separated list of properties to display.
See the
.Xr zpoolprops 8
.Xr zpoolprops 7
manual page for a list of valid properties.
The default list is
.Sy name , size , allocated , free , checkpoint, expandsize , fragmentation ,


@ -70,7 +70,7 @@ If an IO error is encountered during the removal process it will be cancelled.
The
.Sy device_removal
feature flag must be enabled to remove a top-level vdev, see
.Xr zpool-features 5 .
.Xr zpool-features 7 .
.Pp
A mirrored top-level device (log or data) can be removed by specifying the top-level mirror for the
same.


@ -77,7 +77,7 @@ Not all devices can be overridden in this manner.
.It Fl o Ar property Ns = Ns Ar value
Sets the given pool properties.
See the
.Xr zpoolprops 8
.Xr zpoolprops 7
manual page for a list of valid properties that can be set.
The only property supported at the moment is
.Sy ashift .


@ -98,7 +98,7 @@ flag.
Sets the specified property for
.Ar newpool .
See the
.Xr zpoolprops 8
.Xr zpoolprops 7
manual page for more information on the available pool properties.
.It Fl R Ar root
Set


@ -50,7 +50,7 @@ is specified, then the status of each pool in the system is displayed.
For more information on pool and device health, see the
.Sx Device Failure and Recovery
section of
.Xr zpoolconcepts 8 .
.Xr zpoolconcepts 7 .
.Pp
If a scrub or resilver is in progress, this command reports the percentage done
and the estimated time to completion.


@ -48,6 +48,6 @@ will sync all pools on the system.
Otherwise, it will sync only the specified pools.
.
.Sh SEE ALSO
.Xr zpoolconcepts 7 ,
.Xr zpool-export 8 ,
.Xr zpool-iostat 8 ,
.Xr zpoolconcepts 8
.Xr zpool-iostat 8


@ -86,6 +86,6 @@ Wait until the devices are done being trimmed before returning.
.El
.
.Sh SEE ALSO
.Xr zpoolprops 7 ,
.Xr zpool-initialize 8 ,
.Xr zpool-wait 8 ,
.Xr zpoolprops 8
.Xr zpool-wait 8


@ -66,7 +66,7 @@ property).
.Xc
Displays legacy ZFS versions supported by the this version of ZFS.
See
.Xr zpool-features 5
.Xr zpool-features 7
for a description of feature flags features supported by this version of ZFS.
.It Xo
.Nm zpool
@ -87,7 +87,7 @@ then no upgrade will take place.
Once this is done, the pool will no longer be accessible on systems that do not
support feature flags.
See
.Xr zpool-features 5
.Xr zpool-features 7
for details on compatibility with systems that support feature flags, but do not
support all features enabled on the pool.
.Bl -tag -width Ds
@ -103,7 +103,7 @@ supported legacy version number.
.El
.
.Sh SEE ALSO
.Xr zpool-features 5 ,
.Xr zpool-history 8 ,
.Xr zpoolconcepts 8 ,
.Xr zpoolprops 8
.Xr zpool-features 7 ,
.Xr zpoolconcepts 7 ,
.Xr zpoolprops 7 ,
.Xr zpool-history 8


@ -54,7 +54,7 @@ See
for information on managing datasets.
.Pp
For an overview of creating and managing ZFS storage pools see the
.Xr zpoolconcepts 8
.Xr zpoolconcepts 7
manual page.
.
.Sh SUBCOMMANDS
@ -126,7 +126,7 @@ Creates a new pool by splitting all mirrors in an existing pool (which decreases
.
.Ss Properties
Available pool properties listed in the
.Xr zpoolprops 8
.Xr zpoolprops 7
manual page.
.Bl -tag -width Ds
.It Xr zpool-list 8
@ -157,10 +157,8 @@ These events are consumed by the
.Xr zed 8
and used to automate administrative tasks such as replacing a failed device
with a hot spare.
For more information about the subclasses and event payloads
that can be generated see the
.Xr zfs-events 5
man page.
That manual page also describes the subclasses and event payloads
that can be generated.
.It Xr zpool-history 8
Displays the command history of the specified pool(s) or all pools if no pool is
specified.
@ -523,9 +521,10 @@ is not set, it is assumed that the user is allowed to run
.Sy Evolving
.
.Sh SEE ALSO
.Xr zfs-events 5 ,
.Xr zfs-module-parameters 5 ,
.Xr zpool-features 5 ,
.Xr zfs 4 ,
.Xr zpool-features 7 ,
.Xr zpoolconcepts 7 ,
.Xr zpoolprops 7 ,
.Xr zed 8 ,
.Xr zfs 8 ,
.Xr zpool-add 8 ,
@ -558,6 +557,4 @@ is not set, it is assumed that the user is allowed to run
.Xr zpool-sync 8 ,
.Xr zpool-trim 8 ,
.Xr zpool-upgrade 8 ,
.Xr zpool-wait 8 ,
.Xr zpoolconcepts 8 ,
.Xr zpoolprops 8
.Xr zpool-wait 8


@ -477,7 +477,9 @@ systemctl --system daemon-reload >/dev/null || true
%{_bindir}/dbufstat
# Man pages
%{_mandir}/man1/*
%{_mandir}/man4/*
%{_mandir}/man5/*
%{_mandir}/man7/*
%{_mandir}/man8/*
# Configuration files and scripts
%{_libexecdir}/%{name}


@ -12,7 +12,7 @@ s:lib/libzfs:usr/src/lib/libzfs/common:g
s:lib/libzfs_core:usr/src/lib/libzfs_core/common:g
s:lib/libzpool:lib/libzpool/common:g
s:lib/libzpool:usr/src/lib/libzpool:g
s:man/man5/zpool-features.5:usr/src/man/man5/zpool-features.5:g
s:man/man7/zpool-features.7:usr/src/man/man5/zpool-features.5:g
s:man/man8/zfs.8:usr/src/man/man1m/zfs.1m:g
s:module/nvpair:usr/src/common/nvpair:g
s:module/zcommon:usr/src/common/zfs/:g


@ -84,7 +84,7 @@ log_must zpool import -d $VDIR $TESTPOOL
# from ARC, accessed later on as prefetches and transition to MRU as
# prefetches.
# If accessed again they are counted as MRU and the l2arc_mru_asize arcstat
# will not be 0 (mentioned also in zfs-module-parameters.5)
# will not be 0 (mentioned also in zfs.4)
# For the purposes of this test we mitigate this by disabling (predictive)
# ZFS prefetches with zfs_prefetch_disable=1.
log_must test $(get_arcstat l2_mru_asize) -eq 0