MFp4: Synchronize with vendor (mostly 'zfs rename -r').

Pawel Jakub Dawidek 2007-04-12 23:16:02 +00:00
parent 1da61b3665
commit 6704017a15
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=168676
26 changed files with 904 additions and 314 deletions

View File

@ -28,7 +28,6 @@ zdb \- ZFS debugger
.fi
.SH DESCRIPTION
.LP
The \fBzdb\fR command is used by support engineers to diagnose failures and gather statistics. Since the \fBZFS\fR file system is always consistent on disk and is self-repairing, \fBzdb\fR should only be run under the direction of a support engineer.
.LP
@ -36,7 +35,6 @@ If no arguments are specified, \fBzdb\fR performs basic consistency checks on t
.LP
Any options supported by this command are internal to Sun and subject to change at any time.
.SH EXIT STATUS
.LP
The following exit values are returned:
.sp
@ -73,7 +71,6 @@ Invalid command line options were specified.
.RE
.SH ATTRIBUTES
.LP
See \fBattributes\fR(5) for descriptions of the following attributes:
.sp
@ -92,6 +89,5 @@ Interface StabilityUnstable
.TE
.SH SEE ALSO
.LP
\fBzfs\fR(1M), \fBzpool\fR(1M), \fBattributes\fR(5)

View File

@ -26,103 +26,127 @@ zfs \- configures ZFS file systems
.nf
\fBzfs\fR [\fB-?\fR]
.fi
.LP
.nf
\fBzfs\fR \fBcreate\fR [[\fB-o\fR property=\fIvalue\fR]]... \fIfilesystem\fR
.fi
.LP
.nf
\fBzfs\fR \fBcreate\fR [\fB-s\fR] [\fB-b\fR \fIblocksize\fR] [[\fB-o\fR property=\fIvalue\fR]]... \fB-V\fR \fIsize\fR \fIvolume\fR
.fi
.LP
.nf
\fBzfs\fR \fBdestroy\fR [\fB-rRf\fR] \fIfilesystem\fR|\fIvolume\fR|\fIsnapshot\fR
.fi
.LP
.nf
\fBzfs\fR \fBclone\fR \fIsnapshot\fR \fIfilesystem\fR|\fIvolume\fR
.fi
.LP
.nf
\fBzfs\fR \fBpromote\fR \fIfilesystem\fR
.fi
.LP
.nf
\fBzfs\fR \fBrename\fR \fIfilesystem\fR|\fIvolume\fR|\fIsnapshot\fR
[\fIfilesystem\fR|\fIvolume\fR|\fIsnapshot\fR]
.fi
.LP
.nf
\fBzfs\fR \fBsnapshot\fR [\fB-r\fR] \fIfilesystem@name\fR|\fIvolume@name\fR
.fi
.LP
.nf
\fBzfs\fR \fBrollback\fR [\fB-rRf\fR] \fIsnapshot\fR
.fi
.LP
.nf
\fBzfs\fR \fBlist\fR [\fB-rH\fR] [\fB-o\fR \fIprop\fR[,\fIprop\fR]]... [\fB-t\fR \fItype\fR[,\fItype\fR]...]
[\fB-s\fR \fIprop\fR [\fB-s\fR \fIprop\fR]...] [\fB-S\fR \fIprop\fR [\fB-S\fR \fIprop\fR]...]
[\fIfilesystem\fR|\fIvolume\fR|\fIsnapshot\fR|\fI/pathname\fR|\fI./pathname\fR] ...
.fi
.LP
.nf
\fBzfs\fR \fBset\fR \fIproperty\fR=\fIvalue\fR \fIfilesystem\fR|\fIvolume\fR ...
.fi
.LP
.nf
\fBzfs\fR \fBget\fR [\fB-rHp\fR] [\fB-o\fR \fIfield\fR[,\fIfield\fR]...]
[\fB-s\fR \fIsource\fR[,\fIsource\fR]...] \fIall\fR | \fIproperty\fR[,\fIproperty\fR]...
\fIfilesystem\fR|\fIvolume\fR|\fIsnapshot\fR ...
.fi
.LP
.nf
\fBzfs\fR \fBinherit\fR [\fB-r\fR] \fIproperty\fR \fIfilesystem\fR|\fIvolume\fR ...
.fi
.LP
.nf
\fBzfs\fR \fBmount\fR
.fi
.LP
.nf
\fBzfs\fR \fBmount\fR [\fB-o \fIoptions\fR\fR] [\fB-O\fR] \fB-a\fR
.fi
.LP
.nf
\fBzfs\fR \fBmount\fR [\fB-o \fIoptions\fR\fR] [\fB-O\fR] \fIfilesystem\fR
.fi
.LP
.nf
\fBzfs\fR \fBunmount\fR [\fB-f\fR] \fB-a\fR
.fi
.LP
.nf
\fBzfs\fR \fBunmount\fR [\fB-f\fR] \fB\fIfilesystem\fR|\fImountpoint\fR\fR
.fi
.LP
.nf
\fBzfs\fR \fBshare\fR \fB-a\fR
.fi
.LP
.nf
\fBzfs\fR \fBshare\fR \fIfilesystem\fR
.fi
.LP
.nf
\fBzfs\fR \fBunshare\fR [\fB-f\fR] \fB-a\fR
.fi
.LP
.nf
\fBzfs\fR \fBunshare\fR [\fB-f\fR] \fB\fIfilesystem\fR|\fImountpoint\fR\fR
.fi
.LP
.nf
\fBzfs\fR \fBsend\fR [\fB-i\fR \fIsnapshot1\fR] \fB\fIsnapshot2\fR\fR
.fi
.LP
.nf
\fBzfs\fR \fBreceive\fR [\fB-vnF\fR] \fIfilesystem\fR|\fIvolume\fR|\fIsnapshot\fR
.fi
.LP
.nf
\fBzfs\fR \fBreceive\fR [\fB-vnF\fR] \fB-d\fR \fB\fIfilesystem\fR\fR
@ -137,7 +161,6 @@ zfs \- configures ZFS file systems
.fi
.SH DESCRIPTION
.LP
The \fBzfs\fR command configures \fBZFS\fR datasets within a \fBZFS\fR storage pool, as described in \fBzpool\fR(1M). A
dataset is identified by a unique path within the \fBZFS\fR namespace. For example:
@ -186,16 +209,14 @@ A logical volume exported as a raw or block device. This type of dataset should
A read-only version of a file system or volume at a given point in time. It is specified as \fIfilesystem@name\fR or \fIvolume@name\fR.
.RE
.SS ZFS File System Hierarchy
.SS "ZFS File System Hierarchy"
.LP
A \fBZFS\fR storage pool is a logical collection of devices that provide space for datasets. A storage pool is also the root of the \fBZFS\fR file system hierarchy.
.LP
The root of the pool can be accessed as a file system: it can be mounted and unmounted, snapshots can be taken, and properties can be set. The physical storage characteristics, however, are managed by the \fBzpool\fR(1M) command.
.LP
See \fBzpool\fR(1M) for more information on creating and administering pools.
.SS Snapshots
.SS "Snapshots"
.LP
A snapshot is a read-only copy of a file system or volume. Snapshots can be created extremely quickly, and initially consume no additional space within the pool. As data within the active dataset changes, the snapshot consumes more space than would otherwise be shared with the active dataset.
.LP
@ -203,8 +224,7 @@ Snapshots can have arbitrary names. Snapshots of volumes can be cloned or rolled
.LP
File system snapshots can be accessed under the ".zfs/snapshot" directory in the root of the file system. Snapshots are automatically mounted on demand and may be unmounted at regular intervals. The visibility of the ".zfs" directory can be controlled by the "snapdir"
property.
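For example, a sketch of making the snapshot directory visible on a hypothetical dataset:

    # zfs set snapdir=visible pool/home/bob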
.SS Clones
.SS "Clones"
.LP
A clone is a writable volume or file system whose initial contents are the same as another dataset. As with snapshots, creating a clone is nearly instantaneous, and initially consumes no additional space.
.LP
@ -213,8 +233,7 @@ property exposes this dependency, and the \fBdestroy\fR command lists any such d
.LP
The clone parent-child dependency relationship can be reversed by using the "\fBpromote\fR" subcommand. This causes the "origin" file system to become a clone of the specified file system, which makes it possible to destroy the file system that the clone
was created from.
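For instance (dataset names illustrative), after cloning and modifying a file system, the clone can be promoted and swapped in; Example 10 below walks through the full sequence, but the pivotal step is simply:

    # zfs promote pool/project/beta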
.SS Mount Points
.SS "Mount Points"
.LP
Creating a \fBZFS\fR file system is a simple operation, so the number of file systems per system is likely to be large. To cope with this, \fBZFS\fR automatically manages mounting and unmounting file systems without the need to edit the \fB/etc/vfstab\fR file.
All automatically managed file systems are mounted by \fBZFS\fR at boot time.
@ -227,8 +246,7 @@ A file system mountpoint property of "none" prevents the file system from being
.LP
If needed, \fBZFS\fR file systems can also be managed with traditional tools (\fBmount\fR, \fBumount\fR, \fB/etc/vfstab\fR). If a file system's mount point is set to "legacy", \fBZFS\fR makes no attempt to manage
the file system, and the administrator is responsible for mounting and unmounting the file system.
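As a sketch (names illustrative), a legacy-managed file system is mounted with the traditional tools rather than "zfs mount":

    # zfs set mountpoint=legacy pool/home/bob
    # mount -F zfs pool/home/bob /mnt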
.SS Zones
.SS "Zones"
.LP
A \fBZFS\fR file system can be added to a non-global zone by using zonecfg's "\fBadd fs\fR" subcommand. A \fBZFS\fR file system that is added to a non-global zone must have its mountpoint property set to legacy.
.LP
@ -244,8 +262,7 @@ For more information about \fBzonecfg\fR syntax, see \fBzonecfg\fR(1M).
After a dataset is delegated to a non-global zone, the "zoned" property is automatically set. A zoned file system cannot be mounted in the global zone, since the zone administrator might have set the mount point to an unacceptable value.
.LP
The global administrator can forcibly clear the "zoned" property, though this should be done with extreme care. The global administrator should verify that all the mount points are acceptable before clearing the property.
.SS Native Properties
.SS "Native Properties"
.LP
Properties are divided into two types: native properties and user-defined properties. Native properties either export internal statistics or control \fBZFS\fR behavior. In addition, native properties are either editable or read-only. User properties have no effect on \fBZFS\fR behavior,
but you can use them to annotate datasets in a way that is meaningful in your environment. For more information about user properties, see the "User Properties" section.
@ -507,11 +524,13 @@ checking on user data. Disabling checksums is NOT a recommended practice.
.ne 2
.mk
.na
\fBcompression=\fIon\fR | \fIoff\fR | \fIlzjb\fR\fR
\fBcompression=\fIon\fR | \fIoff\fR | \fIlzjb\fR | \fIgzip\fR | \fIgzip-N\fR\fR
.ad
.sp .6
.RS 4n
Controls the compression algorithm used for this dataset. There is currently only one algorithm, "\fIlzjb\fR", though this may change in future releases. The default value is "off".
Controls the compression algorithm used for this dataset. The "lzjb" compression algorithm is optimized for performance while providing decent data compression. Setting compression to "on" uses the "lzjb" compression algorithm. The "gzip"
compression algorithm uses the same compression as the \fBgzip\fR(1) command. You can specify the "gzip" level by using the value "gzip-\fIN\fR",
where \fIN\fR is an integer from 1 (fastest) to 9 (best compression ratio). Currently, "gzip" is equivalent to "gzip-6" (which is also the default for \fBgzip\fR(1)).
.sp
This property can also be referred to by its shortened column name "compress".
.RE
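For instance (dataset name illustrative), selecting the strongest gzip level and confirming the setting:

    # zfs set compression=gzip-9 pool/home/bob
    # zfs get compression pool/home/bob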
@ -655,10 +674,10 @@ Controls whether extended attributes are enabled for this file system. The defau
.ad
.sp .6
.RS 4n
Controls the number of copies of data stored for this dataset. These copies are in addition to any redundancy provided by the pool (for example, mirroring or raid-z). The copies are stored on different disks if possible. The space used by multiple copies is charged to the associated
file and dataset, changing the "used" property and counting against quotas and reservations.
Controls the number of copies of data stored for this dataset. These copies are in addition to any redundancy provided by the pool, for example, mirroring or raid-z. The copies are stored on different disks, if possible. The space used by multiple copies is charged to the associated
file and dataset, changing the "used" property and counting against quotas and reservations.
.sp
Changing this property only affects newly-written data. Therefore, it is recommended that this property be set at file system creation time, using the "\fB-o\fR copies=" option.
Changing this property only affects newly-written data. Therefore, set this property at file system creation time by using the "\fB-o\fR copies=" option.
.RE
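Since only newly written data is affected, a sketch of setting the property at creation time (dataset name illustrative):

    # zfs create -o copies=2 pool/archive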
.sp
@ -672,12 +691,10 @@ Changing this property only affects newly-written data. Therefore, it is recomme
Controls whether the dataset is managed from within a jail. The default value is "off".
.RE
.SS iscsioptions
.SS "iscsioptions"
.LP
This read-only property, which is hidden, is used by the \fBiSCSI\fR target daemon to store persistent information, such as the \fBIQN\fR. It cannot be viewed or modified using the \fBzfs\fR command. The contents are not intended for external consumers.
.SS Temporary Mount Point Properties
.SS "Temporary Mount Point Properties"
.LP
When a file system is mounted, either through \fBmount\fR(1M) for legacy mounts or the "\fBzfs mount\fR" command for normal file systems,
its mount options are set according to its properties. The correlation between properties and mount options is as follows:
@ -697,8 +714,7 @@ its mount options are set according to its properties. The correlation between p
.LP
In addition, these options can be set on a per-mount basis using the \fB-o\fR option, without affecting the property that is stored on disk. The values specified on the command line override the values stored in the dataset. The \fB-nosuid\fR option is an alias for "nodevices,nosetuid".
These properties are reported as "temporary" by the "\fBzfs get\fR" command. If the properties are changed while the dataset is mounted, the new setting overrides any temporary settings.
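For example (dataset name illustrative), a temporary read-only, nosuid mount that leaves the on-disk properties untouched:

    # zfs mount -o ro,nosuid pool/home/bob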
.SS User Properties
.SS "User Properties"
.LP
In addition to the standard native properties, \fBZFS\fR supports arbitrary user properties. User properties have no effect on \fBZFS\fR behavior, but applications or administrators can use them to annotate datasets.
.LP
@ -711,8 +727,7 @@ different purposes. Property names beginning with "com.sun." are reserved for us
.LP
The values of user properties are arbitrary strings, are always inherited, and are never validated. All of the commands that operate on properties ("zfs list", "zfs get", "zfs set", etc.) can be used to manipulate both native properties and user properties.
Use the "\fBzfs inherit\fR" command to clear a user property. If the property is not defined in any parent dataset, it is removed entirely. Property values are limited to 1024 characters.
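A short illustration (property name hypothetical) of setting and later clearing a user property:

    # zfs set com.example:backup-policy=daily pool/home
    # zfs inherit com.example:backup-policy pool/home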
.SS Volumes as Swap or Dump Devices
.SS "Volumes as Swap or Dump Devices"
.LP
To set up a swap area, create a \fBZFS\fR volume of a specific size and then enable swap on that device. For more information, see the EXAMPLES section.
.LP
@ -720,7 +735,6 @@ Do not swap to a file on a \fBZFS\fR file system. A \fBZFS\fR swap file configur
.LP
Using a \fBZFS\fR volume as a dump device is not supported.
.SH SUBCOMMANDS
.LP
All subcommands that modify state are logged persistently to the pool in their original form.
.sp
@ -1466,10 +1480,10 @@ Detaches the given file system from the given jail.
.SH EXAMPLES
.LP
\fBExample 1 \fRCreating a ZFS File System Hierarchy
.LP
The following commands create a file system named "\fBpool/home\fR" and a file system named "\fBpool/home/bob\fR". The mount point "\fB/export/home\fR" is set for the parent file system, and automatically inherited
by the child file system.
.sp
.in +2
.nf
@ -1479,11 +1493,12 @@ by the child file system.
.fi
.in -2
.sp
.LP
\fBExample 2 \fRCreating a ZFS Snapshot
.LP
\fBExample 2 \fRCreating a ZFS Snapshot
.LP
The following command creates a snapshot named "yesterday". This snapshot is mounted on demand in the ".zfs/snapshot" directory at the root of the "\fBpool/home/bob\fR" file system.
.sp
.in +2
.nf
@ -1491,12 +1506,13 @@ The following command creates a snapshot named "yesterday". This snapshot is mou
.fi
.in -2
.sp
.LP
\fBExample 3 \fRTaking and destroying multiple snapshots
.LP
The following command creates snapshots named "\fByesterday\fR" of "\fBpool/home\fR" and all of its descendant file systems. Each snapshot is mounted on demand in the ".zfs/snapshot" directory at the root of its file system. The
second command destroys the newly created snapshots.
.sp
.in +2
.nf
@ -1505,11 +1521,12 @@ second command destroys the newly created snapshots.
.fi
.in -2
.sp
.LP
\fBExample 4 \fRTurning Off Compression
.LP
\fBExample 4 \fRTurning Off Compression
.LP
The following commands turn compression off for all file systems under "\fBpool/home\fR", but explicitly turn it on for "\fBpool/home/anne\fR".
.sp
.in +2
.nf
@ -1518,11 +1535,12 @@ The following commands turn compression off for all file systems under "\fBpool/
.fi
.in -2
.sp
.LP
\fBExample 5 \fRListing ZFS Datasets
.LP
\fBExample 5 \fRListing ZFS Datasets
.LP
The following command lists all active file systems and volumes in the system.
.sp
.in +2
.nf
@ -1538,11 +1556,12 @@ The following command lists all active file systems and volumes in the system.
.fi
.in -2
.sp
.LP
\fBExample 6 \fRSetting a Quota on a ZFS File System
.LP
\fBExample 6 \fRSetting a Quota on a ZFS File System
.LP
The following command sets a quota of 50 Gbytes for "\fBpool/home/bob\fR".
.sp
.in +2
.nf
@ -1550,11 +1569,12 @@ The following command sets a quota of 50 gbytes for "\fBpool/home/bob\fR".
.fi
.in -2
.sp
.LP
\fBExample 7 \fRListing ZFS Properties
.LP
\fBExample 7 \fRListing ZFS Properties
.LP
The following command lists all properties for "\fBpool/home/bob\fR".
.sp
.in +2
.nf
@ -1596,6 +1616,7 @@ The following command lists all properties for "\fBpool/home/bob\fR".
.LP
The following command gets a single property value.
.sp
.in +2
.nf
@ -1607,6 +1628,7 @@ on
.LP
The following command lists all properties with local settings for "\fBpool/home/bob\fR".
.sp
.in +2
.nf
@ -1618,11 +1640,12 @@ The following command lists all properties with local settings for "\fBpool/home
.fi
.in -2
.sp
.LP
\fBExample 8 \fRRolling Back a ZFS File System
.LP
\fBExample 8 \fRRolling Back a ZFS File System
.LP
The following command reverts the contents of "\fBpool/home/anne\fR" to the snapshot named "\fByesterday\fR", deleting all intermediate snapshots.
.sp
.in +2
.nf
@ -1630,11 +1653,12 @@ The following command reverts the contents of "\fBpool/home/anne\fR" to the snap
.fi
.in -2
.sp
.LP
\fBExample 9 \fRCreating a ZFS Clone
.LP
\fBExample 9 \fRCreating a ZFS Clone
.LP
The following command creates a writable file system whose initial contents are the same as "\fBpool/home/bob@yesterday\fR".
.sp
.in +2
.nf
@ -1642,11 +1666,12 @@ The following command creates a writable file system whose initial contents are
.fi
.in -2
.sp
.LP
\fBExample 10 \fRPromoting a ZFS Clone
.LP
\fBExample 10 \fRPromoting a ZFS Clone
.LP
The following commands illustrate how to test out changes to a file system, and then replace the original file system with the changed one, using clones, clone promotion, and renaming:
.sp
.in +2
.nf
@ -1664,11 +1689,12 @@ The following commands illustrate how to test out changes to a file system, and
.fi
.in -2
.sp
.LP
\fBExample 11 \fRInheriting ZFS Properties
.LP
\fBExample 11 \fRInheriting ZFS Properties
.LP
The following command causes "\fBpool/home/bob\fR" and "\fBpool/home/anne\fR" to inherit the "checksum" property from their parent.
.sp
.in +2
.nf
@ -1676,12 +1702,13 @@ The following command causes "\fBpool/home/bob\fR" and "\fBpool/home/anne\fR" to
.fi
.in -2
.sp
.LP
\fBExample 12 \fRRemotely Replicating ZFS Data
.LP
The following commands send a full stream and then an incremental stream to a remote machine, restoring them into "\fBpoolB/received/fs\fR@a" and "\fBpoolB/received/fs@b\fR", respectively. "\fBpoolB\fR" must contain
the file system "\fBpoolB/received\fR", and must not initially contain "\fBpoolB/received/fs\fR".
.sp
.in +2
.nf
@ -1692,12 +1719,13 @@ the file system "\fBpoolB/received\fR", and must not initially contain "\fBpoolB
.fi
.in -2
.sp
.LP
\fBExample 13 \fRUsing the zfs receive -d Option
.LP
The following command sends a full stream of "\fBpoolA/fsA/fsB@snap\fR" to a remote machine, receiving it into "\fBpoolB/received/fsA/fsB@snap\fR". The "\fBfsA/fsB@snap\fR" portion of the received snapshot's name
is determined from the name of the sent snapshot. "\fBpoolB\fR" must contain the file system "\fBpoolB/received\fR". If "\fBpoolB/received/fsA\fR" does not exist, it will be created as an empty file system.
.sp
.in +2
.nf
@ -1707,11 +1735,12 @@ is determined from the name of the sent snapshot. "\fBpoolB\fR" must contain the
.fi
.in -2
.sp
.LP
\fBExample 14 \fRCreating a ZFS Volume as a Swap Device
.LP
\fBExample 14 \fRCreating a ZFS Volume as a Swap Device
.LP
The following example shows how to create a 5-Gbyte ZFS volume and then add the volume as a swap device.
.sp
.in +2
.nf
@ -1720,11 +1749,12 @@ The following example shows how to create a 5-Gbyte ZFS volume and then add the
.fi
.in -2
.sp
.LP
\fBExample 15 \fRSetting User Properties
.LP
\fBExample 15 \fRSetting User Properties
.LP
The following example sets the user defined "com.example:department" property for a dataset.
.sp
.in +2
.nf
@ -1732,11 +1762,12 @@ The following example sets the user defined "com.example:department" property fo
.fi
.in -2
.sp
.LP
\fBExample 16 \fRCreating a ZFS Volume as an iSCSI Target Device
.LP
\fBExample 16 \fRCreating a ZFS Volume as an iSCSI Target Device
.LP
The following example shows how to create a \fBZFS\fR volume as an \fBiSCSI\fR target.
.sp
.in +2
.nf
@ -1754,7 +1785,6 @@ Connections: 0
.LP
After the \fBiSCSI\fR target is created, set up the \fBiSCSI\fR initiator. For more information about the Solaris \fBiSCSI\fR initiator, see the Solaris Administration Guide: Devices and File Systems.
.SH EXIT STATUS
.LP
The following exit values are returned:
.sp
@ -1791,7 +1821,6 @@ Invalid command line options were specified.
.RE
.SH ATTRIBUTES
.LP
See \fBattributes\fR(5) for descriptions of the following attributes:
.sp
@ -1810,6 +1839,5 @@ Interface StabilityEvolving
.TE
.SH SEE ALSO
.LP
\fBssh\fR(1), \fBmount\fR(1M), \fBshare\fR(1M), \fBunshare\fR(1M), \fBzonecfg\fR(1M), \fBzpool\fR(1M), \fBchmod\fR(2), \fBstat\fR(2), \fBfsync\fR(3c), \fBdfstab\fR(4), \fBattributes\fR(5)
\fBgzip\fR(1), \fBssh\fR(1), \fBmount\fR(1M), \fBshare\fR(1M), \fBunshare\fR(1M), \fBzonecfg\fR(1M), \fBzpool\fR(1M), \fBchmod\fR(2), \fBstat\fR(2), \fBfsync\fR(3c), \fBdfstab\fR(4), \fBattributes\fR(5)

View File

@ -206,7 +206,8 @@ get_usage(zfs_help_t idx)
"\treceive [-vnF] -d <filesystem>\n"));
case HELP_RENAME:
return (gettext("\trename <filesystem|volume|snapshot> "
"<filesystem|volume|snapshot>\n"));
"<filesystem|volume|snapshot>\n"
"\trename -r <snapshot> <snapshot>"));
case HELP_ROLLBACK:
return (gettext("\trollback [-rRf] <snapshot>\n"));
case HELP_SEND:
@ -1485,7 +1486,7 @@ zfs_do_list(int argc, char **argv)
}
/*
* zfs rename <fs | snap | vol> <fs | snap | vol>
* zfs rename [-r] <fs | snap | vol> <fs | snap | vol>
*
* Renames the given dataset to another of the same type.
*/
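From the command line, the new recursive form renames the named snapshot of every descendant dataset in one step, for example (pool name illustrative):

    # zfs rename -r tank@yesterday tank@monday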
@ -1494,38 +1495,57 @@ static int
zfs_do_rename(int argc, char **argv)
{
zfs_handle_t *zhp;
int c;
int ret;
int recurse = 0;
/* check options */
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
while ((c = getopt(argc, argv, "r")) != -1) {
switch (c) {
case 'r':
recurse = 1;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 2) {
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source dataset "
"argument\n"));
usage(B_FALSE);
}
if (argc < 3) {
if (argc < 2) {
(void) fprintf(stderr, gettext("missing target dataset "
"argument\n"));
usage(B_FALSE);
}
if (argc > 3) {
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((zhp = zfs_open(g_zfs, argv[1], ZFS_TYPE_ANY)) == NULL)
if (recurse && strchr(argv[0], '@') == 0) {
(void) fprintf(stderr, gettext("source dataset for recursive "
"rename must be a snapshot\n"));
usage(B_FALSE);
}
if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_ANY)) == NULL)
return (1);
ret = (zfs_rename(zhp, argv[2]) != 0);
ret = (zfs_rename(zhp, argv[1], recurse) != 0);
if (!ret)
zpool_log_history(g_zfs, argc, argv, argv[2], B_FALSE, B_FALSE);
zpool_log_history(g_zfs, argc + optind, argv - optind, argv[1],
B_FALSE, B_FALSE);
zfs_close(zhp);
return (ret);

View File

@ -26,104 +26,124 @@ zpool \- configures ZFS storage pools
.nf
\fBzpool\fR [\fB-?\fR]
.fi
.LP
.nf
\fBzpool create\fR [\fB-fn\fR] [\fB-R\fR \fIroot\fR] [\fB-m\fR \fImountpoint\fR] \fIpool\fR \fIvdev ...\fR
.fi
.LP
.nf
\fBzpool destroy\fR [\fB-f\fR] \fIpool\fR
.fi
.LP
.nf
\fBzpool add\fR [\fB-fn\fR] \fIpool\fR \fIvdev\fR
.fi
.LP
.nf
\fBzpool remove\fR \fIpool\fR \fIvdev\fR
.fi
.LP
.nf
\fBzpool list\fR [\fB-H\fR] [\fB-o\fR \fIfield\fR[,\fIfield\fR]*] [\fIpool\fR] ...
.fi
.LP
.nf
\fBzpool iostat\fR [\fB-v\fR] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]]
.fi
.LP
.nf
\fBzpool status\fR [\fB-xv\fR] [\fIpool\fR] ...
.fi
.LP
.nf
\fBzpool offline\fR [\fB-t\fR] \fIpool\fR \fIdevice\fR ...
.fi
.LP
.nf
\fBzpool online\fR \fIpool\fR \fIdevice\fR ...
.fi
.LP
.nf
\fBzpool clear\fR \fIpool\fR [\fIdevice\fR] ...
.fi
.LP
.nf
\fBzpool attach\fR [\fB-f\fR] \fIpool\fR \fIdevice\fR \fInew_device\fR
.fi
.LP
.nf
\fBzpool detach\fR \fIpool\fR \fIdevice\fR
.fi
.LP
.nf
\fBzpool replace\fR [\fB-f\fR] \fIpool\fR \fIdevice\fR [\fInew_device\fR]
.fi
.LP
.nf
\fBzpool scrub\fR [\fB-s\fR] \fIpool\fR ...
.fi
.LP
.nf
\fBzpool export\fR [\fB-f\fR] \fIpool\fR
.fi
.LP
.nf
\fBzpool import\fR [\fB-d\fR \fIdir\fR] [\fB-D\fR]
.fi
.LP
.nf
\fBzpool import\fR [\fB-d\fR \fIdir\fR] [\fB-D\fR] [\fB-f\fR] [\fB-o \fIopts\fR\fR] [\fB-R \fR\fIroot\fR] \fIpool\fR | \fIid\fR
[\fInewpool\fR]
.fi
.LP
.nf
\fBzpool import\fR [\fB-d\fR \fIdir\fR] [\fB-D\fR] [\fB-f\fR] [\fB-a\fR]
.fi
.LP
.nf
\fBzpool upgrade\fR
.fi
.LP
.nf
\fBzpool upgrade\fR \fB-v\fR
.fi
.LP
.nf
\fBzpool upgrade\fR [\fB-a\fR | \fIpool\fR]
.fi
.LP
.nf
\fBzpool history\fR [\fIpool\fR] ...
.fi
.SH DESCRIPTION
.LP
The \fBzpool\fR command configures \fBZFS\fR storage pools. A storage pool is a collection of devices that provides physical storage and data replication for \fBZFS\fR datasets.
.LP
All datasets within a storage pool share the same space. See \fBzfs\fR(1M) for information on managing datasets.
.SS Virtual Devices (vdevs)
.SS "Virtual Devices (vdevs)"
.LP
A "virtual device" describes a single device or a collection of devices organized according to certain performance and fault characteristics. The following virtual devices are supported:
.sp
@ -212,8 +232,7 @@ Virtual devices are specified one at a time on the command line, separated by wh
.in -2
.sp
.SS Device Failure and Recovery
.SS "Device Failure and Recovery"
.LP
\fBZFS\fR supports a rich set of mechanisms for handling device failure and data corruption. All metadata and data is checksummed, and \fBZFS\fR automatically repairs bad data from a good copy when corruption is detected.
.LP
@ -222,8 +241,7 @@ strongly discouraged. A single case of bit corruption can render some or all of
.LP
A pool's health status is described by one of three states: online, degraded, or faulted. An online pool has all devices operating normally. A degraded pool is one in which one or more devices have failed, but the data is still available due to a redundant configuration. A faulted pool has
one or more failed devices, and there is insufficient redundancy to replicate the missing data.
.SS Hot Spares
.SS "Hot Spares"
.LP
\fBZFS\fR allows devices to be associated with pools as "hot spares". These devices are not actively used in the pool, but when an active device fails, it is automatically replaced by a hot spare. To create a pool with hot spares, specify a "spare" \fBvdev\fR with any number of devices. For example,
.sp
@ -239,8 +257,7 @@ Spares can be shared across multiple pools, and can be added with the "zpool add
will remain there until the original device is replaced. At this point, the hot spare becomes available again if another device fails.
.LP
An in-progress spare replacement can be cancelled by detaching the hot spare. If the original faulted device is detached, then the hot spare assumes its place in the configuration, and is removed from the spare list of all active pools.
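For example (device name illustrative), cancelling an in-progress spare replacement:

    # zpool detach tank c2t3d0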
.SS Alternate Root Pools
.SS "Alternate Root Pools"
.LP
The "zpool create -R" and "zpool import -R" commands allow users to create and import a pool with a different root path. By default, whenever a pool is created or imported on a system, it is permanently added so that it is available whenever the system boots. For
removable media, or when in recovery situations, this may not always be desirable. An alternate root pool does not persist on the system. Instead, it exists only until exported or the system is rebooted, at which point it will have to be imported again.
@ -248,8 +265,7 @@ removable media, or when in recovery situations, this may not always be desirabl
In addition, all mount points in the pool are prefixed with the given root, so a pool can be constrained to a particular area of the file system. This is most useful when importing unknown pools from removable media, as the mount points of any file systems cannot be trusted.
.LP
When creating an alternate root pool, the default mount point is "/", rather than the normal default "/\fIpool\fR".
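A sketch of creating an alternate root pool on removable media (device and path illustrative):

    # zpool create -R /mnt tank c0t0d0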
.SS Subcommands
.SS "Subcommands"
.LP
All subcommands that modify state are logged persistently to the pool in their original form.
.LP
@ -879,9 +895,9 @@ Displays the command history of the specified pools (or all pools if no pool is
.SH EXAMPLES
.LP
\fBExample 1 \fRCreating a RAID-Z Storage Pool
.LP
The following command creates a pool with a single \fBraidz\fR root \fIvdev\fR that consists of six disks.
.sp
.in +2
.nf
@ -889,11 +905,12 @@ The following command creates a pool with a single \fBraidz\fR root \fIvdev\fR t
.fi
.in -2
.sp
.LP
\fBExample 2 \fRCreating a Mirrored Storage Pool
.LP
\fBExample 2 \fRCreating a Mirrored Storage Pool
.LP
The following command creates a pool with two mirrors, where each mirror contains two disks.
.sp
.in +2
.nf
@ -901,11 +918,12 @@ The following command creates a pool with two mirrors, where each mirror contain
.fi
.in -2
.sp
.LP
\fBExample 3 \fRCreating a ZFS Storage Pool by Using Slices
.LP
\fBExample 3 \fRCreating a ZFS Storage Pool by Using Slices
.LP
The following command creates an unmirrored pool using two disk slices.
.sp
.in +2
.nf
@ -913,11 +931,12 @@ The following command creates an unmirrored pool using two disk slices.
.fi
.in -2
.sp
.LP
\fBExample 4 \fRCreating a ZFS Storage Pool by Using Files
.LP
\fBExample 4 \fRCreating a ZFS Storage Pool by Using Files
.LP
The following command creates an unmirrored pool using files. While not recommended, a pool based on files can be useful for experimental purposes.
.sp
.in +2
.nf
@ -925,11 +944,12 @@ The following command creates an unmirrored pool using files. While not recommen
.fi
.in -2
.sp
.LP
\fBExample 5 \fRAdding a Mirror to a ZFS Storage Pool
.LP
\fBExample 5 \fRAdding a Mirror to a ZFS Storage Pool
.LP
The following command adds two mirrored disks to the pool "\fItank\fR", assuming the pool is already made up of two-way mirrors. The additional space is immediately available to any datasets within the pool.
.sp
.in +2
.nf
@ -937,13 +957,15 @@ The following command adds two mirrored disks to the pool "\fItank\fR", assuming
.fi
.in -2
.sp
.LP
\fBExample 6 \fRListing Available ZFS Storage Pools
.LP
\fBExample 6 \fRListing Available ZFS Storage Pools
.LP
The following command lists all available pools on the system. In this case, the pool \fIzion\fR is faulted due to a missing device.
.LP
The results from this command are similar to the following:
.sp
.in +2
.nf
@ -955,11 +977,12 @@ The results from this command are similar to the following:
.fi
.in -2
.sp
.LP
\fBExample 7 \fRDestroying a ZFS Storage Pool
.LP
\fBExample 7 \fRDestroying a ZFS Storage Pool
.LP
The following command destroys the pool "\fItank\fR" and any datasets contained within.
.sp
.in +2
.nf
@ -967,11 +990,12 @@ The following command destroys the pool "\fItank\fR" and any datasets contained
.fi
.in -2
.sp
.LP
\fBExample 8 \fRExporting a ZFS Storage Pool
.LP
\fBExample 8 \fRExporting a ZFS Storage Pool
.LP
The following command exports the devices in pool \fItank\fR so that they can be relocated or later imported.
.sp
.in +2
.nf
@ -979,13 +1003,15 @@ The following command exports the devices in pool \fItank\fR so that they can be
.fi
.in -2
.sp
.LP
\fBExample 9 \fRImporting a ZFS Storage Pool
.LP
\fBExample 9 \fRImporting a ZFS Storage Pool
.LP
The following command displays available pools, and then imports the pool "tank" for use on the system.
.LP
The results from this command are similar to the following:
.sp
.in +2
.nf
@ -1005,11 +1031,12 @@ config:
.fi
.in -2
.sp
.LP
\fBExample 10 \fRUpgrading All ZFS Storage Pools to the Current Version
.LP
\fBExample 10 \fRUpgrading All ZFS Storage Pools to the Current Version
.LP
The following command upgrades all ZFS storage pools to the current version of the software.
.sp
.in +2
.nf
@ -1018,11 +1045,12 @@ This system is currently running ZFS version 2.
.fi
.in -2
.sp
.LP
\fBExample 11 \fRManaging Hot Spares
.LP
\fBExample 11 \fRManaging Hot Spares
.LP
The following command creates a new pool with an available hot spare:
.sp
.in +2
.nf
@ -1033,6 +1061,7 @@ The following command creates a new pool with an available hot spare:
.LP
If one of the disks were to fail, the pool would be reduced to the degraded state. The failed device can be replaced using the following command:
.sp
.in +2
.nf
@ -1043,6 +1072,7 @@ If one of the disks were to fail, the pool would be reduced to the degraded stat
.LP
Once the data has been resilvered, the spare is automatically removed and is made available should another device fail. The hot spare can be permanently removed from the pool using the following command:
.sp
.in +2
.nf
@ -1052,7 +1082,6 @@ Once the data has been resilvered, the spare is automatically removed and is mad
.sp
.SH EXIT STATUS
.LP
The following exit values are returned:
.sp
@ -1089,7 +1118,6 @@ Invalid command line options were specified.
.RE
.SH ATTRIBUTES
.LP
See \fBattributes\fR(5) for descriptions of the following attributes:
.sp
@ -1108,6 +1136,5 @@ Interface StabilityEvolving
.TE
.SH SEE ALSO
.LP
\fBzfs\fR(1M), \fBattributes\fR(5)

View File

@ -246,7 +246,7 @@ extern uint16_t zio_zil_fail_shift;
#define ZTEST_DIROBJ_BLOCKSIZE (1 << 10)
#define ZTEST_DIRSIZE 256
static void usage(boolean_t);
static void usage(boolean_t) __NORETURN;
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's

View File

@ -336,7 +336,7 @@ extern int zfs_destroy_snaps(zfs_handle_t *, char *);
extern int zfs_clone(zfs_handle_t *, const char *, nvlist_t *);
extern int zfs_snapshot(libzfs_handle_t *, const char *, boolean_t);
extern int zfs_rollback(zfs_handle_t *, zfs_handle_t *, int);
extern int zfs_rename(zfs_handle_t *, const char *);
extern int zfs_rename(zfs_handle_t *, const char *, int);
extern int zfs_send(zfs_handle_t *, const char *, int);
extern int zfs_receive(libzfs_handle_t *, const char *, int, int, int,
boolean_t, int);
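To make the signature change concrete, here is a minimal, hypothetical libzfs caller using the new third argument (modeled on zfs_do_rename above; the dataset names are illustrative and error handling is elided):

#include <libzfs.h>

/*
 * Recursively rename every "@yesterday" snapshot under "tank" to
 * "@monday" via the new three-argument zfs_rename().
 */
static int
rename_snap_recursive(libzfs_handle_t *g_zfs)
{
	zfs_handle_t *zhp;
	int ret;

	if ((zhp = zfs_open(g_zfs, "tank@yesterday", ZFS_TYPE_ANY)) == NULL)
		return (1);
	ret = (zfs_rename(zhp, "tank@monday", 1) != 0);	/* 1 = recurse */
	zfs_close(zhp);
	return (ret);
}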

View File

@ -50,6 +50,8 @@
#include "zfs_prop.h"
#include "libzfs_impl.h"
static int zvol_create_link_common(libzfs_handle_t *, const char *, int);
/*
* Given a single type (not a mask of types), return the type in a human
* readable form.
@ -2531,10 +2533,15 @@ zfs_promote(zfs_handle_t *zhp)
return (ret);
}
struct createdata {
const char *cd_snapname;
int cd_ifexists;
};
static int
zfs_create_link_cb(zfs_handle_t *zhp, void *arg)
{
char *snapname = arg;
struct createdata *cd = arg;
int ret;
if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
@ -2542,8 +2549,9 @@ zfs_create_link_cb(zfs_handle_t *zhp, void *arg)
(void) strlcpy(name, zhp->zfs_name, sizeof (name));
(void) strlcat(name, "@", sizeof (name));
(void) strlcat(name, snapname, sizeof (name));
(void) zvol_create_link(zhp->zfs_hdl, name);
(void) strlcat(name, cd->cd_snapname, sizeof (name));
(void) zvol_create_link_common(zhp->zfs_hdl, name,
cd->cd_ifexists);
/*
* NB: this is simply a best-effort. We don't want to
* return an error, because then we wouldn't visit all
@ -2551,7 +2559,7 @@ zfs_create_link_cb(zfs_handle_t *zhp, void *arg)
*/
}
ret = zfs_iter_filesystems(zhp, zfs_create_link_cb, snapname);
ret = zfs_iter_filesystems(zhp, zfs_create_link_cb, cd);
zfs_close(zhp);
@ -2603,8 +2611,11 @@ zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive)
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create snapshot '%s@%s'"), zc.zc_name, zc.zc_value);
if (ret == 0 && recursive) {
(void) zfs_iter_filesystems(zhp,
zfs_create_link_cb, (char *)delim+1);
struct createdata cd;
cd.cd_snapname = delim + 1;
cd.cd_ifexists = B_FALSE;
(void) zfs_iter_filesystems(zhp, zfs_create_link_cb, &cd);
}
if (ret == 0 && zhp->zfs_type == ZFS_TYPE_VOLUME) {
ret = zvol_create_link(zhp->zfs_hdl, path);
@ -3199,12 +3210,14 @@ zfs_iter_dependents(zfs_handle_t *zhp, boolean_t allowrecursion,
* Renames the given dataset.
*/
int
zfs_rename(zfs_handle_t *zhp, const char *target)
zfs_rename(zfs_handle_t *zhp, const char *target, int recursive)
{
int ret;
zfs_cmd_t zc = { 0 };
char *delim;
prop_changelist_t *cl;
prop_changelist_t *cl = NULL;
zfs_handle_t *zhrp = NULL;
char *parentname = NULL;
char parent[ZFS_MAXNAMELEN];
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
@ -3252,6 +3265,12 @@ zfs_rename(zfs_handle_t *zhp, const char *target)
if (!zfs_validate_name(hdl, target, zhp->zfs_type))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
} else {
if (recursive) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"recursive rename must be a snapshot"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
if (!zfs_validate_name(hdl, target, zhp->zfs_type))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
uint64_t unused;
@ -3291,20 +3310,42 @@ zfs_rename(zfs_handle_t *zhp, const char *target)
return (zfs_error(hdl, EZFS_ZONED, errbuf));
}
if ((cl = changelist_gather(zhp, ZFS_PROP_NAME, 0)) == NULL)
return (-1);
if (recursive) {
struct destroydata dd;
if (changelist_haszonedchild(cl)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
(void) zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
parentname = strdup(zhp->zfs_name);
delim = strchr(parentname, '@');
*delim = '\0';
zhrp = zfs_open(zhp->zfs_hdl, parentname, ZFS_TYPE_ANY);
if (zhrp == NULL) {
return (-1);
}
dd.snapname = delim + 1;
dd.gotone = B_FALSE;
dd.closezhp = B_FALSE;
/* We remove any zvol links prior to renaming them */
ret = zfs_iter_filesystems(zhrp, zfs_remove_link_cb, &dd);
if (ret) {
goto error;
}
} else {
if ((cl = changelist_gather(zhp, ZFS_PROP_NAME, 0)) == NULL)
return (-1);
if (changelist_haszonedchild(cl)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
(void) zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
if ((ret = changelist_prefix(cl)) != 0)
goto error;
}
if ((ret = changelist_prefix(cl)) != 0)
goto error;
if (ZFS_IS_VOLUME(zhp))
zc.zc_objset_type = DMU_OST_ZVOL;
else
@ -3313,22 +3354,65 @@ zfs_rename(zfs_handle_t *zhp, const char *target)
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, target, sizeof (zc.zc_value));
zc.zc_cookie = recursive;
if ((ret = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_RENAME, &zc)) != 0) {
(void) zfs_standard_error(zhp->zfs_hdl, errno, errbuf);
/*
* if it was recursive, the one that actually failed will
* be in zc.zc_name
*/
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot rename to '%s'"), zc.zc_name);
if (recursive && errno == EEXIST) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"a child dataset already has a snapshot "
"with the new name"));
(void) zfs_error(hdl, EZFS_CROSSTARGET, errbuf);
} else {
(void) zfs_standard_error(zhp->zfs_hdl, errno, errbuf);
}
/*
* On failure, we still want to remount any filesystems that
* were previously mounted, so we don't alter the system state.
*/
(void) changelist_postfix(cl);
} else {
changelist_rename(cl, zfs_get_name(zhp), target);
if (recursive) {
struct createdata cd;
ret = changelist_postfix(cl);
/* only create links for datasets that had existed */
cd.cd_snapname = delim + 1;
cd.cd_ifexists = B_TRUE;
(void) zfs_iter_filesystems(zhrp, zfs_create_link_cb,
&cd);
} else {
(void) changelist_postfix(cl);
}
} else {
if (recursive) {
struct createdata cd;
/* only create links for datasets that had existed */
cd.cd_snapname = strchr(target, '@') + 1;
cd.cd_ifexists = B_TRUE;
ret = zfs_iter_filesystems(zhrp, zfs_create_link_cb,
&cd);
} else {
changelist_rename(cl, zfs_get_name(zhp), target);
ret = changelist_postfix(cl);
}
}
error:
changelist_free(cl);
if (parentname) {
free(parentname);
}
if (zhrp) {
zfs_close(zhrp);
}
if (cl) {
changelist_free(cl);
}
return (ret);
}
@ -3338,6 +3422,12 @@ zfs_rename(zfs_handle_t *zhp, const char *target)
*/
int
zvol_create_link(libzfs_handle_t *hdl, const char *dataset)
{
return (zvol_create_link_common(hdl, dataset, B_FALSE));
}
static int
zvol_create_link_common(libzfs_handle_t *hdl, const char *dataset, int ifexists)
{
zfs_cmd_t zc = { 0 };
#if 0
@ -3359,6 +3449,18 @@ zvol_create_link(libzfs_handle_t *hdl, const char *dataset)
*/
return (0);
case ENOENT:
/*
* Dataset does not exist in the kernel. If we
* don't care (see zfs_rename), then ignore the
* error quietly.
*/
if (ifexists) {
return (0);
}
/* FALLTHROUGH */
default:
return (zfs_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot create device links "

View File

@ -28,7 +28,6 @@ zdb \- ZFS debugger
.fi
.SH DESCRIPTION
.LP
The \fBzdb\fR command is used by support engineers to diagnose failures and gather statistics. Since the \fBZFS\fR file system is always consistent on disk and is self-repairing, \fBzdb\fR should only be run under the direction by a support engineer.
.LP
@ -36,7 +35,6 @@ If no arguments are specified, \fBzdb\fR, performs basic consistency checks on t
.LP
Any options supported by this command are internal to Sun and subject to change at any time.
.SH EXIT STATUS
.LP
The following exit values are returned:
.sp
@ -73,7 +71,6 @@ Invalid command line options were specified.
.RE
.SH ATTRIBUTES
.LP
See \fBattributes\fR(5) for descriptions of the following attributes:
.sp
@ -92,6 +89,5 @@ Interface StabilityUnstable
.TE
.SH SEE ALSO
.LP
\fBzfs\fR(1M), \fBzpool\fR(1M), \fBattributes\fR(5)

View File

@ -26,103 +26,127 @@ zfs \- configures ZFS file systems
.nf
\fBzfs\fR [\fB-?\fR]
.fi
.LP
.nf
\fBzfs\fR \fBcreate\fR [[\fB-o\fR property=\fIvalue\fR]]... \fIfilesystem\fR
.fi
.LP
.nf
\fBzfs\fR \fBcreate\fR [\fB-s\fR] [\fB-b\fR \fIblocksize\fR] [[\fB-o\fR property=\fIvalue\fR]]... \fB-V\fR \fIsize\fR \fIvolume\fR
.fi
.LP
.nf
\fBzfs\fR \fBdestroy\fR [\fB-rRf\fR] \fIfilesystem\fR|\fIvolume\fR|\fIsnapshot\fR
.fi
.LP
.nf
\fBzfs\fR \fBclone\fR \fIsnapshot\fR \fIfilesystem\fR|\fIvolume\fR
.fi
.LP
.nf
\fBzfs\fR \fBpromote\fR \fIfilesystem\fR
.fi
.LP
.nf
\fBzfs\fR \fBrename\fR \fIfilesystem\fR|\fIvolume\fR|\fIsnapshot\fR
[\fIfilesystem\fR|\fIvolume\fR|\fIsnapshot\fR]
.fi
.LP
.nf
\fBzfs\fR \fBsnapshot\fR [\fB-r\fR] \fIfilesystem@name\fR|\fIvolume@name\fR
.fi
.LP
.nf
\fBzfs\fR \fBrollback\fR [\fB-rRf\fR] \fIsnapshot\fR
.fi
.LP
.nf
\fBzfs\fR \fBlist\fR [\fB-rH\fR] [\fB-o\fR \fIprop\fR[,\fIprop\fR] ]... [ \fB-t\fR \fItype\fR[,\fItype\fR]...]
[ \fB-s\fR \fIprop\fR [\fB-s\fR \fIprop\fR]... [ \fB-S\fR \fIprop\fR [\fB-S\fR \fIprop\fR]...
[\fIfilesystem\fR|\fIvolume\fR|\fIsnapshot\fR|\fI/pathname\fR|.\fI/pathname\fR ...
.fi
.LP
.nf
\fBzfs\fR \fBset\fR \fIproperty\fR=\fIvalue\fR \fIfilesystem\fR|\fIvolume\fR ...
.fi
.LP
.nf
\fBzfs\fR \fBget\fR [\fB-rHp\fR] [\fB-o\fR \fIfield\fR[,\fIfield\fR]...]
[\fB-s\fR \fIsource\fR[,\fIsource\fR]...] \fIall\fR | \fIproperty\fR[,\fIproperty\fR]...
\fIfilesystem\fR|\fIvolume\fR|\fIsnapshot\fR ...
.fi
.LP
.nf
\fBzfs\fR \fBinherit\fR [\fB-r\fR] \fIproperty\fR \fIfilesystem\fR|\fIvolume\fR... ...
.fi
.LP
.nf
\fBzfs\fR \fBmount\fR
.fi
.LP
.nf
\fBzfs\fR \fBmount\fR [\fB-o \fIoptions\fR\fR] [\fB-O\fR] \fB-a\fR
.fi
.LP
.nf
\fBzfs\fR \fBmount\fR [\fB-o \fIoptions\fR\fR] [\fB-O\fR] \fIfilesystem\fR
.fi
.LP
.nf
\fBzfs\fR \fBunmount\fR [\fB-f\fR] \fB-a\fR
.fi
.LP
.nf
\fBzfs\fR \fBunmount\fR [\fB-f\fR] \fB\fIfilesystem\fR|\fImountpoint\fR\fR
.fi
.LP
.nf
\fBzfs\fR \fBshare\fR \fB-a\fR
.fi
.LP
.nf
\fBzfs\fR \fBshare\fR \fIfilesystem\fR
.fi
.LP
.nf
\fBzfs\fR \fBunshare\fR [\fB-f\fR] \fB-a\fR
.fi
.LP
.nf
\fBzfs\fR \fBunshare\fR [\fB-f\fR] \fB\fIfilesystem\fR|\fImountpoint\fR\fR
.fi
.LP
.nf
\fBzfs\fR \fBsend\fR [\fB-i\fR \fIsnapshot1\fR] \fB\fIsnapshot2\fR\fR
.fi
.LP
.nf
\fBzfs\fR \fBreceive\fR [\fB-vnF\fR ] \fIfilesystem\fR|\fIvolume\fR|\fIsnapshot\fR
.fi
.LP
.nf
\fBzfs\fR \fBreceive\fR [\fB-vnF\fR ] \fB-d\fR \fB\fIfilesystem\fR\fR
@ -137,7 +161,6 @@ zfs \- configures ZFS file systems
.fi
.SH DESCRIPTION
.LP
The \fBzfs\fR command configures \fBZFS\fR datasets within a \fBZFS\fR storage pool, as described in \fBzpool\fR(1M). A
dataset is identified by a unique path within the \fBZFS\fR namespace. For example:
@ -186,16 +209,14 @@ A logical volume exported as a raw or block device. This type of dataset should
A read-only version of a file system or volume at a given point in time. It is specified as \fIfilesystem@name\fR or \fIvolume@name\fR.
.RE
.SS ZFS File System Hierarchy
.SS "ZFS File System Hierarchy"
.LP
A \fBZFS\fR storage pool is a logical collection of devices that provide space for datasets. A storage pool is also the root of the \fBZFS\fR file system hierarchy.
.LP
The root of the pool can be accessed as a file system, such as mounting and unmounting, taking snapshots, and setting properties. The physical storage characteristics, however, are managed by the \fBzpool\fR(1M) command.
.LP
See \fBzpool\fR(1M) for more information on creating and administering pools.
.SS Snapshots
.SS "Snapshots"
.LP
A snapshot is a read-only copy of a file system or volume. Snapshots can be created extremely quickly, and initially consume no additional space within the pool. As data within the active dataset changes, the snapshot consumes more data than would otherwise be shared with the active dataset.
.LP
@ -203,8 +224,7 @@ Snapshots can have arbitrary names. Snapshots of volumes can be cloned or rolled
.LP
File system snapshots can be accessed under the ".zfs/snapshot" directory in the root of the file system. Snapshots are automatically mounted on demand and may be unmounted at regular intervals. The visibility of the ".zfs" directory can be controlled by the "snapdir"
property.
.SS Clones
.SS "Clones"
.LP
A clone is a writable volume or file system whose initial contents are the same as another dataset. As with snapshots, creating a clone is nearly instantaneous, and initially consumes no additional space.
.LP
@ -213,8 +233,7 @@ property exposes this dependency, and the \fBdestroy\fR command lists any such d
.LP
The clone parent-child dependency relationship can be reversed by using the "\fBpromote\fR" subcommand. This causes the "origin" file system to become a clone of the specified file system, which makes it possible to destroy the file system that the clone
was created from.
.SS Mount Points
.SS "Mount Points"
.LP
Creating a \fBZFS\fR file system is a simple operation, so the number of file systems per system will likely be numerous. To cope with this, \fBZFS\fR automatically manages mounting and unmounting file systems without the need to edit the \fB/etc/vfstab\fR file.
All automatically managed file systems are mounted by \fBZFS\fR at boot time.
@ -227,8 +246,7 @@ A file system mountpoint property of "none" prevents the file system from being
.LP
If needed, \fBZFS\fR file systems can also be managed with traditional tools (\fBmount\fR, \fBumount\fR, \fB/etc/vfstab\fR). If a file system's mount point is set to "legacy", \fBZFS\fR makes no attempt to manage
the file system, and the administrator is responsible for mounting and unmounting the file system.
.SS Zones
.SS "Zones"
.LP
A \fBZFS\fR file system can be added to a non-global zone by using zonecfg's "\fBadd fs\fR" subcommand. A \fBZFS\fR file system that is added to a non-global zone must have its mountpoint property set to legacy.
.LP
@ -244,8 +262,7 @@ For more information about \fBzonecfg\fR syntax, see \fBzonecfg\fR(1M).
After a dataset is delegated to a non-global zone, the "zoned" property is automatically set. A zoned file system cannot be mounted in the global zone, since the zone administrator might have to set the mount point to an unacceptable value.
.LP
The global administrator can forcibly clear the "zoned" property, though this should be done with extreme care. The global administrator should verify that all the mount points are acceptable before clearing the property.
.SS Native Properties
.SS "Native Properties"
.LP
Properties are divided into two types, native properties and user defined properties. Native properties either export internal statistics or control \fBZFS\fR behavior. In addition, native properties are either editable or read-only. User properties have no effect on \fBZFS\fR behavior,
but you can use them to annotate datasets in a way that is meaningful in your environment. For more information about user properties, see the "User Properties" section.
@ -507,11 +524,13 @@ checking on user data. Disabling checksums is NOT a recommended practice.
.ne 2
.mk
.na
\fBcompression=\fIon\fR | \fIoff\fR | \fIlzjb\fR\fR
\fBcompression=\fIon\fR | \fIoff\fR | \fIlzjb\fR | \fIgzip\fR | \fIgzip-N\fR\fR
.ad
.sp .6
.RS 4n
Controls the compression algorithm used for this dataset. There is currently only one algorithm, "\fIlzjb\fR", though this may change in future releases. The default value is "off".
Controls the compression algorithm used for this dataset. The "lzjb" compression algorithm is optimized for performance while providing decent data compression. Setting compression to "on" uses the "lzjb" compression algorithm. The "gzip"
compression algorithm uses the same compression as the \fBgzip\fR(1) command. You can specify the "gzip" level by using the value "gzip-\fIN\fR",
where \fIN\fR is an integer from 1 (fastest) to 9 (best compression ratio). Currently, "gzip" is equivalent to "gzip-6" (which is also the default for \fBgzip\fR(1)).
.sp
This property can also be referred to by its shortened column name "compress".
.RE
@ -655,10 +674,10 @@ Controls whether extended attributes are enabled for this file system. The defau
.ad
.sp .6
.RS 4n
Controls the number of copies of data stored for this dataset. These copies are in addition to any redundancy provided by the pool (for example, mirroring or raid-z). The copies are stored on different disks if possible. The space used by multiple copies is charged to the associated
file and dataset, changing the "used" property and counting against quotas and reservations.
Controls the number of copies of data stored for this dataset. These copies are in addition to any redundancy provided by the pool, for example, mirroring or raid-z. The copies are stored on different disks, if possible. The space used by multiple copies is charged to the associated
file and dataset, changing the "used" property and counting against quotas and reservations.
.sp
Changing this property only affects newly-written data. Therefore, it is recommended that this property be set at file system creation time, using the "\fB-o\fR copies=" option.
Changing this property only affects newly-written data. Therefore, set this property at file system creation time by using the "\fB-o\fR copies=" option.
.RE
.sp
@ -672,12 +691,10 @@ Changing this property only affects newly-written data. Therefore, it is recomme
Controls whether the dataset is managed from within a jail. The default value is "off".
.RE
.SS iscsioptions
.SS "iscsioptions"
.LP
This read-only property, which is hidden, is used by the \fBiSCSI\fR target daemon to store persistent information, such as the \fBIQN\fR. It cannot be viewed or modified using the \fBzfs\fR command. The contents are not intended for external consumers.
.SS Temporary Mount Point Properties
.SS "Temporary Mount Point Properties"
.LP
When a file system is mounted, either through \fBmount\fR(1M) for legacy mounts or the "\fBzfs mount\fR" command for normal file systems,
its mount options are set according to its properties. The correlation between properties and mount options is as follows:
@ -697,8 +714,7 @@ its mount options are set according to its properties. The correlation between p
.LP
In addition, these options can be set on a per-mount basis using the \fB-o\fR option, without affecting the property that is stored on disk. The values specified on the command line override the values stored in the dataset. The \fB-nosuid\fR option is an alias for "nodevices,nosetuid".
These properties are reported as "temporary" by the "\fBzfs get\fR" command. If the properties are changed while the dataset is mounted, the new setting overrides any temporary settings.
.SS User Properties
.SS "User Properties"
.LP
In addition to the standard native properties, \fBZFS\fR supports arbitrary user properties. User properties have no effect on \fBZFS\fR behavior, but applications or administrators can use them to annotate datasets.
.LP
@ -711,8 +727,7 @@ different purposes. Property names beginning with "com.sun." are reserved for us
.LP
The values of user properties are arbitrary strings, are always inherited, and are never validated. All of the commands that operate on properties ("zfs list", "zfs get", "zfs set", etc.) can be used to manipulate both native properties and user properties.
Use the "\fBzfs inherit\fR" command to clear a user property . If the property is not defined in any parent dataset, it is removed entirely. Property values are limited to 1024 characters.
.SS Volumes as Swap or Dump Devices
.SS "Volumes as Swap or Dump Devices"
.LP
To set up a swap area, create a \fBZFS\fR volume of a specific size and then enable swap on that device. For more information, see the EXAMPLES section.
.LP
@ -720,7 +735,6 @@ Do not swap to a file on a \fBZFS\fR file system. A \fBZFS\fR swap file configur
.LP
Using a \fBZFS\fR volume as a dump device is not supported.
.SH SUBCOMMANDS
.LP
All subcommands that modify state are logged persistently to the pool in their original form.
.sp
@ -1466,10 +1480,10 @@ Detaches the given file system from the given jail.
.SH EXAMPLES
.LP
\fBExample 1 \fRCreating a ZFS File System Hierarchy
.LP
The following commands create a file system named "\fBpool/home\fR" and a file system named "\fBpool/home/bob\fR". The mount point "\fB/export/home\fR" is set for the parent file system, and automatically inherited
by the child file system.
.sp
.in +2
.nf
@ -1479,11 +1493,12 @@ by the child file system.
.fi
.in -2
.sp
.LP
\fBExample 2 \fRCreating a ZFS Snapshot
.LP
\fBExample 2 \fRCreating a ZFS Snapshot
.LP
The following command creates a snapshot named "yesterday". This snapshot is mounted on demand in the ".zfs/snapshot" directory at the root of the "\fBpool/home/bob\fR" file system.
.sp
.in +2
.nf
@ -1491,12 +1506,13 @@ The following command creates a snapshot named "yesterday". This snapshot is mou
.fi
.in -2
.sp
.LP
\fBExample 3 \fRTaking and destroying multiple snapshots
.LP
The following command creates snapshots named "\fByesterday\fR" of "\fBpool/home\fR" and all of its descendant file systems. Each snapshot is mounted on demand in the ".zfs/snapshot" directory at the root of its file system. The
second command destroys the newly created snapshots.
.sp
.in +2
.nf
@ -1505,11 +1521,12 @@ second command destroys the newly created snapshots.
.fi
.in -2
.sp
.LP
\fBExample 4 \fRTurning Off Compression
.LP
\fBExample 4 \fRTurning Off Compression
.LP
The following commands turn compression off for all file systems under "\fBpool/home\fR", but explicitly turns it on for "\fBpool/home/anne\fR".
.sp
.in +2
.nf
@ -1518,11 +1535,12 @@ The following commands turn compression off for all file systems under "\fBpool/
.fi
.in -2
.sp
.LP
\fBExample 5 \fRListing ZFS Datasets
.LP
\fBExample 5 \fRListing ZFS Datasets
.LP
The following command lists all active file systems and volumes in the system.
.sp
.in +2
.nf
@ -1538,11 +1556,12 @@ The following command lists all active file systems and volumes in the system.
.fi
.in -2
.sp
.LP
\fBExample 6 \fRSetting a Quota on a ZFS File System
.LP
\fBExample 6 \fRSetting a Quota on a ZFS File System
.LP
The following command sets a quota of 50 gbytes for "\fBpool/home/bob\fR".
.sp
.in +2
.nf
@ -1550,11 +1569,12 @@ The following command sets a quota of 50 gbytes for "\fBpool/home/bob\fR".
.fi
.in -2
.sp
.LP
\fBExample 7 \fRListing ZFS Properties
.LP
\fBExample 7 \fRListing ZFS Properties
.LP
The following command lists all properties for "\fBpool/home/bob\fR".
.sp
.in +2
.nf
@ -1596,6 +1616,7 @@ The following command lists all properties for "\fBpool/home/bob\fR".
.LP
The following command gets a single property value.
.sp
.in +2
.nf
@ -1607,6 +1628,7 @@ on
.LP
The following command lists all properties with local settings for "\fBpool/home/bob\fR".
.sp
.in +2
.nf
@ -1618,11 +1640,12 @@ The following command lists all properties with local settings for "\fBpool/home
.fi
.in -2
.sp
.LP
\fBExample 8 \fRRolling Back a ZFS File System
.LP
\fBExample 8 \fRRolling Back a ZFS File System
.LP
The following command reverts the contents of "\fBpool/home/anne\fR" to the snapshot named "\fByesterday\fR", deleting all intermediate snapshots.
.sp
.in +2
.nf
@ -1630,11 +1653,12 @@ The following command reverts the contents of "\fBpool/home/anne\fR" to the snap
.fi
.in -2
.sp
.LP
\fBExample 9 \fRCreating a ZFS Clone
.LP
\fBExample 9 \fRCreating a ZFS Clone
.LP
The following command creates a writable file system whose initial contents are the same as "\fBpool/home/bob@yesterday\fR".
.sp
.in +2
.nf
@ -1642,11 +1666,12 @@ The following command creates a writable file system whose initial contents are
.fi
.in -2
.sp
.LP
\fBExample 10 \fRPromoting a ZFS Clone
.LP
\fBExample 10 \fRPromoting a ZFS Clone
.LP
The following commands illustrate how to test out changes to a file system, and then replace the original file system with the changed one, using clones, clone promotion, and renaming:
.sp
.in +2
.nf
@ -1664,11 +1689,12 @@ The following commands illustrate how to test out changes to a file system, and
.fi
.in -2
.sp
.LP
\fBExample 11 \fRInheriting ZFS Properties
.LP
\fBExample 11 \fRInheriting ZFS Properties
.LP
The following command causes "\fBpool/home/bob\fR" and "\fBpool/home/anne\fR" to inherit the "checksum" property from their parent.
.sp
.in +2
.nf
@ -1676,12 +1702,13 @@ The following command causes "\fBpool/home/bob\fR" and "\fBpool/home/anne\fR" to
.fi
.in -2
.sp
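.LP
A representative invocation:
.sp
.in +2
.nf
# zfs inherit checksum pool/home/bob pool/home/anne
.fi
.in -2
.sp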
.LP
\fBExample 12 \fRRemotely Replicating ZFS Data
.LP
The following commands send a full stream and then an incremental stream to a remote machine, restoring them into "\fBpoolB/received/fs@a\fR" and "\fBpoolB/received/fs@b\fR", respectively. "\fBpoolB\fR" must contain
the file system "\fBpoolB/received\fR", and must not initially contain "\fBpoolB/received/fs\fR".
.sp
.in +2
.nf
@ -1692,12 +1719,13 @@ the file system "\fBpoolB/received\fR", and must not initially contain "\fBpoolB
.fi
.in -2
.sp
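.LP
Representative commands (the remote host name is illustrative):
.sp
.in +2
.nf
# zfs send pool/fs@a | ssh host zfs receive poolB/received/fs@a
# zfs send -i a pool/fs@b | ssh host zfs receive poolB/received/fs
.fi
.in -2
.sp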
.LP
\fBExample 13 \fRUsing the zfs receive -d Option
.LP
The following command sends a full stream of "\fBpoolA/fsA/fsB@snap\fR" to a remote machine, receiving it into "\fBpoolB/received/fsA/fsB@snap\fR". The "\fBfsA/fsB@snap\fR" portion of the received snapshot's name
is determined from the name of the sent snapshot. "\fBpoolB\fR" must contain the file system "\fBpoolB/received\fR". If "\fBpoolB/received/fsA\fR" does not exist, it will be created as an empty file system.
.sp
.in +2
.nf
@ -1707,11 +1735,12 @@ is determined from the name of the sent snapshot. "\fBpoolB\fR" must contain the
.fi
.in -2
.sp
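.LP
A representative command (remote host name illustrative); \fB-d\fR derives the target name from the name of the sent snapshot:
.sp
.in +2
.nf
# zfs send poolA/fsA/fsB@snap | ssh host zfs receive -d poolB/received
.fi
.in -2
.sp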
.LP
\fBExample 14 \fRCreating a ZFS volume as a Swap Device
.LP
The following example shows how to create a 5-Gbyte ZFS volume and then add the volume as a swap device.
.sp
.in +2
.nf
@ -1720,11 +1749,12 @@ The following example shows how to create a 5-Gbyte ZFS volume and then add the
.fi
.in -2
.sp
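.LP
Representative commands (volume name illustrative; the device path follows the Solaris \fB/dev/zvol\fR convention):
.sp
.in +2
.nf
# zfs create -V 5gb tank/vol
# swap -a /dev/zvol/dsk/tank/vol
.fi
.in -2
.sp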
.LP
\fBExample 15 \fRSetting User Properties
.LP
The following example sets the user defined "com.example:department" property for a dataset.
.sp
.in +2
.nf
@ -1732,11 +1762,12 @@ The following example sets the user defined "com.example:department" property fo
.fi
.in -2
.sp
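.LP
A representative invocation (dataset name illustrative):
.sp
.in +2
.nf
# zfs set com.example:department=12345 tank/accounting
.fi
.in -2
.sp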
.LP
\fBExample 16 \fRCreating a ZFS Volume as an iSCSI Target Device
.LP
The following example shows how to create a \fBZFS\fR volume as an \fBiSCSI\fR target.
.sp
.in +2
.nf
@ -1754,7 +1785,6 @@ Connections: 0
.LP
After the \fBiSCSI\fR target is created, set up the \fBiSCSI\fR initiator. For more information about the Solaris \fBiSCSI\fR initiator, see the Solaris Administration Guide: Devices and File Systems.
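.LP
For reference, a volume can be created and shared as an \fBiSCSI\fR target with commands along these lines (volume name illustrative):
.sp
.in +2
.nf
# zfs create -V 2g tank/volumes/v2
# zfs set shareiscsi=on tank/volumes/v2
# iscsitadm list target
.fi
.in -2
.sp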
.SH EXIT STATUS
.LP
The following exit values are returned:
.sp
@ -1791,7 +1821,6 @@ Invalid command line options were specified.
.RE
.SH ATTRIBUTES
.LP
See \fBattributes\fR(5) for descriptions of the following attributes:
.sp
@ -1810,6 +1839,5 @@ Interface StabilityEvolving
.TE
.SH SEE ALSO
.LP
\fBssh\fR(1), \fBmount\fR(1M), \fBshare\fR(1M), \fBunshare\fR(1M), \fBzonecfg\fR(1M), \fBzpool\fR(1M), \fBchmod\fR(2), \fBstat\fR(2), \fBfsync\fR(3c), \fBdfstab\fR(4), \fBattributes\fR(5)
\fBgzip\fR(1), \fBssh\fR(1), \fBmount\fR(1M), \fBshare\fR(1M), \fBunshare\fR(1M), \fBzonecfg\fR(1M), \fBzpool\fR(1M), \fBchmod\fR(2), \fBstat\fR(2), \fBfsync\fR(3c), \fBdfstab\fR(4), \fBattributes\fR(5)

View File

@ -206,7 +206,8 @@ get_usage(zfs_help_t idx)
"\treceive [-vnF] -d <filesystem>\n"));
case HELP_RENAME:
return (gettext("\trename <filesystem|volume|snapshot> "
"<filesystem|volume|snapshot>\n"));
"<filesystem|volume|snapshot>\n"
"\trename -r <snapshot> <snapshot>"));
case HELP_ROLLBACK:
return (gettext("\trollback [-rRf] <snapshot>\n"));
case HELP_SEND:
@ -1485,7 +1486,7 @@ zfs_do_list(int argc, char **argv)
}
/*
* zfs rename <fs | snap | vol> <fs | snap | vol>
* zfs rename [-r] <fs | snap | vol> <fs | snap | vol>
*
* Renames the given dataset to another of the same type.
*/
@ -1494,38 +1495,57 @@ static int
zfs_do_rename(int argc, char **argv)
{
zfs_handle_t *zhp;
int c;
int ret;
int recurse = 0;
/* check options */
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
while ((c = getopt(argc, argv, "r")) != -1) {
switch (c) {
case 'r':
recurse = 1;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 2) {
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source dataset "
"argument\n"));
usage(B_FALSE);
}
if (argc < 3) {
if (argc < 2) {
(void) fprintf(stderr, gettext("missing target dataset "
"argument\n"));
usage(B_FALSE);
}
if (argc > 3) {
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((zhp = zfs_open(g_zfs, argv[1], ZFS_TYPE_ANY)) == NULL)
if (recurse && strchr(argv[0], '@') == 0) {
(void) fprintf(stderr, gettext("source dataset for recursive "
"rename must be a snapshot\n"));
usage(B_FALSE);
}
if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_ANY)) == NULL)
return (1);
ret = (zfs_rename(zhp, argv[2]) != 0);
ret = (zfs_rename(zhp, argv[1], recurse) != 0);
if (!ret)
zpool_log_history(g_zfs, argc, argv, argv[2], B_FALSE, B_FALSE);
zpool_log_history(g_zfs, argc + optind, argv - optind, argv[1],
B_FALSE, B_FALSE);
zfs_close(zhp);
return (ret);
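/*
 * Example of the resulting command-line behavior (dataset names
 * illustrative): with -r, the matching snapshot of every descendant
 * of tank/home is renamed in a single operation:
 *
 *	# zfs rename -r tank/home@yesterday tank/home@tuesday
 */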

View File

@ -26,104 +26,124 @@ zpool \- configures ZFS storage pools
.nf
\fBzpool\fR [\fB-?\fR]
.fi
.LP
.nf
\fBzpool create\fR [\fB-fn\fR] [\fB-R\fR \fIroot\fR] [\fB-m\fR \fImountpoint\fR] \fIpool\fR \fIvdev ...\fR
.fi
.LP
.nf
\fBzpool destroy\fR [\fB-f\fR] \fIpool\fR
.fi
.LP
.nf
\fBzpool add\fR [\fB-fn\fR] \fIpool\fR \fIvdev\fR
.fi
.LP
.nf
\fBzpool remove\fR \fIpool\fR \fIvdev\fR
.fi
.LP
.nf
\fBzpool \fR \fBlist\fR [\fB-H\fR] [\fB-o\fR \fIfield\fR[,\fIfield\fR]*] [\fIpool\fR] ...
.fi
.LP
.nf
\fBzpool iostat\fR [\fB-v\fR] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]]
.fi
.LP
.nf
\fBzpool status\fR [\fB-xv\fR] [\fIpool\fR] ...
.fi
.LP
.nf
\fBzpool offline\fR [\fB-t\fR] \fIpool\fR \fIdevice\fR ...
.fi
.LP
.nf
\fBzpool online\fR \fIpool\fR \fIdevice\fR ...
.fi
.LP
.nf
\fBzpool clear\fR \fIpool\fR [\fIdevice\fR] ...
.fi
.LP
.nf
\fBzpool attach\fR [\fB-f\fR] \fIpool\fR \fIdevice\fR \fInew_device\fR
.fi
.LP
.nf
\fBzpool detach\fR \fIpool\fR \fIdevice\fR
.fi
.LP
.nf
\fBzpool replace\fR [\fB-f\fR] \fIpool\fR \fIdevice\fR [\fInew_device\fR]
.fi
.LP
.nf
\fBzpool scrub\fR [\fB-s\fR] \fIpool\fR ...
.fi
.LP
.nf
\fBzpool export\fR [\fB-f\fR] \fIpool\fR
.fi
.LP
.nf
\fBzpool import\fR [\fB-d\fR \fIdir\fR] [\fB-D\fR]
.fi
.LP
.nf
\fBzpool import\fR [\fB-d\fR \fIdir\fR] [\fB-D\fR] [\fB-f\fR] [\fB-o \fIopts\fR\fR] [\fB-R \fR\fIroot\fR] \fIpool\fR | \fIid\fR
[\fInewpool\fR]
.fi
.LP
.nf
\fBzpool import\fR [\fB-d\fR \fIdir\fR] [\fB-D\fR] [\fB-f\fR] [\fB-a\fR]
.fi
.LP
.nf
\fBzpool upgrade\fR
.fi
.LP
.nf
\fBzpool upgrade\fR \fB-v\fR
.fi
.LP
.nf
\fBzpool upgrade\fR [\fB-a\fR | \fIpool\fR]
.fi
.LP
.nf
\fBzpool history\fR [\fIpool\fR] ...
.fi
.SH DESCRIPTION
.LP
The \fBzpool\fR command configures \fBZFS\fR storage pools. A storage pool is a collection of devices that provides physical storage and data replication for \fBZFS\fR datasets.
.LP
All datasets within a storage pool share the same space. See \fBzfs\fR(1M) for information on managing datasets.
.SS Virtual Devices (vdevs)
.SS "Virtual Devices (vdevs)"
.LP
A "virtual device" describes a single device or a collection of devices organized according to certain performance and fault characteristics. The following virtual devices are supported:
.sp
@ -212,8 +232,7 @@ Virtual devices are specified one at a time on the command line, separated by wh
.in -2
.sp
.SS Device Failure and Recovery
.SS "Device Failure and Recovery"
.LP
\fBZFS\fR supports a rich set of mechanisms for handling device failure and data corruption. All metadata and data is checksummed, and \fBZFS\fR automatically repairs bad data from a good copy when corruption is detected.
.LP
@ -222,8 +241,7 @@ strongly discouraged. A single case of bit corruption can render some or all of
.LP
A pool's health status is described by one of three states: online, degraded, or faulted. An online pool has all devices operating normally. A degraded pool is one in which one or more devices have failed, but the data is still available due to a redundant configuration. A faulted pool has
one or more failed devices, and there is insufficient redundancy to replicate the missing data.
.SS Hot Spares
.SS "Hot Spares"
.LP
\fBZFS\fR allows devices to be associated with pools as "hot spares". These devices are not actively used in the pool, but when an active device fails, it is automatically replaced by a hot spare. To create a pool with hot spares, specify a "spare" \fBvdev\fR with any number of devices. For example,
.sp
@ -239,8 +257,7 @@ Spares can be shared across multiple pools, and can be added with the "zpool add
will remain there until the original device is replaced. At this point, the hot spare becomes available again if another device fails.
.LP
An in-progress spare replacement can be cancelled by detaching the hot spare. If the original faulted device is detached, then the hot spare assumes its place in the configuration, and is removed from the spare list of all active pools.
.SS Alternate Root Pools
.SS "Alternate Root Pools"
.LP
The "zpool create -R" and "zpool import -R" commands allow users to create and import a pool with a different root path. By default, whenever a pool is created or imported on a system, it is permanently added so that it is available whenever the system boots. For
removable media, or in recovery situations, this may not always be desirable. An alternate root pool does not persist on the system. Instead, it exists only until exported or the system is rebooted, at which point it will have to be imported again.
@ -248,8 +265,7 @@ removable media, or when in recovery situations, this may not always be desirabl
In addition, all mount points in the pool are prefixed with the given root, so a pool can be constrained to a particular area of the file system. This is most useful when importing unknown pools from removable media, as the mount points of any file systems cannot be trusted.
.LP
When creating an alternate root pool, the default mount point is "/", rather than the normal default "/\fIpool\fR".
.SS Subcommands
.SS "Subcommands"
.LP
All subcommands that modify state are logged persistently to the pool in their original form.
.LP
@ -879,9 +895,9 @@ Displays the command history of the specified pools (or all pools if no pool is
.SH EXAMPLES
.LP
\fBExample 1 \fRCreating a RAID-Z Storage Pool
.LP
The following command creates a pool with a single \fBraidz\fR root \fIvdev\fR that consists of six disks.
.sp
.in +2
.nf
@ -889,11 +905,12 @@ The following command creates a pool with a single \fBraidz\fR root \fIvdev\fR t
.fi
.in -2
.sp
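.LP
A representative invocation (device names illustrative):
.sp
.in +2
.nf
# zpool create tank raidz c0t0d0 c0t1d0 c0t2d0 c0t3d0 c0t4d0 c0t5d0
.fi
.in -2
.sp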
.LP
\fBExample 2 \fRCreating a Mirrored Storage Pool
.LP
The following command creates a pool with two mirrors, where each mirror contains two disks.
.sp
.in +2
.nf
@ -901,11 +918,12 @@ The following command creates a pool with two mirrors, where each mirror contain
.fi
.in -2
.sp
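.LP
A representative invocation (device names illustrative):
.sp
.in +2
.nf
# zpool create tank mirror c0d0 c1d0 mirror c2d0 c3d0
.fi
.in -2
.sp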
.LP
\fBExample 3 \fRCreating a ZFS Storage Pool by Using Slices
.LP
The following command creates an unmirrored pool using two disk slices.
.sp
.in +2
.nf
@ -913,11 +931,12 @@ The following command creates an unmirrored pool using two disk slices.
.fi
.in -2
.sp
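.LP
A representative invocation (slice names illustrative):
.sp
.in +2
.nf
# zpool create tank /dev/dsk/c0t0d0s1 c0t1d0s4
.fi
.in -2
.sp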
.LP
\fBExample 4 \fRCreating a ZFS Storage Pool by Using Files
.LP
The following command creates an unmirrored pool using files. While not recommended, a pool based on files can be useful for experimental purposes.
.sp
.in +2
.nf
@ -925,11 +944,12 @@ The following command creates an unmirrored pool using files. While not recommen
.fi
.in -2
.sp
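.LP
A representative invocation (file paths illustrative; the backing files must already exist):
.sp
.in +2
.nf
# zpool create tank /path/to/file/a /path/to/file/b
.fi
.in -2
.sp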
.LP
\fBExample 5 \fRAdding a Mirror to a ZFS Storage Pool
.LP
The following command adds two mirrored disks to the pool "\fItank\fR", assuming the pool is already made up of two-way mirrors. The additional space is immediately available to any datasets within the pool.
.sp
.in +2
.nf
@ -937,13 +957,15 @@ The following command adds two mirrored disks to the pool "\fItank\fR", assuming
.fi
.in -2
.sp
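.LP
A representative invocation (device names illustrative):
.sp
.in +2
.nf
# zpool add tank mirror c1t0d0 c1t1d0
.fi
.in -2
.sp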
.LP
\fBExample 6 \fRListing Available ZFS Storage Pools
.LP
The following command lists all available pools on the system. In this case, the pool \fIzion\fR is faulted due to a missing device.
.LP
The results from this command are similar to the following:
.sp
.in +2
.nf
@ -955,11 +977,12 @@ The results from this command are similar to the following:
.fi
.in -2
.sp
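.LP
The invocation itself takes no arguments; the faulted pool would appear with \fBFAULTED\fR in the \fBHEALTH\fR column of the output:
.sp
.in +2
.nf
# zpool list
.fi
.in -2
.sp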
.LP
\fBExample 7 \fRDestroying a ZFS Storage Pool
.LP
The following command destroys the pool "\fItank\fR" and any datasets contained within.
.sp
.in +2
.nf
@ -967,11 +990,12 @@ The following command destroys the pool "\fItank\fR" and any datasets contained
.fi
.in -2
.sp
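.LP
A representative invocation; \fB-f\fR forces the destruction even if datasets are busy:
.sp
.in +2
.nf
# zpool destroy -f tank
.fi
.in -2
.sp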
.LP
\fBExample 8 \fRExporting a ZFS Storage Pool
.LP
The following command exports the devices in pool \fItank\fR so that they can be relocated or later imported.
.sp
.in +2
.nf
@ -979,13 +1003,15 @@ The following command exports the devices in pool \fItank\fR so that they can be
.fi
.in -2
.sp
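.LP
A representative invocation:
.sp
.in +2
.nf
# zpool export tank
.fi
.in -2
.sp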
.LP
\fBExample 9 \fRImporting a ZFS Storage Pool
.LP
The following command displays available pools, and then imports the pool "\fItank\fR" for use on the system.
.LP
The results from this command are similar to the following:
.sp
.in +2
.nf
@ -1005,11 +1031,12 @@ config:
.fi
.in -2
.sp
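.LP
Representative commands; the first lists importable pools, the second imports one by name:
.sp
.in +2
.nf
# zpool import
# zpool import tank
.fi
.in -2
.sp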
.LP
\fBExample 10 \fRUpgrading All ZFS Storage Pools to the Current Version
.LP
The following command upgrades all ZFS storage pools to the current version of the software.
.sp
.in +2
.nf
@ -1018,11 +1045,12 @@ This system is currently running ZFS version 2.
.fi
.in -2
.sp
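.LP
A representative invocation; \fB-a\fR upgrades every pool on the system:
.sp
.in +2
.nf
# zpool upgrade -a
.fi
.in -2
.sp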
.LP
\fBExample 11 \fRManaging Hot Spares
.LP
The following command creates a new pool with an available hot spare:
.sp
.in +2
.nf
@ -1033,6 +1061,7 @@ The following command creates a new pool with an available hot spare:
.LP
If one of the disks were to fail, the pool would be reduced to the degraded state. The failed device can be replaced using the following command:
.sp
.in +2
.nf
@ -1043,6 +1072,7 @@ If one of the disks were to fail, the pool would be reduced to the degraded stat
.LP
Once the data has been resilvered, the spare is automatically removed and is made available should another device fail. The hot spare can be permanently removed from the pool using the following command:
.sp
.in +2
.nf
@ -1052,7 +1082,6 @@ Once the data has been resilvered, the spare is automatically removed and is mad
.sp
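.LP
Representative commands for the three steps above (device names illustrative): create the pool with a spare, replace a failed device, and finally remove the spare:
.sp
.in +2
.nf
# zpool create tank mirror c0t0d0 c0t1d0 spare c0t2d0
# zpool replace tank c0t0d0 c0t3d0
# zpool remove tank c0t2d0
.fi
.in -2
.sp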
.SH EXIT STATUS
.LP
The following exit values are returned:
.sp
@ -1089,7 +1118,6 @@ Invalid command line options were specified.
.RE
.SH ATTRIBUTES
.LP
See \fBattributes\fR(5) for descriptions of the following attributes:
.sp
@ -1108,6 +1136,5 @@ Interface StabilityEvolving
.TE
.SH SEE ALSO
.LP
\fBzfs\fR(1M), \fBattributes\fR(5)

View File

@ -246,7 +246,7 @@ extern uint16_t zio_zil_fail_shift;
#define ZTEST_DIROBJ_BLOCKSIZE (1 << 10)
#define ZTEST_DIRSIZE 256
static void usage(boolean_t);
static void usage(boolean_t) __NORETURN;
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's

View File

@ -336,7 +336,7 @@ extern int zfs_destroy_snaps(zfs_handle_t *, char *);
extern int zfs_clone(zfs_handle_t *, const char *, nvlist_t *);
extern int zfs_snapshot(libzfs_handle_t *, const char *, boolean_t);
extern int zfs_rollback(zfs_handle_t *, zfs_handle_t *, int);
extern int zfs_rename(zfs_handle_t *, const char *);
extern int zfs_rename(zfs_handle_t *, const char *, int);
extern int zfs_send(zfs_handle_t *, const char *, int);
extern int zfs_receive(libzfs_handle_t *, const char *, int, int, int,
boolean_t, int);

View File

@ -50,6 +50,8 @@
#include "zfs_prop.h"
#include "libzfs_impl.h"
static int zvol_create_link_common(libzfs_handle_t *, const char *, int);
/*
* Given a single type (not a mask of types), return the type in a human
* readable form.
@ -2531,10 +2533,15 @@ zfs_promote(zfs_handle_t *zhp)
return (ret);
}
struct createdata {
const char *cd_snapname;
int cd_ifexists;
};
static int
zfs_create_link_cb(zfs_handle_t *zhp, void *arg)
{
char *snapname = arg;
struct createdata *cd = arg;
int ret;
if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
@ -2542,8 +2549,9 @@ zfs_create_link_cb(zfs_handle_t *zhp, void *arg)
(void) strlcpy(name, zhp->zfs_name, sizeof (name));
(void) strlcat(name, "@", sizeof (name));
(void) strlcat(name, snapname, sizeof (name));
(void) zvol_create_link(zhp->zfs_hdl, name);
(void) strlcat(name, cd->cd_snapname, sizeof (name));
(void) zvol_create_link_common(zhp->zfs_hdl, name,
cd->cd_ifexists);
/*
* NB: this is simply a best-effort. We don't want to
* return an error, because then we wouldn't visit all
@ -2551,7 +2559,7 @@ zfs_create_link_cb(zfs_handle_t *zhp, void *arg)
*/
}
ret = zfs_iter_filesystems(zhp, zfs_create_link_cb, snapname);
ret = zfs_iter_filesystems(zhp, zfs_create_link_cb, cd);
zfs_close(zhp);
@ -2603,8 +2611,11 @@ zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive)
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create snapshot '%s@%s'"), zc.zc_name, zc.zc_value);
if (ret == 0 && recursive) {
(void) zfs_iter_filesystems(zhp,
zfs_create_link_cb, (char *)delim+1);
struct createdata cd;
cd.cd_snapname = delim + 1;
cd.cd_ifexists = B_FALSE;
(void) zfs_iter_filesystems(zhp, zfs_create_link_cb, &cd);
}
if (ret == 0 && zhp->zfs_type == ZFS_TYPE_VOLUME) {
ret = zvol_create_link(zhp->zfs_hdl, path);
@ -3199,12 +3210,14 @@ zfs_iter_dependents(zfs_handle_t *zhp, boolean_t allowrecursion,
* Renames the given dataset.
*/
int
zfs_rename(zfs_handle_t *zhp, const char *target)
zfs_rename(zfs_handle_t *zhp, const char *target, int recursive)
{
int ret;
zfs_cmd_t zc = { 0 };
char *delim;
prop_changelist_t *cl;
prop_changelist_t *cl = NULL;
zfs_handle_t *zhrp = NULL;
char *parentname = NULL;
char parent[ZFS_MAXNAMELEN];
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
@ -3252,6 +3265,12 @@ zfs_rename(zfs_handle_t *zhp, const char *target)
if (!zfs_validate_name(hdl, target, zhp->zfs_type))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
} else {
if (recursive) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"recursive rename must be a snapshot"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
if (!zfs_validate_name(hdl, target, zhp->zfs_type))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
uint64_t unused;
@ -3291,20 +3310,42 @@ zfs_rename(zfs_handle_t *zhp, const char *target)
return (zfs_error(hdl, EZFS_ZONED, errbuf));
}
if ((cl = changelist_gather(zhp, ZFS_PROP_NAME, 0)) == NULL)
return (-1);
if (recursive) {
struct destroydata dd;
if (changelist_haszonedchild(cl)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
(void) zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
parentname = strdup(zhp->zfs_name);
delim = strchr(parentname, '@');
*delim = '\0';
zhrp = zfs_open(zhp->zfs_hdl, parentname, ZFS_TYPE_ANY);
if (zhrp == NULL) {
return (-1);
}
dd.snapname = delim + 1;
dd.gotone = B_FALSE;
dd.closezhp = B_FALSE;
/* We remove any zvol links prior to renaming them */
ret = zfs_iter_filesystems(zhrp, zfs_remove_link_cb, &dd);
if (ret) {
goto error;
}
} else {
if ((cl = changelist_gather(zhp, ZFS_PROP_NAME, 0)) == NULL)
return (-1);
if (changelist_haszonedchild(cl)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
(void) zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
if ((ret = changelist_prefix(cl)) != 0)
goto error;
}
if ((ret = changelist_prefix(cl)) != 0)
goto error;
if (ZFS_IS_VOLUME(zhp))
zc.zc_objset_type = DMU_OST_ZVOL;
else
@ -3313,22 +3354,65 @@ zfs_rename(zfs_handle_t *zhp, const char *target)
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, target, sizeof (zc.zc_value));
zc.zc_cookie = recursive;
if ((ret = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_RENAME, &zc)) != 0) {
(void) zfs_standard_error(zhp->zfs_hdl, errno, errbuf);
/*
* if it was recursive, the one that actually failed will
* be in zc.zc_name
*/
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot rename to '%s'"), zc.zc_name);
if (recursive && errno == EEXIST) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"a child dataset already has a snapshot "
"with the new name"));
(void) zfs_error(hdl, EZFS_CROSSTARGET, errbuf);
} else {
(void) zfs_standard_error(zhp->zfs_hdl, errno, errbuf);
}
/*
* On failure, we still want to remount any filesystems that
* were previously mounted, so we don't alter the system state.
*/
(void) changelist_postfix(cl);
} else {
changelist_rename(cl, zfs_get_name(zhp), target);
if (recursive) {
struct createdata cd;
ret = changelist_postfix(cl);
/* only create links for datasets that already existed */
cd.cd_snapname = delim + 1;
cd.cd_ifexists = B_TRUE;
(void) zfs_iter_filesystems(zhrp, zfs_create_link_cb,
&cd);
} else {
(void) changelist_postfix(cl);
}
} else {
if (recursive) {
struct createdata cd;
/* only create links for datasets that already existed */
cd.cd_snapname = strchr(target, '@') + 1;
cd.cd_ifexists = B_TRUE;
ret = zfs_iter_filesystems(zhrp, zfs_create_link_cb,
&cd);
} else {
changelist_rename(cl, zfs_get_name(zhp), target);
ret = changelist_postfix(cl);
}
}
error:
changelist_free(cl);
if (parentname) {
free(parentname);
}
if (zhrp) {
zfs_close(zhrp);
}
if (cl) {
changelist_free(cl);
}
return (ret);
}
@ -3338,6 +3422,12 @@ zfs_rename(zfs_handle_t *zhp, const char *target)
*/
int
zvol_create_link(libzfs_handle_t *hdl, const char *dataset)
{
return (zvol_create_link_common(hdl, dataset, B_FALSE));
}
static int
zvol_create_link_common(libzfs_handle_t *hdl, const char *dataset, int ifexists)
{
zfs_cmd_t zc = { 0 };
#if 0
@ -3359,6 +3449,18 @@ zvol_create_link(libzfs_handle_t *hdl, const char *dataset)
*/
return (0);
case ENOENT:
/*
* Dataset does not exist in the kernel. If we
* don't care (see zfs_rename), then ignore the
* error quietly.
*/
if (ifexists) {
return (0);
}
/* FALLTHROUGH */
default:
return (zfs_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot create device links "

View File

@ -37,6 +37,7 @@
#include <sys/zap.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
@ -639,7 +640,6 @@ dsl_dataset_create_sync(dsl_dir_t *pdd,
struct destroyarg {
dsl_sync_task_group_t *dstg;
char *snapname;
void *tag;
char *failed;
};
@ -655,7 +655,7 @@ dsl_snapshot_destroy_one(char *name, void *arg)
(void) strcat(name, da->snapname);
err = dsl_dataset_open(name,
DS_MODE_EXCLUSIVE | DS_MODE_READONLY | DS_MODE_INCONSISTENT,
da->tag, &ds);
da->dstg, &ds);
cp = strchr(name, '@');
*cp = '\0';
if (err == ENOENT)
@ -666,7 +666,7 @@ dsl_snapshot_destroy_one(char *name, void *arg)
}
dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
dsl_dataset_destroy_sync, ds, da->tag, 0);
dsl_dataset_destroy_sync, ds, da->dstg, 0);
return (0);
}
@ -695,7 +695,6 @@ dsl_snapshots_destroy(char *fsname, char *snapname)
return (err);
da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
da.snapname = snapname;
da.tag = FTAG;
da.failed = fsname;
err = dmu_objset_find(fsname,
@ -717,7 +716,7 @@ dsl_snapshots_destroy(char *fsname, char *snapname)
* closed the ds
*/
if (err)
dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, FTAG);
dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, da.dstg);
}
dsl_sync_task_group_destroy(da.dstg);
@ -1546,6 +1545,11 @@ dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
err = EEXIST;
else if (err == ENOENT)
err = 0;
/* dataset name + 1 for the "@" + the new snapshot name must fit */
if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
err = ENAMETOOLONG;
return (err);
}
@ -1578,9 +1582,114 @@ dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
dsl_dataset_close(hds, DS_MODE_NONE, FTAG);
}
struct renamearg {
dsl_sync_task_group_t *dstg;
char failed[MAXPATHLEN];
char *oldsnap;
char *newsnap;
};
static int
dsl_snapshot_rename_one(char *name, void *arg)
{
struct renamearg *ra = arg;
dsl_dataset_t *ds = NULL;
char *cp;
int err;
cp = name + strlen(name);
*cp = '@';
(void) strcpy(cp + 1, ra->oldsnap);
err = dsl_dataset_open(name, DS_MODE_READONLY | DS_MODE_STANDARD,
ra->dstg, &ds);
if (err == ENOENT) {
*cp = '\0';
return (0);
}
if (err) {
(void) strcpy(ra->failed, name);
*cp = '\0';
dsl_dataset_close(ds, DS_MODE_STANDARD, ra->dstg);
return (err);
}
#ifdef _KERNEL
/* for all filesystems undergoing rename, we'll need to unmount it */
(void) zfs_unmount_snap(name, NULL);
#endif
*cp = '\0';
dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
return (0);
}
static int
dsl_recursive_rename(char *oldname, const char *newname)
{
int err;
struct renamearg *ra;
dsl_sync_task_t *dst;
spa_t *spa;
char *cp, *fsname = spa_strdup(oldname);
int len = strlen(oldname);
/* truncate the snapshot name to get the fsname */
cp = strchr(fsname, '@');
*cp = '\0';
cp = strchr(fsname, '/');
if (cp) {
*cp = '\0';
err = spa_open(fsname, &spa, FTAG);
*cp = '/';
} else {
err = spa_open(fsname, &spa, FTAG);
}
if (err) {
kmem_free(fsname, len + 1);
return (err);
}
ra = kmem_alloc(sizeof (struct renamearg), KM_SLEEP);
ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
ra->oldsnap = strchr(oldname, '@') + 1;
ra->newsnap = strchr(newname, '@') + 1;
*ra->failed = '\0';
err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
DS_FIND_CHILDREN);
kmem_free(fsname, len + 1);
if (err == 0) {
err = dsl_sync_task_group_wait(ra->dstg);
}
for (dst = list_head(&ra->dstg->dstg_tasks); dst;
dst = list_next(&ra->dstg->dstg_tasks, dst)) {
dsl_dataset_t *ds = dst->dst_arg1;
if (dst->dst_err) {
dsl_dir_name(ds->ds_dir, ra->failed);
(void) strcat(ra->failed, "@");
(void) strcat(ra->failed, ra->newsnap);
}
dsl_dataset_close(ds, DS_MODE_STANDARD, ra->dstg);
}
(void) strcpy(oldname, ra->failed);
dsl_sync_task_group_destroy(ra->dstg);
kmem_free(ra, sizeof (struct renamearg));
spa_close(spa, FTAG);
return (err);
}
#pragma weak dmu_objset_rename = dsl_dataset_rename
int
dsl_dataset_rename(const char *oldname, const char *newname)
dsl_dataset_rename(char *oldname, const char *newname,
boolean_t recursive)
{
dsl_dir_t *dd;
dsl_dataset_t *ds;
@ -1611,16 +1720,20 @@ dsl_dataset_rename(const char *oldname, const char *newname)
if (strncmp(oldname, newname, tail - newname) != 0)
return (EXDEV);
err = dsl_dataset_open(oldname,
DS_MODE_READONLY | DS_MODE_STANDARD, FTAG, &ds);
if (err)
return (err);
if (recursive) {
err = dsl_recursive_rename(oldname, newname);
} else {
err = dsl_dataset_open(oldname,
DS_MODE_READONLY | DS_MODE_STANDARD, FTAG, &ds);
if (err)
return (err);
err = dsl_sync_task_do(ds->ds_dir->dd_pool,
dsl_dataset_snapshot_rename_check,
dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
err = dsl_sync_task_do(ds->ds_dir->dd_pool,
dsl_dataset_snapshot_rename_check,
dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
dsl_dataset_close(ds, DS_MODE_STANDARD, FTAG);
dsl_dataset_close(ds, DS_MODE_STANDARD, FTAG);
}
return (err);
}

View File

@ -164,7 +164,8 @@ int dmu_objset_destroy(const char *name);
int dmu_snapshots_destroy(char *fsname, char *snapname);
int dmu_objset_rollback(const char *name);
int dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive);
int dmu_objset_rename(const char *name, const char *newname);
int dmu_objset_rename(const char *name, const char *newname,
boolean_t recursive);
int dmu_objset_find(char *name, int func(char *, void *), void *arg,
int flags);
void dmu_objset_byteswap(void *buf, size_t size);

View File

@ -132,7 +132,7 @@ int dsl_snapshots_destroy(char *fsname, char *snapname);
dsl_checkfunc_t dsl_dataset_snapshot_check;
dsl_syncfunc_t dsl_dataset_snapshot_sync;
int dsl_dataset_rollback(dsl_dataset_t *ds);
int dsl_dataset_rename(const char *name, const char *newname);
int dsl_dataset_rename(char *name, const char *newname, boolean_t recursive);
int dsl_dataset_promote(const char *name);
void *dsl_dataset_set_user_ptr(dsl_dataset_t *ds,

View File

@ -152,6 +152,7 @@ typedef struct zfs_create_data {
extern int zfs_secpolicy_write(const char *dataset, cred_t *cr);
extern int zfs_busy(void);
extern int zfs_unmount_snap(char *, void *);
#endif /* _KERNEL */

View File

@ -590,7 +590,7 @@ zfsctl_snapdir_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm,
return (ENOENT);
}
err = dmu_objset_rename(from, to);
err = dmu_objset_rename(from, to, B_FALSE);
if (err == 0)
zfsctl_rename_snap(sdp, sep, tnm);

View File

@ -1354,7 +1354,7 @@ zfs_ioc_snapshot(zfs_cmd_t *zc)
zc->zc_value, zc->zc_cookie));
}
static int
int
zfs_unmount_snap(char *name, void *arg)
{
char *snapname = arg;
@ -1430,18 +1430,25 @@ zfs_ioc_rollback(zfs_cmd_t *zc)
static int
zfs_ioc_rename(zfs_cmd_t *zc)
{
int recursive = zc->zc_cookie & 1;
zc->zc_value[sizeof (zc->zc_value) - 1] = '\0';
if (dataset_namecheck(zc->zc_value, NULL, NULL) != 0)
return (EINVAL);
if (strchr(zc->zc_name, '@') != NULL &&
/*
* Unmount snapshot unless we're doing a recursive rename,
* in which case the dataset code figures out which snapshots
* to unmount.
*/
if (!recursive && strchr(zc->zc_name, '@') != NULL &&
zc->zc_objset_type == DMU_OST_ZFS) {
int err = zfs_unmount_snap(zc->zc_name, NULL);
if (err)
return (err);
}
return (dmu_objset_rename(zc->zc_name, zc->zc_value));
return (dmu_objset_rename(zc->zc_name, zc->zc_value, recursive));
}
static int

View File

@ -37,6 +37,7 @@
#include <sys/zap.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
static dsl_checkfunc_t dsl_dataset_destroy_begin_check;
static dsl_syncfunc_t dsl_dataset_destroy_begin_sync;
@ -639,7 +640,6 @@ dsl_dataset_create_sync(dsl_dir_t *pdd,
struct destroyarg {
dsl_sync_task_group_t *dstg;
char *snapname;
void *tag;
char *failed;
};
@ -655,7 +655,7 @@ dsl_snapshot_destroy_one(char *name, void *arg)
(void) strcat(name, da->snapname);
err = dsl_dataset_open(name,
DS_MODE_EXCLUSIVE | DS_MODE_READONLY | DS_MODE_INCONSISTENT,
da->tag, &ds);
da->dstg, &ds);
cp = strchr(name, '@');
*cp = '\0';
if (err == ENOENT)
@ -666,7 +666,7 @@ dsl_snapshot_destroy_one(char *name, void *arg)
}
dsl_sync_task_create(da->dstg, dsl_dataset_destroy_check,
dsl_dataset_destroy_sync, ds, da->tag, 0);
dsl_dataset_destroy_sync, ds, da->dstg, 0);
return (0);
}
@ -695,7 +695,6 @@ dsl_snapshots_destroy(char *fsname, char *snapname)
return (err);
da.dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
da.snapname = snapname;
da.tag = FTAG;
da.failed = fsname;
err = dmu_objset_find(fsname,
@ -717,7 +716,7 @@ dsl_snapshots_destroy(char *fsname, char *snapname)
* closed the ds
*/
if (err)
dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, FTAG);
dsl_dataset_close(ds, DS_MODE_EXCLUSIVE, da.dstg);
}
dsl_sync_task_group_destroy(da.dstg);
@ -1546,6 +1545,11 @@ dsl_dataset_snapshot_rename_check(void *arg1, void *arg2, dmu_tx_t *tx)
err = EEXIST;
else if (err == ENOENT)
err = 0;
/* dataset name + 1 for the "@" + the new snapshot name must fit */
if (dsl_dir_namelen(ds->ds_dir) + 1 + strlen(newsnapname) >= MAXNAMELEN)
err = ENAMETOOLONG;
return (err);
}
@ -1578,9 +1582,114 @@ dsl_dataset_snapshot_rename_sync(void *arg1, void *arg2, dmu_tx_t *tx)
dsl_dataset_close(hds, DS_MODE_NONE, FTAG);
}
struct renamearg {
dsl_sync_task_group_t *dstg;
char failed[MAXPATHLEN];
char *oldsnap;
char *newsnap;
};
static int
dsl_snapshot_rename_one(char *name, void *arg)
{
struct renamearg *ra = arg;
dsl_dataset_t *ds = NULL;
char *cp;
int err;
cp = name + strlen(name);
*cp = '@';
(void) strcpy(cp + 1, ra->oldsnap);
err = dsl_dataset_open(name, DS_MODE_READONLY | DS_MODE_STANDARD,
ra->dstg, &ds);
if (err == ENOENT) {
*cp = '\0';
return (0);
}
if (err) {
(void) strcpy(ra->failed, name);
*cp = '\0';
dsl_dataset_close(ds, DS_MODE_STANDARD, ra->dstg);
return (err);
}
#ifdef _KERNEL
/* for all filesystems undergoing rename, we'll need to unmount it */
(void) zfs_unmount_snap(name, NULL);
#endif
*cp = '\0';
dsl_sync_task_create(ra->dstg, dsl_dataset_snapshot_rename_check,
dsl_dataset_snapshot_rename_sync, ds, ra->newsnap, 0);
return (0);
}
static int
dsl_recursive_rename(char *oldname, const char *newname)
{
int err;
struct renamearg *ra;
dsl_sync_task_t *dst;
spa_t *spa;
char *cp, *fsname = spa_strdup(oldname);
int len = strlen(oldname);
/* truncate the snapshot name to get the fsname */
cp = strchr(fsname, '@');
*cp = '\0';
cp = strchr(fsname, '/');
if (cp) {
*cp = '\0';
err = spa_open(fsname, &spa, FTAG);
*cp = '/';
} else {
err = spa_open(fsname, &spa, FTAG);
}
if (err) {
kmem_free(fsname, len + 1);
return (err);
}
ra = kmem_alloc(sizeof (struct renamearg), KM_SLEEP);
ra->dstg = dsl_sync_task_group_create(spa_get_dsl(spa));
ra->oldsnap = strchr(oldname, '@') + 1;
ra->newsnap = strchr(newname, '@') + 1;
*ra->failed = '\0';
err = dmu_objset_find(fsname, dsl_snapshot_rename_one, ra,
DS_FIND_CHILDREN);
kmem_free(fsname, len + 1);
if (err == 0) {
err = dsl_sync_task_group_wait(ra->dstg);
}
for (dst = list_head(&ra->dstg->dstg_tasks); dst;
dst = list_next(&ra->dstg->dstg_tasks, dst)) {
dsl_dataset_t *ds = dst->dst_arg1;
if (dst->dst_err) {
dsl_dir_name(ds->ds_dir, ra->failed);
(void) strcat(ra->failed, "@");
(void) strcat(ra->failed, ra->newsnap);
}
dsl_dataset_close(ds, DS_MODE_STANDARD, ra->dstg);
}
(void) strcpy(oldname, ra->failed);
dsl_sync_task_group_destroy(ra->dstg);
kmem_free(ra, sizeof (struct renamearg));
spa_close(spa, FTAG);
return (err);
}
#pragma weak dmu_objset_rename = dsl_dataset_rename
int
dsl_dataset_rename(const char *oldname, const char *newname)
dsl_dataset_rename(char *oldname, const char *newname,
boolean_t recursive)
{
dsl_dir_t *dd;
dsl_dataset_t *ds;
@ -1611,16 +1720,20 @@ dsl_dataset_rename(const char *oldname, const char *newname)
if (strncmp(oldname, newname, tail - newname) != 0)
return (EXDEV);
err = dsl_dataset_open(oldname,
DS_MODE_READONLY | DS_MODE_STANDARD, FTAG, &ds);
if (err)
return (err);
if (recursive) {
err = dsl_recursive_rename(oldname, newname);
} else {
err = dsl_dataset_open(oldname,
DS_MODE_READONLY | DS_MODE_STANDARD, FTAG, &ds);
if (err)
return (err);
err = dsl_sync_task_do(ds->ds_dir->dd_pool,
dsl_dataset_snapshot_rename_check,
dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
err = dsl_sync_task_do(ds->ds_dir->dd_pool,
dsl_dataset_snapshot_rename_check,
dsl_dataset_snapshot_rename_sync, ds, (char *)tail, 1);
dsl_dataset_close(ds, DS_MODE_STANDARD, FTAG);
dsl_dataset_close(ds, DS_MODE_STANDARD, FTAG);
}
return (err);
}

View File

@ -164,7 +164,8 @@ int dmu_objset_destroy(const char *name);
int dmu_snapshots_destroy(char *fsname, char *snapname);
int dmu_objset_rollback(const char *name);
int dmu_objset_snapshot(char *fsname, char *snapname, boolean_t recursive);
int dmu_objset_rename(const char *name, const char *newname);
int dmu_objset_rename(const char *name, const char *newname,
boolean_t recursive);
int dmu_objset_find(char *name, int func(char *, void *), void *arg,
int flags);
void dmu_objset_byteswap(void *buf, size_t size);

View File

@ -132,7 +132,7 @@ int dsl_snapshots_destroy(char *fsname, char *snapname);
dsl_checkfunc_t dsl_dataset_snapshot_check;
dsl_syncfunc_t dsl_dataset_snapshot_sync;
int dsl_dataset_rollback(dsl_dataset_t *ds);
int dsl_dataset_rename(const char *name, const char *newname);
int dsl_dataset_rename(char *name, const char *newname, boolean_t recursive);
int dsl_dataset_promote(const char *name);
void *dsl_dataset_set_user_ptr(dsl_dataset_t *ds,

View File

@ -152,6 +152,7 @@ typedef struct zfs_create_data {
extern int zfs_secpolicy_write(const char *dataset, cred_t *cr);
extern int zfs_busy(void);
extern int zfs_unmount_snap(char *, void *);
#endif /* _KERNEL */

View File

@ -590,7 +590,7 @@ zfsctl_snapdir_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm,
return (ENOENT);
}
err = dmu_objset_rename(from, to);
err = dmu_objset_rename(from, to, B_FALSE);
if (err == 0)
zfsctl_rename_snap(sdp, sep, tnm);

View File

@ -1354,7 +1354,7 @@ zfs_ioc_snapshot(zfs_cmd_t *zc)
zc->zc_value, zc->zc_cookie));
}
static int
int
zfs_unmount_snap(char *name, void *arg)
{
char *snapname = arg;
@ -1430,18 +1430,25 @@ zfs_ioc_rollback(zfs_cmd_t *zc)
static int
zfs_ioc_rename(zfs_cmd_t *zc)
{
int recursive = zc->zc_cookie & 1;
zc->zc_value[sizeof (zc->zc_value) - 1] = '\0';
if (dataset_namecheck(zc->zc_value, NULL, NULL) != 0)
return (EINVAL);
if (strchr(zc->zc_name, '@') != NULL &&
/*
* Unmount snapshot unless we're doing a recursive rename,
* in which case the dataset code figures out which snapshots
* to unmount.
*/
if (!recursive && strchr(zc->zc_name, '@') != NULL &&
zc->zc_objset_type == DMU_OST_ZFS) {
int err = zfs_unmount_snap(zc->zc_name, NULL);
if (err)
return (err);
}
return (dmu_objset_rename(zc->zc_name, zc->zc_value));
return (dmu_objset_rename(zc->zc_name, zc->zc_value, recursive));
}
static int