update vendor openzfs to a00c61 (2.0-rc1)

This commit is contained in:
Matt Macy 2020-08-28 18:41:28 +00:00
parent 3b0ce0e28d
commit 93ddd0259d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/vendor-sys/openzfs/dist/; revision=364928
svn path=/vendor-sys/openzfs/2.0-rc1-ga00c61/; revision=364929; tag=vendor/openzfs/2.0-rc1-ga00c61
50 changed files with 320 additions and 94 deletions

META
View File

@ -1,10 +1,10 @@
Meta: 1 Meta: 1
Name: zfs Name: zfs
Branch: 1.0 Branch: 1.0
Version: 0.8.0 Version: 2.0.0
Release: 1 Release: rc1
Release-Tags: relext Release-Tags: relext
License: CDDL License: CDDL
Author: OpenZFS on Linux Author: OpenZFS
Linux-Maximum: 5.6 Linux-Maximum: 5.8
Linux-Minimum: 3.10 Linux-Minimum: 3.10

NEWS
View File

@ -1,3 +1,3 @@
Descriptions of all releases can be found on github: Descriptions of all releases can be found on github:
https://github.com/zfsonlinux/zfs/releases https://github.com/openzfs/zfs/releases

View File

@ -2816,7 +2816,8 @@ show_import(nvlist_t *config)
if (msgid != NULL) { if (msgid != NULL) {
(void) printf(gettext( (void) printf(gettext(
" see: https://zfsonlinux.org/msg/%s\n"), msgid); " see: https://openzfs.github.io/openzfs-docs/msg/%s\n"),
msgid);
} }
(void) printf(gettext(" config:\n\n")); (void) printf(gettext(" config:\n\n"));
@ -7804,7 +7805,7 @@ print_dedup_stats(nvlist_t *config)
* pool: tank * pool: tank
* status: DEGRADED * status: DEGRADED
* reason: One or more devices ... * reason: One or more devices ...
* see: https://zfsonlinux.org/msg/ZFS-xxxx-01 * see: https://openzfs.github.io/openzfs-docs/msg/ZFS-xxxx-01
* config: * config:
* mirror DEGRADED * mirror DEGRADED
* c1t0d0 OK * c1t0d0 OK
@ -8193,7 +8194,9 @@ status_callback(zpool_handle_t *zhp, void *data)
if (msgid != NULL) { if (msgid != NULL) {
printf(" "); printf(" ");
printf_color(ANSI_BOLD, gettext("see:")); printf_color(ANSI_BOLD, gettext("see:"));
printf(gettext(" https://zfsonlinux.org/msg/%s\n"), msgid); printf(gettext(
" https://openzfs.github.io/openzfs-docs/msg/%s\n"),
msgid);
} }
if (config != NULL) { if (config != NULL) {

View File

@ -71,6 +71,7 @@ struct libzfs_handle {
char libzfs_chassis_id[256]; char libzfs_chassis_id[256];
boolean_t libzfs_prop_debug; boolean_t libzfs_prop_debug;
regex_t libzfs_urire; regex_t libzfs_urire;
uint64_t libzfs_max_nvlist;
}; };
struct zfs_handle { struct zfs_handle {

View File

@ -26,8 +26,6 @@
#ifndef _SYS_ACL_IMPL_H #ifndef _SYS_ACL_IMPL_H
#define _SYS_ACL_IMPL_H #define _SYS_ACL_IMPL_H
#pragma ident "%Z%%M% %I% %E% SMI"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif

View File

@ -31,8 +31,6 @@
#ifndef _SYS_CMN_ERR_H #ifndef _SYS_CMN_ERR_H
#define _SYS_CMN_ERR_H #define _SYS_CMN_ERR_H
#pragma ident "%Z%%M% %I% %E% SMI"
#if !defined(_ASM) #if !defined(_ASM)
#include <sys/_stdarg.h> #include <sys/_stdarg.h>
#endif #endif

View File

@ -26,8 +26,6 @@
#ifndef _SYS_EXTDIRENT_H #ifndef _SYS_EXTDIRENT_H
#define _SYS_EXTDIRENT_H #define _SYS_EXTDIRENT_H
#pragma ident "%Z%%M% %I% %E% SMI"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif

View File

@ -26,8 +26,6 @@
#ifndef _SYS_LIST_H #ifndef _SYS_LIST_H
#define _SYS_LIST_H #define _SYS_LIST_H
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/list_impl.h> #include <sys/list_impl.h>
#ifdef __cplusplus #ifdef __cplusplus

View File

@ -27,8 +27,6 @@
#ifndef _SYS_LIST_IMPL_H #ifndef _SYS_LIST_IMPL_H
#define _SYS_LIST_IMPL_H #define _SYS_LIST_IMPL_H
#pragma ident "%Z%%M% %I% %E% SMI"
#include <sys/types.h> #include <sys/types.h>
#ifdef __cplusplus #ifdef __cplusplus

View File

@ -27,8 +27,6 @@
#ifndef _ZMOD_H #ifndef _ZMOD_H
#define _ZMOD_H #define _ZMOD_H
#pragma ident "%Z%%M% %I% %E% SMI"
#ifdef __cplusplus #ifdef __cplusplus
extern "C" { extern "C" {
#endif #endif

View File

@ -1013,10 +1013,10 @@ typedef struct vdev_rebuild_stat {
} vdev_rebuild_stat_t; } vdev_rebuild_stat_t;
/* /*
* Errata described by https://zfsonlinux.org/msg/ZFS-8000-ER. The ordering * Errata described by https://openzfs.github.io/openzfs-docs/msg/ZFS-8000-ER.
* of this enum must be maintained to ensure the errata identifiers map to * The ordering of this enum must be maintained to ensure the errata identifiers
* the correct documentation. New errata may only be appended to the list * map to the correct documentation. New errata may only be appended to the
* and must contain corresponding documentation at the above link. * list and must contain corresponding documentation at the above link.
*/ */
typedef enum zpool_errata { typedef enum zpool_errata {
ZPOOL_ERRATA_NONE, ZPOOL_ERRATA_NONE,

View File

@ -35,12 +35,8 @@
#include <string.h> #include <string.h>
#include <stdlib.h> #include <stdlib.h>
/*
* Some old glibc headers don't define BLKGETSIZE64
* and we don't want to require the kernel headers
*/
#if !defined(BLKGETSIZE64) #if !defined(BLKGETSIZE64)
#define BLKGETSIZE64 _IOR(0x12, 114, size_t) #define BLKGETSIZE64 DIOCGMEDIASIZE
#endif #endif
/* /*
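
The hunk above maps BLKGETSIZE64 onto FreeBSD's DIOCGMEDIASIZE instead of the Linux block-ioctl encoding. A minimal FreeBSD sketch of querying a device size through that ioctl (not part of the diff; /dev/ada0 is only a placeholder path):

/*
 * Sketch only: DIOCGMEDIASIZE from <sys/disk.h> reports a block
 * device's size in bytes through an off_t, which is what the
 * BLKGETSIZE64 alias above now resolves to.
 */
#include <sys/types.h>
#include <sys/disk.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	off_t size;
	int fd = open(argc > 1 ? argv[1] : "/dev/ada0", O_RDONLY);

	if (fd == -1)
		return (1);
	if (ioctl(fd, DIOCGMEDIASIZE, &size) == 0)
		printf("%jd bytes\n", (intmax_t)size);
	(void) close(fd);
	return (0);
}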

View File

@ -610,6 +610,18 @@ send_iterate_fs(zfs_handle_t *zhp, void *arg)
fnvlist_free(sd->snapprops); fnvlist_free(sd->snapprops);
fnvlist_free(sd->snapholds); fnvlist_free(sd->snapholds);
/* Do not allow the size of the properties list to exceed the limit */
if ((fnvlist_size(nvfs) + fnvlist_size(sd->fss)) >
zhp->zfs_hdl->libzfs_max_nvlist) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"warning: cannot send %s@%s: the size of the list of "
"snapshots and properties is too large to be received "
"successfully.\n"
"Select a smaller number of snapshots to send.\n"),
zhp->zfs_name, sd->tosnap);
rv = EZFS_NOSPC;
goto out;
}
/* add this fs to nvlist */ /* add this fs to nvlist */
(void) snprintf(guidstring, sizeof (guidstring), (void) snprintf(guidstring, sizeof (guidstring),
"0x%llx", (longlong_t)guid); "0x%llx", (longlong_t)guid);
@ -2015,6 +2027,21 @@ send_prelim_records(zfs_handle_t *zhp, const char *from, int fd,
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP, return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf)); errbuf));
} }
/*
* Do not allow the size of the properties list to exceed
* the limit
*/
if ((fnvlist_size(fss) + fnvlist_size(hdrnv)) >
zhp->zfs_hdl->libzfs_max_nvlist) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "warning: cannot send '%s': "
"the size of the list of snapshots and properties "
"is too large to be received successfully.\n"
"Select a smaller number of snapshots to send.\n"),
zhp->zfs_name);
return (zfs_error(zhp->zfs_hdl, EZFS_NOSPC,
errbuf));
}
fnvlist_add_nvlist(hdrnv, "fss", fss); fnvlist_add_nvlist(hdrnv, "fss", fss);
VERIFY0(nvlist_pack(hdrnv, &packbuf, &buflen, NV_ENCODE_XDR, VERIFY0(nvlist_pack(hdrnv, &packbuf, &buflen, NV_ENCODE_XDR,
0)); 0));
@ -2578,8 +2605,6 @@ recv_read(libzfs_handle_t *hdl, int fd, void *buf, int ilen,
int rv; int rv;
int len = ilen; int len = ilen;
assert(ilen <= SPA_MAXBLOCKSIZE);
do { do {
rv = read(fd, cp, len); rv = read(fd, cp, len);
cp += rv; cp += rv;
@ -2613,6 +2638,11 @@ recv_read_nvlist(libzfs_handle_t *hdl, int fd, int len, nvlist_t **nvp,
if (buf == NULL) if (buf == NULL)
return (ENOMEM); return (ENOMEM);
if (len > hdl->libzfs_max_nvlist) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "nvlist too large"));
return (ENOMEM);
}
err = recv_read(hdl, fd, buf, len, byteswap, zc); err = recv_read(hdl, fd, buf, len, byteswap, zc);
if (err != 0) { if (err != 0) {
free(buf); free(buf);
@ -3783,6 +3813,7 @@ recv_skip(libzfs_handle_t *hdl, int fd, boolean_t byteswap)
} }
payload_size = payload_size =
DRR_WRITE_PAYLOAD_SIZE(&drr->drr_u.drr_write); DRR_WRITE_PAYLOAD_SIZE(&drr->drr_u.drr_write);
assert(payload_size <= SPA_MAXBLOCKSIZE);
(void) recv_read(hdl, fd, buf, (void) recv_read(hdl, fd, buf,
payload_size, B_FALSE, NULL); payload_size, B_FALSE, NULL);
break; break;
@ -4819,7 +4850,8 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
case ZFS_ERR_FROM_IVSET_GUID_MISSING: case ZFS_ERR_FROM_IVSET_GUID_MISSING:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"IV set guid missing. See errata %u at " "IV set guid missing. See errata %u at "
"https://zfsonlinux.org/msg/ZFS-8000-ER."), "https://openzfs.github.io/openzfs-docs/msg/"
"ZFS-8000-ER."),
ZPOOL_ERRATA_ZOL_8308_ENCRYPTION); ZPOOL_ERRATA_ZOL_8308_ENCRYPTION);
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf); (void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break; break;

View File

@ -1008,6 +1008,7 @@ libzfs_init(void)
{ {
libzfs_handle_t *hdl; libzfs_handle_t *hdl;
int error; int error;
char *env;
error = libzfs_load_module(); error = libzfs_load_module();
if (error) { if (error) {
@ -1055,6 +1056,15 @@ libzfs_init(void)
if (getenv("ZFS_PROP_DEBUG") != NULL) { if (getenv("ZFS_PROP_DEBUG") != NULL) {
hdl->libzfs_prop_debug = B_TRUE; hdl->libzfs_prop_debug = B_TRUE;
} }
if ((env = getenv("ZFS_SENDRECV_MAX_NVLIST")) != NULL) {
if ((error = zfs_nicestrtonum(hdl, env,
&hdl->libzfs_max_nvlist))) {
errno = error;
return (NULL);
}
} else {
hdl->libzfs_max_nvlist = (SPA_MAXBLOCKSIZE * 4);
}
/* /*
* For testing, remove some settable properties and features * For testing, remove some settable properties and features
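
Together with the libzfs_max_nvlist field added to struct libzfs_handle and the checks in libzfs_sendrecv.c above, this gives send/receive a cap on nvlist sizes: four maximum-sized blocks by default (SPA_MAXBLOCKSIZE is 16 MiB, so 64 MiB), overridable through the ZFS_SENDRECV_MAX_NVLIST environment variable, which is parsed with zfs_nicestrtonum() and so should accept human-readable values such as 128M. A standalone sketch of that default/override logic (illustration only, parsing plain byte counts rather than nicenum suffixes):

/* Not libzfs code: mimics the default/override behaviour above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SPA_MAXBLOCKSIZE (1ULL << 24)	/* 16 MiB */

static uint64_t
sendrecv_nvlist_limit(void)
{
	const char *env = getenv("ZFS_SENDRECV_MAX_NVLIST");

	if (env != NULL)
		return (strtoull(env, NULL, 0));	/* bytes only here */
	return (SPA_MAXBLOCKSIZE * 4);			/* 64 MiB default */
}

int
main(void)
{
	printf("send/recv nvlist limit: %llu bytes\n",
	    (unsigned long long)sendrecv_nvlist_limit());
	return (0);
}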

View File

@ -13,7 +13,7 @@
.\" Copyright (c) 2015 by Delphix. All rights reserved. .\" Copyright (c) 2015 by Delphix. All rights reserved.
.\" Copyright (c) 2020 by AJ Jordan. All rights reserved. .\" Copyright (c) 2020 by AJ Jordan. All rights reserved.
.\" .\"
.TH ARCSTAT 1 "May 7, 2020" .TH ARCSTAT 1 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
arcstat \- report ZFS ARC and L2ARC statistics arcstat \- report ZFS ARC and L2ARC statistics
.SH SYNOPSIS .SH SYNOPSIS

View File

@ -20,7 +20,7 @@
.\" .\"
.\" CDDL HEADER END .\" CDDL HEADER END
.\" .\"
.TH cstyle 1 "28 March 2005" .TH CSTYLE 1 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
.I cstyle .I cstyle
\- check for some common stylistic errors in C source files \- check for some common stylistic errors in C source files

View File

@ -22,7 +22,7 @@
.\" .\"
.\" Copyright (c) 2016 Gvozden Nešković. All rights reserved. .\" Copyright (c) 2016 Gvozden Nešković. All rights reserved.
.\" .\"
.TH raidz_test 1 "2016" "ZFS on Linux" "User Commands" .TH RAIDZ_TEST 1 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
\fBraidz_test\fR \- raidz implementation verification and benchmarking tool \fBraidz_test\fR \- raidz implementation verification and benchmarking tool

View File

@ -22,7 +22,7 @@
.\" .\"
.\" Copyright 2013 Darik Horn <dajhorn@vanadac.com>. All rights reserved. .\" Copyright 2013 Darik Horn <dajhorn@vanadac.com>. All rights reserved.
.\" .\"
.TH zhack 1 "2013 MAR 16" "ZFS on Linux" "User Commands" .TH ZHACK 1 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
zhack \- libzpool debugging tool zhack \- libzpool debugging tool

View File

@ -24,7 +24,7 @@
.\" Copyright (c) 2009 Michael Gebetsroither <michael.geb@gmx.at>. All rights .\" Copyright (c) 2009 Michael Gebetsroither <michael.geb@gmx.at>. All rights
.\" reserved. .\" reserved.
.\" .\"
.TH ztest 1 "2009 NOV 01" "ZFS on Linux" "User Commands" .TH ZTEST 1 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
\fBztest\fR \- was written by the ZFS Developers as a ZFS unit test. \fBztest\fR \- was written by the ZFS Developers as a ZFS unit test.

View File

@ -2,7 +2,7 @@
.\" .\"
.\" Copyright 2013 Turbo Fredriksson <turbo@bayour.com>. All rights reserved. .\" Copyright 2013 Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
.\" .\"
.TH SPL-MODULE-PARAMETERS 5 "Oct 28, 2017" .TH SPL-MODULE-PARAMETERS 5 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
spl\-module\-parameters \- SPL module parameters spl\-module\-parameters \- SPL module parameters
.SH DESCRIPTION .SH DESCRIPTION
@ -225,7 +225,7 @@ Default value: \fB/etc/hostid\fR
\fBspl_panic_halt\fR (uint) \fBspl_panic_halt\fR (uint)
.ad .ad
.RS 12n .RS 12n
Cause a kernel panic on assertion failures. When not enabled, the thread is Cause a kernel panic on assertion failures. When not enabled, the thread is
halted to facilitate further debugging. halted to facilitate further debugging.
.sp .sp
Set to a non-zero value to enable. Set to a non-zero value to enable.

View File

@ -1,4 +1,4 @@
.TH vdev_id.conf 5 .TH VDEV_ID.CONF 5 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
vdev_id.conf \- Configuration file for vdev_id vdev_id.conf \- Configuration file for vdev_id
.SH DESCRIPTION .SH DESCRIPTION

View File

@ -13,7 +13,7 @@
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your .\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information: .\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner] .\" Portions Copyright [yyyy] [name of copyright owner]
.TH ZFS-EVENTS 5 "Oct 24, 2018" .TH ZFS-EVENTS 5 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
zfs\-events \- Events created by the ZFS filesystem. zfs\-events \- Events created by the ZFS filesystem.
.SH DESCRIPTION .SH DESCRIPTION

View File

@ -14,7 +14,7 @@
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your .\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information: .\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner] .\" Portions Copyright [yyyy] [name of copyright owner]
.TH ZFS-MODULE-PARAMETERS 5 "Feb 15, 2019" .TH ZFS-MODULE-PARAMETERS 5 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
zfs\-module\-parameters \- ZFS module parameters zfs\-module\-parameters \- ZFS module parameters
.SH DESCRIPTION .SH DESCRIPTION
@ -198,6 +198,20 @@ feature.
Default value: \fB200\fR%. Default value: \fB200\fR%.
.RE .RE
.sp
.ne 2
.na
\fBl2arc_meta_percent\fR (int)
.ad
.RS 12n
Percent of ARC size allowed for L2ARC-only headers.
Since L2ARC buffers are not evicted on memory pressure, too large amount of
headers on system with irrationally large L2ARC can render it slow or unusable.
This parameter limits L2ARC writes and rebuild to achieve it.
.sp
Default value: \fB33\fR%.
.RE
.sp .sp
.ne 2 .ne 2
.na .na
@ -426,7 +440,7 @@ Default value: \fB16,777,216\fR (16MB)
.RS 12n .RS 12n
If we are not searching forward (due to metaslab_df_max_search, If we are not searching forward (due to metaslab_df_max_search,
metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable controls metaslab_df_free_pct, or metaslab_df_alloc_threshold), this tunable controls
what segment is used. If it is set, we will use the largest free segment. what segment is used. If it is set, we will use the largest free segment.
If it is not set, we will use a segment of exactly the requested size (or If it is not set, we will use a segment of exactly the requested size (or
larger). larger).
.sp .sp
@ -3256,7 +3270,7 @@ Default value: \fB25\fR.
\fBzfs_sync_pass_dont_compress\fR (int) \fBzfs_sync_pass_dont_compress\fR (int)
.ad .ad
.RS 12n .RS 12n
Starting in this sync pass, we disable compression (including of metadata). Starting in this sync pass, we disable compression (including of metadata).
With the default setting, in practice, we don't have this many sync passes, With the default setting, in practice, we don't have this many sync passes,
so this has no effect. so this has no effect.
.sp .sp

View File

@ -16,7 +16,7 @@
.\" Portions Copyright [yyyy] [name of copyright owner] .\" Portions Copyright [yyyy] [name of copyright owner]
.\" Copyright (c) 2019, Klara Inc. .\" Copyright (c) 2019, Klara Inc.
.\" Copyright (c) 2019, Allan Jude .\" Copyright (c) 2019, Allan Jude
.TH ZPOOL-FEATURES 5 "Jun 8, 2018" .TH ZPOOL-FEATURES 5 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
zpool\-features \- ZFS pool feature descriptions zpool\-features \- ZFS pool feature descriptions
.SH DESCRIPTION .SH DESCRIPTION

View File

@ -22,7 +22,7 @@
.\" .\"
.\" Copyright 2013 Darik Horn <dajhorn@vanadac.com>. All rights reserved. .\" Copyright 2013 Darik Horn <dajhorn@vanadac.com>. All rights reserved.
.\" .\"
.TH fsck.zfs 8 "2013 MAR 16" "ZFS on Linux" "System Administration Commands" .TH FSCK.ZFS 8 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
fsck.zfs \- Dummy ZFS filesystem checker. fsck.zfs \- Dummy ZFS filesystem checker.

View File

@ -22,7 +22,7 @@
.\" .\"
.\" Copyright 2013 Darik Horn <dajhorn@vanadac.com>. All rights reserved. .\" Copyright 2013 Darik Horn <dajhorn@vanadac.com>. All rights reserved.
.\" .\"
.TH mount.zfs 8 "2013 FEB 28" "ZFS on Linux" "System Administration Commands" .TH MOUNT.ZFS 8 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
mount.zfs \- mount a ZFS filesystem mount.zfs \- mount a ZFS filesystem

View File

@ -1,4 +1,4 @@
.TH vdev_id 8 .TH VDEV_ID 8 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
vdev_id \- generate user-friendly names for JBOD disks vdev_id \- generate user-friendly names for JBOD disks
.SH SYNOPSIS .SH SYNOPSIS

View File

@ -11,7 +11,7 @@
.\" "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>. .\" "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
.\" You may not use this file except in compliance with the license. .\" You may not use this file except in compliance with the license.
.\" .\"
.TH ZED 8 "Octember 1, 2013" "ZFS on Linux" "System Administration Commands" .TH ZED 8 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
ZED \- ZFS Event Daemon ZED \- ZFS Event Daemon

View File

@ -22,7 +22,7 @@
.\" OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION .\" OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
.\" WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. .\" WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.TH "ZFS\-MOUNT\-GENERATOR" "8" "2020-01-19" "ZFS" "zfs-mount-generator" "\"" .TH ZFS-MOUNT-GENERATOR 8 "Aug 24, 2020" OpenZFS
.SH "NAME" .SH "NAME"
zfs\-mount\-generator \- generates systemd mount units for ZFS zfs\-mount\-generator \- generates systemd mount units for ZFS

View File

@ -736,6 +736,42 @@ to mount zfs datasets. This option is provided for backwards compatibility with
.Xr mount 8 , .Xr mount 8 ,
.Xr net 8 , .Xr net 8 ,
.Xr selinux 8 , .Xr selinux 8 ,
.Xr zfs-allow 8 ,
.Xr zfs-bookmark 8 ,
.Xr zfs-change-key 8 ,
.Xr zfs-clone 8 ,
.Xr zfs-create 8 ,
.Xr zfs-destroy 8 ,
.Xr zfs-diff 8 ,
.Xr zfs-get 8 ,
.Xr zfs-groupspace 8 ,
.Xr zfs-hold 8 ,
.Xr zfs-inherit 8 ,
.Xr zfs-jail 8 ,
.Xr zfs-list 8 ,
.Xr zfs-load-key 8 ,
.Xr zfs-mount 8 ,
.Xr zfs-program 8 ,
.Xr zfs-project 8 ,
.Xr zfs-projectspace 8 ,
.Xr zfs-promote 8 ,
.Xr zfs-receive 8 ,
.Xr zfs-redact 8 ,
.Xr zfs-release 8 ,
.Xr zfs-rename 8 ,
.Xr zfs-rollback 8 ,
.Xr zfs-send 8 ,
.Xr zfs-set 8 ,
.Xr zfs-share 8 ,
.Xr zfs-snapshot 8 ,
.Xr zfs-unallow 8 ,
.Xr zfs-unjail 8 ,
.Xr zfs-unload-key 8 ,
.Xr zfs-unmount 8 ,
.Xr zfs-unshare 8 ,
.Xr zfs-upgrade 8 ,
.Xr zfs-userspace 8 ,
.Xr zfs-wait 8 ,
.Xr zfsconcepts 8 , .Xr zfsconcepts 8 ,
.Xr zfsprops 8 , .Xr zfsprops 8 ,
.Xr zpool 8 .Xr zpool 8

View File

@ -22,7 +22,7 @@
.\" .\"
.\" Copyright 2013 Darik Horn <dajhorn@vanadac.com>. All rights reserved. .\" Copyright 2013 Darik Horn <dajhorn@vanadac.com>. All rights reserved.
.\" .\"
.TH zinject 8 "2013 FEB 28" "ZFS on Linux" "System Administration Commands" .TH ZINJECT 8 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
zinject \- ZFS Fault Injector zinject \- ZFS Fault Injector

View File

@ -65,9 +65,9 @@ When given an
.Ar interval , .Ar interval ,
the statistics are printed every the statistics are printed every
.Ar interval .Ar interval
seconds until ^C is pressed. If seconds until ^C is pressed. If
.Fl n .Fl n
flag is specified the headers are displayed only once, otherwise they are flag is specified the headers are displayed only once, otherwise they are
displayed periodically. If count is specified, the command exits displayed periodically. If count is specified, the command exits
after count reports are printed. The first report printed is always after count reports are printed. The first report printed is always
the statistics since boot regardless of whether the statistics since boot regardless of whether

View File

@ -559,10 +559,41 @@ is not set, it is assumed that the user is allowed to run
.Sh INTERFACE STABILITY .Sh INTERFACE STABILITY
.Sy Evolving .Sy Evolving
.Sh SEE ALSO .Sh SEE ALSO
.Xr zpoolconcepts 8 ,
.Xr zpoolprops 8 ,
.Xr zfs-events 5 , .Xr zfs-events 5 ,
.Xr zfs-module-parameters 5 , .Xr zfs-module-parameters 5 ,
.Xr zpool-features 5 , .Xr zpool-features 5 ,
.Xr zed 8 , .Xr zed 8 ,
.Xr zfs 8 .Xr zfs 8 ,
.Xr zpool-add 8 ,
.Xr zpool-attach 8 ,
.Xr zpool-checkpoint 8 ,
.Xr zpool-clear 8 ,
.Xr zpool-create 8 ,
.Xr zpool-destroy 8 ,
.Xr zpool-detach 8 ,
.Xr zpool-events 8 ,
.Xr zpool-export 8 ,
.Xr zpool-get 8 ,
.Xr zpool-history 8 ,
.Xr zpool-import 8 ,
.Xr zpool-initialize 8 ,
.Xr zpool-iostat 8 ,
.Xr zpool-labelclear 8 ,
.Xr zpool-list 8 ,
.Xr zpool-offline 8 ,
.Xr zpool-online 8 ,
.Xr zpool-reguid 8 ,
.Xr zpool-remove 8 ,
.Xr zpool-reopen 8 ,
.Xr zpool-replace 8 ,
.Xr zpool-resilver 8 ,
.Xr zpool-scrub 8 ,
.Xr zpool-set 8 ,
.Xr zpool-split 8 ,
.Xr zpool-status 8 ,
.Xr zpool-sync 8 ,
.Xr zpool-trim 8 ,
.Xr zpool-upgrade 8 ,
.Xr zpool-wait 8 ,
.Xr zpoolconcepts 8 ,
.Xr zpoolprops 8

View File

@ -3,7 +3,7 @@
.\" The contents of this file are subject to the terms of the Common Development and Distribution License (the "License"). You may not use this file except in compliance with the License. You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing. .\" The contents of this file are subject to the terms of the Common Development and Distribution License (the "License"). You may not use this file except in compliance with the License. You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions and limitations under the License. When distributing Covered Code, include this CDDL HEADER in each file and include the License file at usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this CDDL HEADER, with .\" See the License for the specific language governing permissions and limitations under the License. When distributing Covered Code, include this CDDL HEADER in each file and include the License file at usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this CDDL HEADER, with
.\" the fields enclosed by brackets "[]" replaced with your own identifying information: Portions Copyright [yyyy] [name of copyright owner] .\" the fields enclosed by brackets "[]" replaced with your own identifying information: Portions Copyright [yyyy] [name of copyright owner]
.TH zstreamdump 8 "29 Aug 2012" "ZFS pool 28, filesystem 5" "System Administration Commands" .TH ZSTREAMDUMP 8 "Aug 24, 2020" OpenZFS
.SH NAME .SH NAME
zstreamdump \- filter data in zfs send stream zstreamdump \- filter data in zfs send stream
.SH SYNOPSIS .SH SYNOPSIS

View File

@ -356,4 +356,4 @@ CFLAGS.zil.c= -Wno-cast-qual
CFLAGS.zio.c= -Wno-cast-qual CFLAGS.zio.c= -Wno-cast-qual
CFLAGS.zrlock.c= -Wno-cast-qual CFLAGS.zrlock.c= -Wno-cast-qual
CFLAGS.zfs_zstd.c= -Wno-cast-qual -Wno-pointer-arith CFLAGS.zfs_zstd.c= -Wno-cast-qual -Wno-pointer-arith
CFLAGS.zstd.c= -fno-tree-vectorize CFLAGS.zstd.c= -fno-tree-vectorize -U__BMI__

View File

@ -56,7 +56,7 @@
#define ENTRY(name) \ #define ENTRY(name) \
.align 2 ; \ .align 2 ; \
.type name,@function; \ .type name,@function; \
.globl name; \ .weak name; \
name: name:
#else /* PPC64_ELF_ABI_v1 */ #else /* PPC64_ELF_ABI_v1 */
@ -65,8 +65,8 @@ name:
#define GLUE(a,b) XGLUE(a,b) #define GLUE(a,b) XGLUE(a,b)
#define ENTRY(name) \ #define ENTRY(name) \
.align 2 ; \ .align 2 ; \
.globl name; \ .weak name; \
.globl GLUE(.,name); \ .weak GLUE(.,name); \
.pushsection ".opd","aw"; \ .pushsection ".opd","aw"; \
name: \ name: \
.quad GLUE(.,name); \ .quad GLUE(.,name); \
@ -83,8 +83,8 @@ GLUE(.,name):
#define ENTRY(name) \ #define ENTRY(name) \
.text; \ .text; \
.p2align 4; \ .p2align 4; \
.globl name; \ .weak name; \
.type name,@function; \ .type name,@function; \
name: name:
#endif /* __powerpc64__ */ #endif /* __powerpc64__ */

View File

@ -23,8 +23,6 @@
* Use is subject to license terms. * Use is subject to license terms.
*/ */
#pragma ident "%Z%%M% %I% %E% SMI"
/* /*
* Generic doubly-linked list implementation * Generic doubly-linked list implementation
*/ */

View File

@ -31,6 +31,7 @@
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved. * Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright (c) 2018 George Melikov. All Rights Reserved. * Copyright (c) 2018 George Melikov. All Rights Reserved.
* Copyright (c) 2019 Datto, Inc. All rights reserved. * Copyright (c) 2019 Datto, Inc. All rights reserved.
* Copyright (c) 2020 The MathWorks, Inc. All rights reserved.
*/ */
/* /*
@ -977,6 +978,22 @@ zfsctl_snapdir_mkdir(struct inode *dip, char *dirname, vattr_t *vap,
return (error); return (error);
} }
/*
* Flush everything out of the kernel's export table and such.
* This is needed as once the snapshot is used over NFS, its
* entries in svc_export and svc_expkey caches hold reference
* to the snapshot mount point. There is no known way of flushing
* only the entries related to the snapshot.
*/
static void
exportfs_flush(void)
{
char *argv[] = { "/usr/sbin/exportfs", "-f", NULL };
char *envp[] = { NULL };
(void) call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}
/* /*
* Attempt to unmount a snapshot by making a call to user space. * Attempt to unmount a snapshot by making a call to user space.
* There is no assurance that this can or will succeed, is just a * There is no assurance that this can or will succeed, is just a
@ -999,6 +1016,8 @@ zfsctl_snapshot_unmount(char *snapname, int flags)
} }
rw_exit(&zfs_snapshot_lock); rw_exit(&zfs_snapshot_lock);
exportfs_flush();
if (flags & MNT_FORCE) if (flags & MNT_FORCE)
argv[4] = "-fn"; argv[4] = "-fn";
argv[5] = se->se_path; argv[5] = se->se_path;

View File

@ -187,7 +187,7 @@ static const fletcher_4_ops_t *fletcher_4_impls[] = {
#if defined(__x86_64) && defined(HAVE_AVX512BW) #if defined(__x86_64) && defined(HAVE_AVX512BW)
&fletcher_4_avx512bw_ops, &fletcher_4_avx512bw_ops,
#endif #endif
#if defined(__aarch64__) #if defined(__aarch64__) && !defined(__FreeBSD__)
&fletcher_4_aarch64_neon_ops, &fletcher_4_aarch64_neon_ops,
#endif #endif
}; };

View File

@ -823,6 +823,7 @@ unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */
int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
int l2arc_feed_again = B_TRUE; /* turbo warmup */ int l2arc_feed_again = B_TRUE; /* turbo warmup */
int l2arc_norw = B_FALSE; /* no reads during writes */ int l2arc_norw = B_FALSE; /* no reads during writes */
int l2arc_meta_percent = 33; /* limit on headers size */
/* /*
* L2ARC Internals * L2ARC Internals
@ -4988,9 +4989,6 @@ arc_adapt(int bytes, arc_state_t *state)
int64_t mrug_size = zfs_refcount_count(&arc_mru_ghost->arcs_size); int64_t mrug_size = zfs_refcount_count(&arc_mru_ghost->arcs_size);
int64_t mfug_size = zfs_refcount_count(&arc_mfu_ghost->arcs_size); int64_t mfug_size = zfs_refcount_count(&arc_mfu_ghost->arcs_size);
if (state == arc_l2c_only)
return;
ASSERT(bytes > 0); ASSERT(bytes > 0);
/* /*
* Adapt the target size of the MRU list: * Adapt the target size of the MRU list:
@ -9144,6 +9142,15 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
return (write_asize); return (write_asize);
} }
static boolean_t
l2arc_hdr_limit_reached(void)
{
int64_t s = aggsum_upper_bound(&astat_l2_hdr_size);
return (arc_reclaim_needed() || (s > arc_meta_limit * 3 / 4) ||
(s > (arc_warm ? arc_c : arc_c_max) * l2arc_meta_percent / 100));
}
/* /*
* This thread feeds the L2ARC at regular intervals. This is the beating * This thread feeds the L2ARC at regular intervals. This is the beating
* heart of the L2ARC. * heart of the L2ARC.
@ -9211,7 +9218,7 @@ l2arc_feed_thread(void *unused)
/* /*
* Avoid contributing to memory pressure. * Avoid contributing to memory pressure.
*/ */
if (arc_reclaim_needed()) { if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_abort_lowmem); ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
spa_config_exit(spa, SCL_L2ARC, dev); spa_config_exit(spa, SCL_L2ARC, dev);
continue; continue;
@ -9662,7 +9669,7 @@ l2arc_rebuild(l2arc_dev_t *dev)
* online the L2ARC dev at a later time (or re-import the pool) * online the L2ARC dev at a later time (or re-import the pool)
* to reconstruct it (when there's less memory pressure). * to reconstruct it (when there's less memory pressure).
*/ */
if (arc_reclaim_needed()) { if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_lowmem); ARCSTAT_BUMP(arcstat_l2_rebuild_abort_lowmem);
cmn_err(CE_NOTE, "System running low on memory, " cmn_err(CE_NOTE, "System running low on memory, "
"aborting L2ARC rebuild."); "aborting L2ARC rebuild.");
@ -10002,6 +10009,13 @@ l2arc_log_blk_restore(l2arc_dev_t *dev, const l2arc_log_blk_phys_t *lb,
uint64_t size = 0, asize = 0; uint64_t size = 0, asize = 0;
uint64_t log_entries = dev->l2ad_log_entries; uint64_t log_entries = dev->l2ad_log_entries;
/*
* Usually arc_adapt() is called only for data, not headers, but
* since we may allocate significant amount of memory here, let ARC
* grow its arc_c.
*/
arc_adapt(log_entries * HDR_L2ONLY_SIZE, arc_l2c_only);
for (int i = log_entries - 1; i >= 0; i--) { for (int i = log_entries - 1; i >= 0; i--) {
/* /*
* Restore goes in the reverse temporal direction to preserve * Restore goes in the reverse temporal direction to preserve
@ -10538,6 +10552,9 @@ ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_again, INT, ZMOD_RW,
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, norw, INT, ZMOD_RW, ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, norw, INT, ZMOD_RW,
"No reads during writes"); "No reads during writes");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, meta_percent, INT, ZMOD_RW,
"Percent of ARC size allowed for L2ARC-only headers");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_enabled, INT, ZMOD_RW, ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_enabled, INT, ZMOD_RW,
"Rebuild the L2ARC when importing a pool"); "Rebuild the L2ARC when importing a pool");

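For scale, a worked example with made-up sizes (not OpenZFS code): l2arc_hdr_limit_reached() stops further L2ARC writes and rebuilds once L2ARC-only header memory exceeds l2arc_meta_percent of the ARC target size (arc_c once the ARC is warm, arc_c_max before that); the arc_meta_limit * 3/4 and low-memory checks from the hunk above are omitted here.

/* Hypothetical numbers, reproducing only one branch of the check. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t arc_c_max = 64ULL << 30;	/* assume a 64 GiB ARC */
	int l2arc_meta_percent = 33;		/* module default */
	uint64_t cap = arc_c_max * l2arc_meta_percent / 100;

	printf("L2ARC header cap: %llu bytes (~%llu GiB)\n",
	    (unsigned long long)cap, (unsigned long long)(cap >> 30));
	return (0);
}
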
View File

@ -2458,6 +2458,7 @@ EXPORT_SYMBOL(dmu_object_set_blocksize);
EXPORT_SYMBOL(dmu_object_set_maxblkid); EXPORT_SYMBOL(dmu_object_set_maxblkid);
EXPORT_SYMBOL(dmu_object_set_checksum); EXPORT_SYMBOL(dmu_object_set_checksum);
EXPORT_SYMBOL(dmu_object_set_compress); EXPORT_SYMBOL(dmu_object_set_compress);
EXPORT_SYMBOL(dmu_offset_next);
EXPORT_SYMBOL(dmu_write_policy); EXPORT_SYMBOL(dmu_write_policy);
EXPORT_SYMBOL(dmu_sync); EXPORT_SYMBOL(dmu_sync);
EXPORT_SYMBOL(dmu_request_arcbuf); EXPORT_SYMBOL(dmu_request_arcbuf);

View File

@ -23,6 +23,7 @@
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved. * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2020 Oxide Computer Company
*/ */
#include <sys/zfs_context.h> #include <sys/zfs_context.h>
@ -762,13 +763,22 @@ dnode_sync(dnode_t *dn, dmu_tx_t *tx)
dsfra.dsfra_dnode = dn; dsfra.dsfra_dnode = dn;
dsfra.dsfra_tx = tx; dsfra.dsfra_tx = tx;
dsfra.dsfra_free_indirects = freeing_dnode; dsfra.dsfra_free_indirects = freeing_dnode;
mutex_enter(&dn->dn_mtx);
if (freeing_dnode) { if (freeing_dnode) {
ASSERT(range_tree_contains(dn->dn_free_ranges[txgoff], ASSERT(range_tree_contains(dn->dn_free_ranges[txgoff],
0, dn->dn_maxblkid + 1)); 0, dn->dn_maxblkid + 1));
} }
mutex_enter(&dn->dn_mtx); /*
range_tree_vacate(dn->dn_free_ranges[txgoff], * Because dnode_sync_free_range() must drop dn_mtx during its
* processing, using it as a callback to range_tree_vacate() is
* not safe. No other operations (besides destroy) are allowed
* once range_tree_vacate() has begun, and dropping dn_mtx
* would leave a window open for another thread to observe that
* invalid (and unsafe) state.
*/
range_tree_walk(dn->dn_free_ranges[txgoff],
dnode_sync_free_range, &dsfra); dnode_sync_free_range, &dsfra);
range_tree_vacate(dn->dn_free_ranges[txgoff], NULL, NULL);
range_tree_destroy(dn->dn_free_ranges[txgoff]); range_tree_destroy(dn->dn_free_ranges[txgoff]);
dn->dn_free_ranges[txgoff] = NULL; dn->dn_free_ranges[txgoff] = NULL;
mutex_exit(&dn->dn_mtx); mutex_exit(&dn->dn_mtx);
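
The comment in the hunk explains the hazard; the shape of the fix, reproduced here on a hypothetical analogue rather than the real range-tree API, is to traverse the structure with a callback that may drop the lock and only afterwards empty it in a single step with no callback:

/*
 * Analogue only, not OpenZFS code: when the per-element callback may
 * drop the lock that protects a container (as dnode_sync_free_range()
 * drops dn_mtx), walk the container first and then empty it in one
 * step, so no other thread can observe a half-vacated structure.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int segments[] = { 10, 20, 30, 40 };
static int nsegments = 4;

static void
free_one_segment(int seg)
{
	/* Like the real callback, this briefly drops the lock. */
	pthread_mutex_unlock(&lock);
	printf("freeing segment %d\n", seg);
	pthread_mutex_lock(&lock);
}

int
main(void)
{
	pthread_mutex_lock(&lock);
	for (int i = 0; i < nsegments; i++)	/* "range_tree_walk()" */
		free_one_segment(segments[i]);
	nsegments = 0;				/* "range_tree_vacate(NULL)" */
	pthread_mutex_unlock(&lock);
	return (0);
}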

View File

@ -121,13 +121,6 @@
* and updated by dsl_fs_ss_count_adjust(). A new limit value is setup in * and updated by dsl_fs_ss_count_adjust(). A new limit value is setup in
* dsl_dir_activate_fs_ss_limit() and the counts are adjusted, if necessary, by * dsl_dir_activate_fs_ss_limit() and the counts are adjusted, if necessary, by
* dsl_dir_init_fs_ss_count(). * dsl_dir_init_fs_ss_count().
*
* There is a special case when we receive a filesystem that already exists. In
* this case a temporary clone name of %X is created (see dmu_recv_begin). We
* never update the filesystem counts for temporary clones.
*
* Likewise, we do not update the snapshot counts for temporary snapshots,
* such as those created by zfs diff.
*/ */
extern inline dsl_dir_phys_t *dsl_dir_phys(dsl_dir_t *dd); extern inline dsl_dir_phys_t *dsl_dir_phys(dsl_dir_t *dd);
@ -593,11 +586,9 @@ dsl_dir_init_fs_ss_count(dsl_dir_t *dd, dmu_tx_t *tx)
&chld_dd)); &chld_dd));
/* /*
* Ignore hidden ($FREE, $MOS & $ORIGIN) objsets and * Ignore hidden ($FREE, $MOS & $ORIGIN) objsets.
* temporary datasets.
*/ */
if (chld_dd->dd_myname[0] == '$' || if (chld_dd->dd_myname[0] == '$') {
chld_dd->dd_myname[0] == '%') {
dsl_dir_rele(chld_dd, FTAG); dsl_dir_rele(chld_dd, FTAG);
continue; continue;
} }
@ -910,14 +901,12 @@ dsl_fs_ss_count_adjust(dsl_dir_t *dd, int64_t delta, const char *prop,
strcmp(prop, DD_FIELD_SNAPSHOT_COUNT) == 0); strcmp(prop, DD_FIELD_SNAPSHOT_COUNT) == 0);
/* /*
* When we receive an incremental stream into a filesystem that already * We don't do accounting for hidden ($FREE, $MOS & $ORIGIN) objsets.
* exists, a temporary clone is created. We don't count this temporary
* clone, whose name begins with a '%'. We also ignore hidden ($FREE,
* $MOS & $ORIGIN) objsets.
*/ */
if ((dd->dd_myname[0] == '%' || dd->dd_myname[0] == '$') && if (dd->dd_myname[0] == '$' && strcmp(prop,
strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0) DD_FIELD_FILESYSTEM_COUNT) == 0) {
return; return;
}
/* /*
* e.g. if renaming a dataset with no snapshots, count adjustment is 0 * e.g. if renaming a dataset with no snapshots, count adjustment is 0

View File

@ -39,7 +39,6 @@
#include <sys/sa.h> #include <sys/sa.h>
#include <sys/sunddi.h> #include <sys/sunddi.h>
#include <sys/sa_impl.h> #include <sys/sa_impl.h>
#include <sys/dnode.h>
#include <sys/errno.h> #include <sys/errno.h>
#include <sys/zfs_context.h> #include <sys/zfs_context.h>

View File

@ -3201,7 +3201,8 @@ spa_verify_host(spa_t *spa, nvlist_t *mos_config)
cmn_err(CE_WARN, "pool '%s' could not be " cmn_err(CE_WARN, "pool '%s' could not be "
"loaded as it was last accessed by " "loaded as it was last accessed by "
"another system (host: %s hostid: 0x%llx). " "another system (host: %s hostid: 0x%llx). "
"See: http://illumos.org/msg/ZFS-8000-EY", "See: https://openzfs.github.io/openzfs-docs/msg/"
"ZFS-8000-EY",
spa_name(spa), hostname, (u_longlong_t)hostid); spa_name(spa), hostname, (u_longlong_t)hostid);
spa_load_failed(spa, "hostid verification failed: pool " spa_load_failed(spa, "hostid verification failed: pool "
"last accessed by host: %s (hostid: 0x%llx)", "last accessed by host: %s (hostid: 0x%llx)",

View File

@ -61,7 +61,7 @@ const raidz_impl_ops_t *raidz_all_maths[] = {
#if defined(__x86_64) && defined(HAVE_AVX512BW) /* only x86_64 for now */ #if defined(__x86_64) && defined(HAVE_AVX512BW) /* only x86_64 for now */
&vdev_raidz_avx512bw_impl, &vdev_raidz_avx512bw_impl,
#endif #endif
#if defined(__aarch64__) #if defined(__aarch64__) && !defined(__FreeBSD__)
&vdev_raidz_aarch64_neon_impl, &vdev_raidz_aarch64_neon_impl,
&vdev_raidz_aarch64_neonx2_impl, &vdev_raidz_aarch64_neonx2_impl,
#endif #endif

View File

@ -20,6 +20,9 @@ ccflags-y += -O3
# Set it for other compilers, too. # Set it for other compilers, too.
$(obj)/lib/zstd.o: c_flags += -fno-tree-vectorize $(obj)/lib/zstd.o: c_flags += -fno-tree-vectorize
# SSE register return with SSE disabled if -march=znverX is passed
$(obj)/lib/zstd.o: c_flags += -U__BMI__
# Quiet warnings about frame size due to unused code in unmodified zstd lib # Quiet warnings about frame size due to unused code in unmodified zstd lib
$(obj)/lib/zstd.o: c_flags += -Wframe-larger-than=20480 $(obj)/lib/zstd.o: c_flags += -Wframe-larger-than=20480

View File

@ -216,7 +216,7 @@ tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos',
'zfs_receive_016_pos', 'receive-o-x_props_override', 'zfs_receive_016_pos', 'receive-o-x_props_override',
'zfs_receive_from_encrypted', 'zfs_receive_to_encrypted', 'zfs_receive_from_encrypted', 'zfs_receive_to_encrypted',
'zfs_receive_raw', 'zfs_receive_raw_incremental', 'zfs_receive_-e', 'zfs_receive_raw', 'zfs_receive_raw_incremental', 'zfs_receive_-e',
'zfs_receive_raw_-d', 'zfs_receive_from_zstd'] 'zfs_receive_raw_-d', 'zfs_receive_from_zstd', 'zfs_receive_new_props']
tags = ['functional', 'cli_root', 'zfs_receive'] tags = ['functional', 'cli_root', 'zfs_receive']
[tests/functional/cli_root/zfs_rename] [tests/functional/cli_root/zfs_rename]

View File

@ -21,6 +21,7 @@ dist_pkgdata_SCRIPTS = \
receive-o-x_props_override.ksh \ receive-o-x_props_override.ksh \
zfs_receive_from_encrypted.ksh \ zfs_receive_from_encrypted.ksh \
zfs_receive_from_zstd.ksh \ zfs_receive_from_zstd.ksh \
zfs_receive_new_props.ksh \
zfs_receive_to_encrypted.ksh \ zfs_receive_to_encrypted.ksh \
zfs_receive_raw.ksh \ zfs_receive_raw.ksh \
zfs_receive_raw_incremental.ksh \ zfs_receive_raw_incremental.ksh \

View File

@ -0,0 +1,77 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2020 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
# ZFS receive test to handle Issue #10698
#
# STRATEGY:
# 1. Create a pool with filesystem_limits disabled
# 2. Create a filesystem on that pool
# 3. Enable filesystem limits on that pool
# 4. On a pool with filesystem limits enabled, create a filesystem and set a
# limit
# 5. Snapshot limited filesystem
# 6. send -R limited filesystem and receive over filesystem with limits disabled
#
verify_runnable "both"
function cleanup
{
destroy_pool "$poolname"
destroy_pool "$rpoolname"
log_must rm -f "$vdevfile"
log_must rm -f "$rvdevfile"
log_must rm -f "$streamfile"
}
log_onexit cleanup
log_assert "ZFS should handle receiving streams with filesystem limits on \
pools where the feature was recently enabled"
poolname=sendpool
rpoolname=recvpool
vdevfile="$TEST_BASE_DIR/vdevfile.$$"
rvdevfile="$TEST_BASE_DIR/rvdevfile.$$"
sendfs="$poolname/fs"
recvfs="$rpoolname/rfs"
streamfile="$TEST_BASE_DIR/streamfile.$$"
log_must truncate -s $MINVDEVSIZE "$rvdevfile"
log_must truncate -s $MINVDEVSIZE "$vdevfile"
log_must zpool create -O mountpoint=none -o feature@filesystem_limits=disabled \
"$rpoolname" "$rvdevfile"
log_must zpool create -O mountpoint=none "$poolname" "$vdevfile"
log_must zfs create "$recvfs"
log_must zpool set feature@filesystem_limits=enabled "$rpoolname"
log_must zfs create -o filesystem_limit=100 "$sendfs"
log_must zfs snapshot "$sendfs@a"
log_must zfs send -R "$sendfs@a" >"$streamfile"
log_must eval "zfs recv -svuF $recvfs <$streamfile"
log_pass "ZFS can handle receiving streams with filesystem limits on \
pools where the feature was recently enabled"