Notable upstream pull request merges:

  #11680 Add support for zpool user properties
  #14145 Storage device expansion "silently" fails on degraded vdev
  #14405 Create zap for root vdev
  #14659 Allow MMP to bypass waiting for other threads
  #14674 Miscellaneous FreeBSD compilation bugfixes
  #14692 Fix some signedness issues in arc_evict()
  #14702 Fix typo in check_clones()
  #14715 module: small fixes for FreeBSD/aarch64
  #14716 Trim needless zeroes from checksum events
  #14719 vdev: expose zfs_vdev_max_ms_shift as a module parameter
  #14722 Fix "Detach spare vdev in case if resilvering does not happen"
  #14723 freebsd clone range fixes
  #14728 Fix BLAKE3 aarch64 assembly for FreeBSD and macOS
  #14735 Fix in check_filesystem()
  #14739 Fix data corruption when cloning embedded blocks
  #14758 Fix VERIFY(!zil_replaying(zilog, tx)) panic
  #14761 Revert "ZFS_IOC_COUNT_FILLED does unnecessary txg_wait_synced()"
  #14774 FreeBSD .zfs fixups
  #14776 FreeBSD: make zfs_vfs_held() definition consistent with declaration
  #14779 powerpc64: Support ELFv2 asm on Big Endian
  #14788 FreeBSD: add missing vop_fplookup assignments
  #14789 PAM: support the authentication facility
  #14790 Revert "Fix data race between zil_commit() and zil_suspend()"
  #14795 Fix positive ABD size assertion in abd_verify()
  #14798 Mark TX_COMMIT transaction with TXG_NOTHROTTLE
  #14804 Correct ABD size for split block ZIOs
  #14806 Use correct block pointer in block cloning case.
  #14808 blake3: fix up bogus checksums in face of cpu migration

Obtained from:	OpenZFS
OpenZFS commit:	d96e29576c
Author:	Martin Matuska
Date:	2023-05-03 12:04:55 +02:00
Commit:	d411c1d696

130 changed files with 2037 additions and 643 deletions

@@ -318,7 +318,7 @@ be_promote_dependent_clones(zfs_handle_t *zfs_hdl, struct be_destroy_data *bdd)
     struct promote_entry *entry;

     snprintf(bdd->target_name, BE_MAXPATHLEN, "%s/", zfs_get_name(zfs_hdl));
-    err = zfs_iter_dependents(zfs_hdl, 0, true, be_dependent_clone_cb, bdd);
+    err = zfs_iter_dependents(zfs_hdl, true, be_dependent_clone_cb, bdd);

     /*
      * Drain the list and walk away from it if we're only deleting a
@@ -360,13 +360,13 @@ be_destroy_cb(zfs_handle_t *zfs_hdl, void *data)
     bdd = (struct be_destroy_data *)data;
     if (bdd->snapname == NULL) {
-        err = zfs_iter_children(zfs_hdl, 0, be_destroy_cb, data);
+        err = zfs_iter_children(zfs_hdl, be_destroy_cb, data);
         if (err != 0)
             return (err);
         return (zfs_destroy(zfs_hdl, false));
     }
     /* If we're dealing with snapshots instead, delete that one alone */
-    err = zfs_iter_filesystems(zfs_hdl, 0, be_destroy_cb, data);
+    err = zfs_iter_filesystems(zfs_hdl, be_destroy_cb, data);
     if (err != 0)
         return (err);
     /*
@@ -777,7 +777,7 @@ be_clone_cb(zfs_handle_t *ds, void *data)
     if (ldc->depth_limit == -1 || ldc->depth < ldc->depth_limit) {
         ldc->depth++;
-        err = zfs_iter_filesystems(ds, 0, be_clone_cb, ldc);
+        err = zfs_iter_filesystems(ds, be_clone_cb, ldc);
         ldc->depth--;
     }

@@ -141,7 +141,7 @@ be_mount_iter(zfs_handle_t *zfs_hdl, void *data)
 skipmount:
     ++info->depth;
-    err = zfs_iter_filesystems(zfs_hdl, 0, be_mount_iter, info);
+    err = zfs_iter_filesystems(zfs_hdl, be_mount_iter, info);
     --info->depth;
     return (err);
 }
@@ -158,7 +158,7 @@ be_umount_iter(zfs_handle_t *zfs_hdl, void *data)
     info = (struct be_mount_info *)data;

     ++info->depth;
-    if((err = zfs_iter_filesystems(zfs_hdl, 0, be_umount_iter, info)) != 0) {
+    if((err = zfs_iter_filesystems(zfs_hdl, be_umount_iter, info)) != 0) {
         return (err);
     }
     --info->depth;
@@ -205,7 +205,7 @@ be_mounted_at(libbe_handle_t *lbh, const char *path, nvlist_t *details)
     info.path = path;
     info.name = NULL;

-    zfs_iter_filesystems(root_hdl, 0, be_mountcheck_cb, &info);
+    zfs_iter_filesystems(root_hdl, be_mountcheck_cb, &info);
     zfs_close(root_hdl);
     if (info.name != NULL) {

@@ -258,7 +258,7 @@ be_proplist_update(prop_data_t *data)
         &data->bootonce);

     /* XXX TODO: some error checking here */
-    zfs_iter_filesystems(root_hdl, 0, prop_list_builder_cb, data);
+    zfs_iter_filesystems(root_hdl, prop_list_builder_cb, data);

     zfs_close(root_hdl);
@@ -269,7 +269,7 @@ static int
 snapshot_proplist_update(zfs_handle_t *hdl, prop_data_t *data)
 {
-    return (zfs_iter_snapshots_sorted(hdl, 0, prop_list_builder_cb, data,
+    return (zfs_iter_snapshots_sorted(hdl, prop_list_builder_cb, data,
         0, 0));
 }

@@ -6,5 +6,5 @@ Release: 1
 Release-Tags: relext
 License: CDDL
 Author: OpenZFS
-Linux-Maximum: 6.1
+Linux-Maximum: 6.2
 Linux-Minimum: 3.10

@@ -6812,12 +6812,15 @@ dump_block_stats(spa_t *spa)
     if (dump_opt['b'] >= 2) {
         int l, t, level;
+        char csize[32], lsize[32], psize[32], asize[32];
+        char avg[32], gang[32];
         (void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE"
             "\t avg\t comp\t%%Total\tType\n");

+        zfs_blkstat_t *mdstats = umem_zalloc(sizeof (zfs_blkstat_t),
+            UMEM_NOFAIL);
+
         for (t = 0; t <= ZDB_OT_TOTAL; t++) {
-            char csize[32], lsize[32], psize[32], asize[32];
-            char avg[32], gang[32];
             const char *typename;

             /* make sure nicenum has enough space */
@@ -6860,6 +6863,15 @@ dump_block_stats(spa_t *spa)
             if (zb->zb_asize == 0)
                 continue;

+            if (level != ZB_TOTAL && t < DMU_OT_NUMTYPES &&
+                (level > 0 || DMU_OT_IS_METADATA(t))) {
+                mdstats->zb_count += zb->zb_count;
+                mdstats->zb_lsize += zb->zb_lsize;
+                mdstats->zb_psize += zb->zb_psize;
+                mdstats->zb_asize += zb->zb_asize;
+                mdstats->zb_gangs += zb->zb_gangs;
+            }
+
             if (dump_opt['b'] < 3 && level != ZB_TOTAL)
                 continue;
@@ -6905,11 +6917,31 @@ dump_block_stats(spa_t *spa)
                 }
             }
         }
+
+        zdb_nicenum(mdstats->zb_count, csize,
+            sizeof (csize));
+        zdb_nicenum(mdstats->zb_lsize, lsize,
+            sizeof (lsize));
+        zdb_nicenum(mdstats->zb_psize, psize,
+            sizeof (psize));
+        zdb_nicenum(mdstats->zb_asize, asize,
+            sizeof (asize));
+        zdb_nicenum(mdstats->zb_asize / mdstats->zb_count, avg,
+            sizeof (avg));
+        zdb_nicenum(mdstats->zb_gangs, gang, sizeof (gang));
+
+        (void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
+            "\t%5.2f\t%6.2f\t",
+            csize, lsize, psize, asize, avg,
+            (double)mdstats->zb_lsize / mdstats->zb_psize,
+            100.0 * mdstats->zb_asize / tzb->zb_asize);
+        (void) printf("%s\n", "Metadata Total");

         /* Output a table summarizing block sizes in the pool */
         if (dump_opt['b'] >= 2) {
             dump_size_histograms(zcb);
         }
+
+        umem_free(mdstats, sizeof (zfs_blkstat_t));
     }

     (void) printf("\n");
@@ -7630,6 +7662,9 @@ mos_leak_vdev(vdev_t *vd)
         mos_obj_refd(space_map_object(ms->ms_sm));
     }

+    if (vd->vdev_root_zap != 0)
+        mos_obj_refd(vd->vdev_root_zap);
+
     if (vd->vdev_top_zap != 0) {
         mos_obj_refd(vd->vdev_top_zap);
         mos_leak_vdev_top_zap(vd);

@@ -143,19 +143,19 @@ zfs_callback(zfs_handle_t *zhp, void *data)
             (cb->cb_types &
             (ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME))) &&
             zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) {
-            (void) zfs_iter_filesystems(zhp, cb->cb_flags,
+            (void) zfs_iter_filesystems_v2(zhp, cb->cb_flags,
                 zfs_callback, data);
         }
         if (((zfs_get_type(zhp) & (ZFS_TYPE_SNAPSHOT |
             ZFS_TYPE_BOOKMARK)) == 0) && include_snaps) {
-            (void) zfs_iter_snapshots(zhp, cb->cb_flags,
+            (void) zfs_iter_snapshots_v2(zhp, cb->cb_flags,
                 zfs_callback, data, 0, 0);
         }
         if (((zfs_get_type(zhp) & (ZFS_TYPE_SNAPSHOT |
             ZFS_TYPE_BOOKMARK)) == 0) && include_bmarks) {
-            (void) zfs_iter_bookmarks(zhp, cb->cb_flags,
+            (void) zfs_iter_bookmarks_v2(zhp, cb->cb_flags,
                 zfs_callback, data);
         }

@@ -1532,7 +1532,8 @@ destroy_print_snapshots(zfs_handle_t *fs_zhp, destroy_cbdata_t *cb)
     int err;
     assert(cb->cb_firstsnap == NULL);
     assert(cb->cb_prevsnap == NULL);
-    err = zfs_iter_snapshots_sorted(fs_zhp, 0, destroy_print_cb, cb, 0, 0);
+    err = zfs_iter_snapshots_sorted_v2(fs_zhp, 0, destroy_print_cb, cb, 0,
+        0);
     if (cb->cb_firstsnap != NULL) {
         uint64_t used = 0;
         if (err == 0) {
@@ -1558,7 +1559,7 @@ snapshot_to_nvl_cb(zfs_handle_t *zhp, void *arg)
     if (!cb->cb_doclones && !cb->cb_defer_destroy) {
         cb->cb_target = zhp;
         cb->cb_first = B_TRUE;
-        err = zfs_iter_dependents(zhp, 0, B_TRUE,
+        err = zfs_iter_dependents_v2(zhp, 0, B_TRUE,
             destroy_check_dependent, cb);
     }
@@ -1576,7 +1577,7 @@ gather_snapshots(zfs_handle_t *zhp, void *arg)
     destroy_cbdata_t *cb = arg;
     int err = 0;

-    err = zfs_iter_snapspec(zhp, 0, cb->cb_snapspec,
+    err = zfs_iter_snapspec_v2(zhp, 0, cb->cb_snapspec,
         snapshot_to_nvl_cb, cb);
     if (err == ENOENT)
         err = 0;
@@ -1590,7 +1591,7 @@ gather_snapshots(zfs_handle_t *zhp, void *arg)
     }

     if (cb->cb_recurse)
-        err = zfs_iter_filesystems(zhp, 0, gather_snapshots, cb);
+        err = zfs_iter_filesystems_v2(zhp, 0, gather_snapshots, cb);

 out:
     zfs_close(zhp);
@@ -1615,7 +1616,7 @@ destroy_clones(destroy_cbdata_t *cb)
          * false while destroying the clones.
          */
         cb->cb_defer_destroy = B_FALSE;
-        err = zfs_iter_dependents(zhp, 0, B_FALSE,
+        err = zfs_iter_dependents_v2(zhp, 0, B_FALSE,
             destroy_callback, cb);
         cb->cb_defer_destroy = defer;
         zfs_close(zhp);
@@ -1825,9 +1826,8 @@ zfs_do_destroy(int argc, char **argv)
          * Check for any dependents and/or clones.
          */
         cb.cb_first = B_TRUE;
-        if (!cb.cb_doclones &&
-            zfs_iter_dependents(zhp, 0, B_TRUE, destroy_check_dependent,
-            &cb) != 0) {
+        if (!cb.cb_doclones && zfs_iter_dependents_v2(zhp, 0, B_TRUE,
+            destroy_check_dependent, &cb) != 0) {
             rv = 1;
             goto out;
         }
@@ -1837,7 +1837,7 @@ zfs_do_destroy(int argc, char **argv)
             goto out;
         }
         cb.cb_batchedsnaps = fnvlist_alloc();
-        if (zfs_iter_dependents(zhp, 0, B_FALSE, destroy_callback,
+        if (zfs_iter_dependents_v2(zhp, 0, B_FALSE, destroy_callback,
             &cb) != 0) {
             rv = 1;
             goto out;
@@ -3556,8 +3556,21 @@ print_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)
             right_justify = B_FALSE;
         }

-        if (pl->pl_prop == ZFS_PROP_AVAILABLE)
-            color_start(zfs_list_avail_color(zhp));
+        /*
+         * zfs_list_avail_color() needs ZFS_PROP_AVAILABLE + USED
+         * - so we need another for() search for the USED part
+         * - when no colors wanted, we can skip the whole thing
+         */
+        if (use_color() && pl->pl_prop == ZFS_PROP_AVAILABLE) {
+            zprop_list_t *pl2 = cb->cb_proplist;
+            for (; pl2 != NULL; pl2 = pl2->pl_next) {
+                if (pl2->pl_prop == ZFS_PROP_USED) {
+                    color_start(zfs_list_avail_color(zhp));
+                    /* found it, no need for more loops */
+                    break;
+                }
+            }
+        }

         /*
          * If this is being called in scripted mode, or if this is the
@@ -4052,7 +4065,7 @@ rollback_check(zfs_handle_t *zhp, void *data)
     }

     if (cbp->cb_recurse) {
-        if (zfs_iter_dependents(zhp, 0, B_TRUE,
+        if (zfs_iter_dependents_v2(zhp, 0, B_TRUE,
             rollback_check_dependent, cbp) != 0) {
             zfs_close(zhp);
             return (-1);
@@ -4151,10 +4164,10 @@ zfs_do_rollback(int argc, char **argv)
     if (cb.cb_create > 0)
         min_txg = cb.cb_create;

-    if ((ret = zfs_iter_snapshots(zhp, 0, rollback_check, &cb,
+    if ((ret = zfs_iter_snapshots_v2(zhp, 0, rollback_check, &cb,
         min_txg, 0)) != 0)
         goto out;

-    if ((ret = zfs_iter_bookmarks(zhp, 0, rollback_check, &cb)) != 0)
+    if ((ret = zfs_iter_bookmarks_v2(zhp, 0, rollback_check, &cb)) != 0)
         goto out;

     if ((ret = cb.cb_error) != 0)
@@ -4296,7 +4309,7 @@ zfs_snapshot_cb(zfs_handle_t *zhp, void *arg)
     free(name);

     if (sd->sd_recursive)
-        rv = zfs_iter_filesystems(zhp, 0, zfs_snapshot_cb, sd);
+        rv = zfs_iter_filesystems_v2(zhp, 0, zfs_snapshot_cb, sd);
     zfs_close(zhp);
     return (rv);
 }
@@ -6360,7 +6373,7 @@ zfs_do_allow_unallow_impl(int argc, char **argv, boolean_t un)
     if (un && opts.recursive) {
         struct deleg_perms data = { un, update_perm_nvl };
-        if (zfs_iter_filesystems(zhp, 0, set_deleg_perms,
+        if (zfs_iter_filesystems_v2(zhp, 0, set_deleg_perms,
             &data) != 0)
             goto cleanup0;
     }
@@ -6738,7 +6751,7 @@ get_one_dataset(zfs_handle_t *zhp, void *data)
     /*
      * Iterate over any nested datasets.
      */
-    if (zfs_iter_filesystems(zhp, 0, get_one_dataset, data) != 0) {
+    if (zfs_iter_filesystems_v2(zhp, 0, get_one_dataset, data) != 0) {
         zfs_close(zhp);
         return (1);
     }

@@ -4272,13 +4272,17 @@ print_iostat_header(iostat_cbdata_t *cb)
  * by order of magnitude. Uses column_size to add padding.
  */
 static void
-print_stat_color(char *statbuf, unsigned int column_size)
+print_stat_color(const char *statbuf, unsigned int column_size)
 {
     fputs(" ", stdout);
+    size_t len = strlen(statbuf);
+    while (len < column_size) {
+        fputc(' ', stdout);
+        column_size--;
+    }
     if (*statbuf == '0') {
         color_start(ANSI_GRAY);
         fputc('0', stdout);
-        column_size--;
     } else {
         for (; *statbuf; statbuf++) {
             if (*statbuf == 'K') color_start(ANSI_GREEN);
@@ -4293,8 +4297,6 @@ print_stat_color(char *statbuf, unsigned int column_size)
         }
     }
     color_end();
-    for (; column_size > 0; column_size--)
-        fputc(' ', stdout);
 }

 /*
@@ -6939,6 +6941,17 @@ zpool_do_online(int argc, char **argv)
         return (1);

     for (i = 1; i < argc; i++) {
+        vdev_state_t oldstate;
+        boolean_t avail_spare, l2cache;
+        nvlist_t *tgt = zpool_find_vdev(zhp, argv[i], &avail_spare,
+            &l2cache, NULL);
+        if (tgt == NULL) {
+            ret = 1;
+            continue;
+        }
+        uint_t vsc;
+        oldstate = ((vdev_stat_t *)fnvlist_lookup_uint64_array(tgt,
+            ZPOOL_CONFIG_VDEV_STATS, &vsc))->vs_state;
         if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) {
             if (newstate != VDEV_STATE_HEALTHY) {
                 (void) printf(gettext("warning: device '%s' "
@@ -6952,6 +6965,17 @@ zpool_do_online(int argc, char **argv)
                     (void) printf(gettext("use 'zpool "
                         "replace' to replace devices "
                         "that are no longer present\n"));
+                if ((flags & ZFS_ONLINE_EXPAND)) {
+                    (void) printf(gettext("%s: failed "
+                        "to expand usable space on "
+                        "unhealthy device '%s'\n"),
+                        (oldstate >= VDEV_STATE_DEGRADED ?
+                        "error" : "warning"), argv[i]);
+                    if (oldstate >= VDEV_STATE_DEGRADED) {
+                        ret = 1;
+                        break;
+                    }
+                }
             }
         } else {
             ret = 1;
@@ -8836,7 +8860,7 @@ check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
         (*count)++;
     }

-    zfs_iter_filesystems(zhp, 0, check_unsupp_fs, unsupp_fs);
+    zfs_iter_filesystems_v2(zhp, 0, check_unsupp_fs, unsupp_fs);

     zfs_close(zhp);
@@ -9980,33 +10004,33 @@ get_callback_vdev(zpool_handle_t *zhp, char *vdevname, void *data)
     return (0);
 }

-static int
-get_callback_vdev_width_cb(void *zhp_data, nvlist_t *nv, void *data)
-{
-    zpool_handle_t *zhp = zhp_data;
-    zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
-    char *vdevname = zpool_vdev_name(g_zfs, zhp, nv,
-        cbp->cb_vdevs.cb_name_flags);
-    int ret;
-
-    /* Adjust the column widths for the vdev properties */
-    ret = vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
-
-    return (ret);
-}
-
 static int
 get_callback_vdev_cb(void *zhp_data, nvlist_t *nv, void *data)
 {
     zpool_handle_t *zhp = zhp_data;
     zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
-    char *vdevname = zpool_vdev_name(g_zfs, zhp, nv,
-        cbp->cb_vdevs.cb_name_flags);
+    char *vdevname;
+    const char *type;
     int ret;

-    /* Display the properties */
+    /*
+     * zpool_vdev_name() transforms the root vdev name (i.e., root-0) to the
+     * pool name for display purposes, which is not desired. Fallback to
+     * zpool_vdev_name() when not dealing with the root vdev.
+     */
+    type = fnvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE);
+    if (zhp != NULL && strcmp(type, "root") == 0)
+        vdevname = strdup("root-0");
+    else
+        vdevname = zpool_vdev_name(g_zfs, zhp, nv,
+            cbp->cb_vdevs.cb_name_flags);
+
+    (void) vdev_expand_proplist(zhp, vdevname, &cbp->cb_proplist);
+
     ret = get_callback_vdev(zhp, vdevname, data);
+
+    free(vdevname);
+
     return (ret);
 }
@@ -10021,7 +10045,6 @@ get_callback(zpool_handle_t *zhp, void *data)
     if (cbp->cb_type == ZFS_TYPE_VDEV) {
         if (strcmp(cbp->cb_vdevs.cb_names[0], "all-vdevs") == 0) {
-            for_each_vdev(zhp, get_callback_vdev_width_cb, data);
             for_each_vdev(zhp, get_callback_vdev_cb, data);
         } else {
             /* Adjust column widths for vdev properties */
@@ -10109,6 +10132,7 @@ zpool_do_get(int argc, char **argv)
     int ret;
     int c, i;
     char *propstr = NULL;
+    char *vdev = NULL;

     cb.cb_first = B_TRUE;
@@ -10206,10 +10230,17 @@
     } else if (are_all_pools(1, argv)) {
         /* The first arg is a pool name */
         if ((argc == 2 && strcmp(argv[1], "all-vdevs") == 0) ||
+            (argc == 2 && strcmp(argv[1], "root") == 0) ||
             are_vdevs_in_pool(argc - 1, argv + 1, argv[0],
             &cb.cb_vdevs)) {
+            if (strcmp(argv[1], "root") == 0)
+                vdev = strdup("root-0");
+            else
+                vdev = strdup(argv[1]);
+
             /* ... and the rest are vdev names */
-            cb.cb_vdevs.cb_names = argv + 1;
+            cb.cb_vdevs.cb_names = &vdev;
             cb.cb_vdevs.cb_names_count = argc - 1;
             cb.cb_type = ZFS_TYPE_VDEV;
             argc = 1; /* One pool to process */
@@ -10254,6 +10285,9 @@
     else
         zprop_free_list(cb.cb_proplist);

+    if (vdev != NULL)
+        free(vdev);
+
     return (ret);
 }
@@ -10355,6 +10389,7 @@ zpool_do_set(int argc, char **argv)
 {
     set_cbdata_t cb = { 0 };
     int error;
+    char *vdev = NULL;

     current_prop_type = ZFS_TYPE_POOL;
     if (argc > 1 && argv[1][0] == '-') {
@@ -10403,13 +10438,20 @@
     /* argv[1], when supplied, is vdev name */
     if (argc == 2) {
-        if (!are_vdevs_in_pool(1, argv + 1, argv[0], &cb.cb_vdevs)) {
+
+        if (strcmp(argv[1], "root") == 0)
+            vdev = strdup("root-0");
+        else
+            vdev = strdup(argv[1]);
+
+        if (!are_vdevs_in_pool(1, &vdev, argv[0], &cb.cb_vdevs)) {
             (void) fprintf(stderr, gettext(
                 "cannot find '%s' in '%s': device not in pool\n"),
-                argv[1], argv[0]);
+                vdev, argv[0]);
+            free(vdev);
             return (EINVAL);
         }
-        cb.cb_vdevs.cb_names = argv + 1;
+        cb.cb_vdevs.cb_names = &vdev;
         cb.cb_vdevs.cb_names_count = 1;
         cb.cb_type = ZFS_TYPE_VDEV;
     }
@@ -10417,6 +10459,9 @@
     error = for_each_pool(1, argv, B_TRUE, NULL, ZFS_TYPE_POOL,
         B_FALSE, set_callback, &cb);

+    if (vdev != NULL)
+        free(vdev);
+
     return (error);
 }

@@ -236,7 +236,22 @@ dnl #
 dnl # 6.2 API change,
 dnl # set_acl() second paramter changed to a struct dentry *
 dnl #
+dnl # 6.3 API change,
+dnl # set_acl() first parameter changed to struct mnt_idmap *
+dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_INODE_OPERATIONS_SET_ACL], [
+    ZFS_LINUX_TEST_SRC([inode_operations_set_acl_mnt_idmap_dentry], [
+        #include <linux/fs.h>
+
+        int set_acl_fn(struct mnt_idmap *idmap,
+            struct dentry *dent, struct posix_acl *acl,
+            int type) { return 0; }
+
+        static const struct inode_operations
+            iops __attribute__ ((unused)) = {
+            .set_acl = set_acl_fn,
+        };
+    ],[])
+
     ZFS_LINUX_TEST_SRC([inode_operations_set_acl_userns_dentry], [
         #include <linux/fs.h>
@@ -281,17 +296,24 @@ AC_DEFUN([ZFS_AC_KERNEL_INODE_OPERATIONS_SET_ACL], [
         AC_DEFINE(HAVE_SET_ACL, 1, [iops->set_acl() exists])
         AC_DEFINE(HAVE_SET_ACL_USERNS, 1, [iops->set_acl() takes 4 args])
     ],[
-        ZFS_LINUX_TEST_RESULT([inode_operations_set_acl_userns_dentry], [
+        ZFS_LINUX_TEST_RESULT([inode_operations_set_acl_mnt_idmap_dentry], [
             AC_MSG_RESULT(yes)
             AC_DEFINE(HAVE_SET_ACL, 1, [iops->set_acl() exists])
-            AC_DEFINE(HAVE_SET_ACL_USERNS_DENTRY_ARG2, 1,
-                [iops->set_acl() takes 4 args, arg2 is struct dentry *])
+            AC_DEFINE(HAVE_SET_ACL_IDMAP_DENTRY, 1,
+                [iops->set_acl() takes 4 args, arg1 is struct mnt_idmap *])
         ],[
-            ZFS_LINUX_TEST_RESULT([inode_operations_set_acl], [
+            ZFS_LINUX_TEST_RESULT([inode_operations_set_acl_userns_dentry], [
                 AC_MSG_RESULT(yes)
-                AC_DEFINE(HAVE_SET_ACL, 1, [iops->set_acl() exists, takes 3 args])
+                AC_DEFINE(HAVE_SET_ACL, 1, [iops->set_acl() exists])
+                AC_DEFINE(HAVE_SET_ACL_USERNS_DENTRY_ARG2, 1,
+                    [iops->set_acl() takes 4 args, arg2 is struct dentry *])
             ],[
-                ZFS_LINUX_REQUIRE_API([i_op->set_acl()], [3.14])
+                ZFS_LINUX_TEST_RESULT([inode_operations_set_acl], [
+                    AC_MSG_RESULT(yes)
+                    AC_DEFINE(HAVE_SET_ACL, 1, [iops->set_acl() exists, takes 3 args])
+                ],[
+                    ZFS_LINUX_REQUIRE_API([i_op->set_acl()], [3.14])
+                ])
             ])
         ])
     ])

@@ -4,7 +4,10 @@ dnl #
 dnl # generic_fillattr in linux/fs.h now requires a struct user_namespace*
 dnl # as the first arg, to support idmapped mounts.
 dnl #
-AC_DEFUN([ZFS_AC_KERNEL_SRC_GENERIC_FILLATTR_USERNS], [
+dnl # 6.3 API
+dnl # generic_fillattr() now takes struct mnt_idmap* as the first argument
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_GENERIC_FILLATTR], [
     ZFS_LINUX_TEST_SRC([generic_fillattr_userns], [
         #include <linux/fs.h>
     ],[
@@ -13,16 +16,32 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_GENERIC_FILLATTR_USERNS], [
         struct kstat *k = NULL;
         generic_fillattr(userns, in, k);
     ])
+
+    ZFS_LINUX_TEST_SRC([generic_fillattr_mnt_idmap], [
+        #include <linux/fs.h>
+    ],[
+        struct mnt_idmap *idmap = NULL;
+        struct inode *in = NULL;
+        struct kstat *k = NULL;
+        generic_fillattr(idmap, in, k);
+    ])
 ])

-AC_DEFUN([ZFS_AC_KERNEL_GENERIC_FILLATTR_USERNS], [
-    AC_MSG_CHECKING([whether generic_fillattr requires struct user_namespace*])
-    ZFS_LINUX_TEST_RESULT([generic_fillattr_userns], [
-        AC_MSG_RESULT([yes])
-        AC_DEFINE(HAVE_GENERIC_FILLATTR_USERNS, 1,
-            [generic_fillattr requires struct user_namespace*])
+AC_DEFUN([ZFS_AC_KERNEL_GENERIC_FILLATTR], [
+    AC_MSG_CHECKING([whether generic_fillattr requires struct mnt_idmap*])
+    ZFS_LINUX_TEST_RESULT([generic_fillattr_mnt_idmap], [
+        AC_MSG_RESULT([yes])
+        AC_DEFINE(HAVE_GENERIC_FILLATTR_IDMAP, 1,
+            [generic_fillattr requires struct mnt_idmap*])
     ],[
-        AC_MSG_RESULT([no])
+        AC_MSG_CHECKING([whether generic_fillattr requires struct user_namespace*])
+        ZFS_LINUX_TEST_RESULT([generic_fillattr_userns], [
+            AC_MSG_RESULT([yes])
+            AC_DEFINE(HAVE_GENERIC_FILLATTR_USERNS, 1,
+                [generic_fillattr requires struct user_namespace*])
+        ],[
+            AC_MSG_RESULT([no])
+        ])
     ])
 ])

@@ -1,4 +1,22 @@
 AC_DEFUN([ZFS_AC_KERNEL_SRC_CREATE], [
+    dnl #
+    dnl # 6.3 API change
+    dnl # The first arg is changed to struct mnt_idmap *
+    dnl #
+    ZFS_LINUX_TEST_SRC([create_mnt_idmap], [
+        #include <linux/fs.h>
+        #include <linux/sched.h>
+
+        int inode_create(struct mnt_idmap *idmap,
+            struct inode *inode ,struct dentry *dentry,
+            umode_t umode, bool flag) { return 0; }
+
+        static const struct inode_operations
+            iops __attribute__ ((unused)) = {
+            .create = inode_create,
+        };
+    ],[])
+
     dnl #
     dnl # 5.12 API change that added the struct user_namespace* arg
     dnl # to the front of this function type's arg list.
@@ -35,19 +53,28 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_CREATE], [
 ])

 AC_DEFUN([ZFS_AC_KERNEL_CREATE], [
-    AC_MSG_CHECKING([whether iops->create() takes struct user_namespace*])
-    ZFS_LINUX_TEST_RESULT([create_userns], [
+    AC_MSG_CHECKING([whether iops->create() takes struct mnt_idmap*])
+    ZFS_LINUX_TEST_RESULT([create_mnt_idmap], [
         AC_MSG_RESULT(yes)
-        AC_DEFINE(HAVE_IOPS_CREATE_USERNS, 1,
-            [iops->create() takes struct user_namespace*])
+        AC_DEFINE(HAVE_IOPS_CREATE_IDMAP, 1,
+            [iops->create() takes struct mnt_idmap*])
     ],[
         AC_MSG_RESULT(no)
-        AC_MSG_CHECKING([whether iops->create() passes flags])
-        ZFS_LINUX_TEST_RESULT([create_flags], [
+        AC_MSG_CHECKING([whether iops->create() takes struct user_namespace*])
+        ZFS_LINUX_TEST_RESULT([create_userns], [
             AC_MSG_RESULT(yes)
+            AC_DEFINE(HAVE_IOPS_CREATE_USERNS, 1,
+                [iops->create() takes struct user_namespace*])
         ],[
-            ZFS_LINUX_TEST_ERROR([iops->create()])
+            AC_MSG_RESULT(no)
+            AC_MSG_CHECKING([whether iops->create() passes flags])
+            ZFS_LINUX_TEST_RESULT([create_flags], [
+                AC_MSG_RESULT(yes)
+            ],[
+                ZFS_LINUX_TEST_ERROR([iops->create()])
+            ])
         ])
     ])
 ])

@@ -1,4 +1,22 @@
 AC_DEFUN([ZFS_AC_KERNEL_SRC_INODE_GETATTR], [
+    dnl #
+    dnl # Linux 6.3 API
+    dnl # The first arg of getattr I/O operations handler type
+    dnl # is changed to struct mnt_idmap*
+    dnl #
+    ZFS_LINUX_TEST_SRC([inode_operations_getattr_mnt_idmap], [
+        #include <linux/fs.h>
+
+        int test_getattr(
+            struct mnt_idmap *idmap,
+            const struct path *p, struct kstat *k,
+            u32 request_mask, unsigned int query_flags)
+            { return 0; }
+
+        static const struct inode_operations
+            iops __attribute__ ((unused)) = {
+            .getattr = test_getattr,
+        };
+    ],[])
+
     dnl #
     dnl # Linux 5.12 API
     dnl # The getattr I/O operations handler type was extended to require
@@ -55,37 +75,48 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_INODE_GETATTR], [
 AC_DEFUN([ZFS_AC_KERNEL_INODE_GETATTR], [
     dnl #
-    dnl # Kernel 5.12 test
+    dnl # Kernel 6.3 test
     dnl #
-    AC_MSG_CHECKING([whether iops->getattr() takes user_namespace])
-    ZFS_LINUX_TEST_RESULT([inode_operations_getattr_userns], [
+    AC_MSG_CHECKING([whether iops->getattr() takes mnt_idmap])
+    ZFS_LINUX_TEST_RESULT([inode_operations_getattr_mnt_idmap], [
         AC_MSG_RESULT(yes)
-        AC_DEFINE(HAVE_USERNS_IOPS_GETATTR, 1,
-            [iops->getattr() takes struct user_namespace*])
+        AC_DEFINE(HAVE_IDMAP_IOPS_GETATTR, 1,
+            [iops->getattr() takes struct mnt_idmap*])
     ],[
         AC_MSG_RESULT(no)
         dnl #
-        dnl # Kernel 4.11 test
+        dnl # Kernel 5.12 test
         dnl #
-        AC_MSG_CHECKING([whether iops->getattr() takes a path])
-        ZFS_LINUX_TEST_RESULT([inode_operations_getattr_path], [
+        AC_MSG_CHECKING([whether iops->getattr() takes user_namespace])
+        ZFS_LINUX_TEST_RESULT([inode_operations_getattr_userns], [
            AC_MSG_RESULT(yes)
-            AC_DEFINE(HAVE_PATH_IOPS_GETATTR, 1,
-                [iops->getattr() takes a path])
+            AC_DEFINE(HAVE_USERNS_IOPS_GETATTR, 1,
+                [iops->getattr() takes struct user_namespace*])
         ],[
             AC_MSG_RESULT(no)
             dnl #
-            dnl # Kernel < 4.11 test
+            dnl # Kernel 4.11 test
             dnl #
-            AC_MSG_CHECKING([whether iops->getattr() takes a vfsmount])
-            ZFS_LINUX_TEST_RESULT([inode_operations_getattr_vfsmount], [
+            AC_MSG_CHECKING([whether iops->getattr() takes a path])
+            ZFS_LINUX_TEST_RESULT([inode_operations_getattr_path], [
                 AC_MSG_RESULT(yes)
-                AC_DEFINE(HAVE_VFSMOUNT_IOPS_GETATTR, 1,
-                    [iops->getattr() takes a vfsmount])
+                AC_DEFINE(HAVE_PATH_IOPS_GETATTR, 1,
+                    [iops->getattr() takes a path])
             ],[
                 AC_MSG_RESULT(no)
+                dnl #
+                dnl # Kernel < 4.11 test
+                dnl #
+                AC_MSG_CHECKING([whether iops->getattr() takes a vfsmount])
+                ZFS_LINUX_TEST_RESULT([inode_operations_getattr_vfsmount], [
+                    AC_MSG_RESULT(yes)
+                    AC_DEFINE(HAVE_VFSMOUNT_IOPS_GETATTR, 1,
+                        [iops->getattr() takes a vfsmount])
+                ],[
+                    AC_MSG_RESULT(no)
+                ])
             ])
         ])
     ])
 ])

@ -1,4 +1,22 @@
AC_DEFUN([ZFS_AC_KERNEL_SRC_PERMISSION], [ AC_DEFUN([ZFS_AC_KERNEL_SRC_PERMISSION], [
dnl #
dnl # 6.3 API change
dnl # iops->permission() now takes struct mnt_idmap*
dnl # as its first arg
dnl #
ZFS_LINUX_TEST_SRC([permission_mnt_idmap], [
#include <linux/fs.h>
#include <linux/sched.h>
int inode_permission(struct mnt_idmap *idmap,
struct inode *inode, int mask) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.permission = inode_permission,
};
],[])
dnl # dnl #
dnl # 5.12 API change that added the struct user_namespace* arg dnl # 5.12 API change that added the struct user_namespace* arg
dnl # to the front of this function type's arg list. dnl # to the front of this function type's arg list.
@ -18,12 +36,19 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_PERMISSION], [
]) ])
AC_DEFUN([ZFS_AC_KERNEL_PERMISSION], [ AC_DEFUN([ZFS_AC_KERNEL_PERMISSION], [
AC_MSG_CHECKING([whether iops->permission() takes struct user_namespace*]) AC_MSG_CHECKING([whether iops->permission() takes struct mnt_idmap*])
ZFS_LINUX_TEST_RESULT([permission_userns], [ ZFS_LINUX_TEST_RESULT([permission_mnt_idmap], [
AC_MSG_RESULT(yes) AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_PERMISSION_USERNS, 1, AC_DEFINE(HAVE_IOPS_PERMISSION_IDMAP, 1,
[iops->permission() takes struct user_namespace*]) [iops->permission() takes struct mnt_idmap*])
],[ ],[
AC_MSG_RESULT(no) AC_MSG_CHECKING([whether iops->permission() takes struct user_namespace*])
ZFS_LINUX_TEST_RESULT([permission_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_PERMISSION_USERNS, 1,
[iops->permission() takes struct user_namespace*])
],[
AC_MSG_RESULT(no)
])
]) ])
]) ])

@@ -0,0 +1,87 @@
+AC_DEFUN([ZFS_AC_KERNEL_SRC_INODE_SETATTR], [
+    dnl #
+    dnl # Linux 6.3 API
+    dnl # The first arg of setattr I/O operations handler type
+    dnl # is changed to struct mnt_idmap*
+    dnl #
+    ZFS_LINUX_TEST_SRC([inode_operations_setattr_mnt_idmap], [
+        #include <linux/fs.h>
+
+        int test_setattr(
+            struct mnt_idmap *idmap,
+            struct dentry *de, struct iattr *ia)
+            { return 0; }
+
+        static const struct inode_operations
+            iops __attribute__ ((unused)) = {
+            .setattr = test_setattr,
+        };
+    ],[])
+
+    dnl #
+    dnl # Linux 5.12 API
+    dnl # The setattr I/O operations handler type was extended to require
+    dnl # a struct user_namespace* as its first arg, to support idmapped
+    dnl # mounts.
+    dnl #
+    ZFS_LINUX_TEST_SRC([inode_operations_setattr_userns], [
+        #include <linux/fs.h>
+
+        int test_setattr(
+            struct user_namespace *userns,
+            struct dentry *de, struct iattr *ia)
+            { return 0; }
+
+        static const struct inode_operations
+            iops __attribute__ ((unused)) = {
+            .setattr = test_setattr,
+        };
+    ],[])
+
+    ZFS_LINUX_TEST_SRC([inode_operations_setattr], [
+        #include <linux/fs.h>
+
+        int test_setattr(
+            struct dentry *de, struct iattr *ia)
+            { return 0; }
+
+        static const struct inode_operations
+            iops __attribute__ ((unused)) = {
+            .setattr = test_setattr,
+        };
+    ],[])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_INODE_SETATTR], [
+    dnl #
+    dnl # Kernel 6.3 test
+    dnl #
+    AC_MSG_CHECKING([whether iops->setattr() takes mnt_idmap])
+    ZFS_LINUX_TEST_RESULT([inode_operations_setattr_mnt_idmap], [
+        AC_MSG_RESULT(yes)
+        AC_DEFINE(HAVE_IDMAP_IOPS_SETATTR, 1,
+            [iops->setattr() takes struct mnt_idmap*])
+    ],[
+        AC_MSG_RESULT(no)
+        dnl #
+        dnl # Kernel 5.12 test
+        dnl #
+        AC_MSG_CHECKING([whether iops->setattr() takes user_namespace])
+        ZFS_LINUX_TEST_RESULT([inode_operations_setattr_userns], [
+            AC_MSG_RESULT(yes)
+            AC_DEFINE(HAVE_USERNS_IOPS_SETATTR, 1,
+                [iops->setattr() takes struct user_namespace*])
+        ],[
+            AC_MSG_RESULT(no)
+            AC_MSG_CHECKING([whether iops->setattr() exists])
+            ZFS_LINUX_TEST_RESULT([inode_operations_setattr], [
+                AC_MSG_RESULT(yes)
+                AC_DEFINE(HAVE_IOPS_SETATTR, 1,
+                    [iops->setattr() exists])
+            ],[
+                AC_MSG_RESULT(no)
+            ])
+        ])
+    ])
+])

@@ -16,12 +16,20 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_INODE_OWNER_OR_CAPABLE], [
         (void) inode_owner_or_capable(ip);
     ])

-    ZFS_LINUX_TEST_SRC([inode_owner_or_capable_idmapped], [
+    ZFS_LINUX_TEST_SRC([inode_owner_or_capable_userns], [
         #include <linux/fs.h>
     ],[
         struct inode *ip = NULL;
         (void) inode_owner_or_capable(&init_user_ns, ip);
     ])
+
+    ZFS_LINUX_TEST_SRC([inode_owner_or_capable_mnt_idmap], [
+        #include <linux/fs.h>
+        #include <linux/mnt_idmapping.h>
+    ],[
+        struct inode *ip = NULL;
+        (void) inode_owner_or_capable(&nop_mnt_idmap, ip);
+    ])
 ])

@@ -35,12 +43,21 @@ AC_DEFUN([ZFS_AC_KERNEL_INODE_OWNER_OR_CAPABLE], [
         AC_MSG_CHECKING(
             [whether inode_owner_or_capable() takes user_ns])
-        ZFS_LINUX_TEST_RESULT([inode_owner_or_capable_idmapped], [
+        ZFS_LINUX_TEST_RESULT([inode_owner_or_capable_userns], [
             AC_MSG_RESULT(yes)
-            AC_DEFINE(HAVE_INODE_OWNER_OR_CAPABLE_IDMAPPED, 1,
+            AC_DEFINE(HAVE_INODE_OWNER_OR_CAPABLE_USERNS, 1,
                 [inode_owner_or_capable() takes user_ns])
         ],[
-            ZFS_LINUX_TEST_ERROR([capability])
+            AC_MSG_RESULT(no)
+            AC_MSG_CHECKING(
+                [whether inode_owner_or_capable() takes mnt_idmap])
+            ZFS_LINUX_TEST_RESULT([inode_owner_or_capable_mnt_idmap], [
+                AC_MSG_RESULT(yes)
+                AC_DEFINE(HAVE_INODE_OWNER_OR_CAPABLE_IDMAP, 1,
+                    [inode_owner_or_capable() takes mnt_idmap])
+            ], [
+                ZFS_LINUX_TEST_ERROR([capability])
+            ])
         ])
     ])
 ])

@@ -2,6 +2,22 @@ dnl #
 dnl # Supported mkdir() interfaces checked newest to oldest.
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_MKDIR], [
+    dnl #
+    dnl # 6.3 API change
+    dnl # mkdir() takes struct mnt_idmap * as the first arg
+    dnl #
+    ZFS_LINUX_TEST_SRC([mkdir_mnt_idmap], [
+        #include <linux/fs.h>
+
+        int mkdir(struct mnt_idmap *idmap,
+            struct inode *inode, struct dentry *dentry,
+            umode_t umode) { return 0; }
+
+        static const struct inode_operations
+            iops __attribute__ ((unused)) = {
+            .mkdir = mkdir,
+        };
+    ],[])
+
     dnl #
     dnl # 5.12 API change
     dnl # The struct user_namespace arg was added as the first argument to
@@ -43,25 +59,36 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_MKDIR], [
 AC_DEFUN([ZFS_AC_KERNEL_MKDIR], [
     dnl #
-    dnl # 5.12 API change
-    dnl # The struct user_namespace arg was added as the first argument to
-    dnl # mkdir() of the iops structure.
+    dnl # 6.3 API change
+    dnl # mkdir() takes struct mnt_idmap * as the first arg
     dnl #
-    AC_MSG_CHECKING([whether iops->mkdir() takes struct user_namespace*])
-    ZFS_LINUX_TEST_RESULT([mkdir_user_namespace], [
+    AC_MSG_CHECKING([whether iops->mkdir() takes struct mnt_idmap*])
+    ZFS_LINUX_TEST_RESULT([mkdir_mnt_idmap], [
         AC_MSG_RESULT(yes)
-        AC_DEFINE(HAVE_IOPS_MKDIR_USERNS, 1,
-            [iops->mkdir() takes struct user_namespace*])
+        AC_DEFINE(HAVE_IOPS_MKDIR_IDMAP, 1,
+            [iops->mkdir() takes struct mnt_idmap*])
     ],[
-        AC_MSG_RESULT(no)
-
-        AC_MSG_CHECKING([whether iops->mkdir() takes umode_t])
-        ZFS_LINUX_TEST_RESULT([inode_operations_mkdir], [
+        dnl #
+        dnl # 5.12 API change
+        dnl # The struct user_namespace arg was added as the first argument to
+        dnl # mkdir() of the iops structure.
+        dnl #
+        AC_MSG_CHECKING([whether iops->mkdir() takes struct user_namespace*])
+        ZFS_LINUX_TEST_RESULT([mkdir_user_namespace], [
             AC_MSG_RESULT(yes)
-            AC_DEFINE(HAVE_MKDIR_UMODE_T, 1,
-                [iops->mkdir() takes umode_t])
+            AC_DEFINE(HAVE_IOPS_MKDIR_USERNS, 1,
+                [iops->mkdir() takes struct user_namespace*])
         ],[
-            ZFS_LINUX_TEST_ERROR([mkdir()])
+            AC_MSG_RESULT(no)
+            AC_MSG_CHECKING([whether iops->mkdir() takes umode_t])
+            ZFS_LINUX_TEST_RESULT([inode_operations_mkdir], [
+                AC_MSG_RESULT(yes)
+                AC_DEFINE(HAVE_MKDIR_UMODE_T, 1,
+                    [iops->mkdir() takes umode_t])
+            ],[
+                ZFS_LINUX_TEST_ERROR([mkdir()])
+            ])
         ])
     ])
 ])

@ -1,4 +1,22 @@
AC_DEFUN([ZFS_AC_KERNEL_SRC_MKNOD], [ AC_DEFUN([ZFS_AC_KERNEL_SRC_MKNOD], [
dnl #
dnl # 6.3 API change
dnl # The first arg is now struct mnt_idmap*
dnl #
ZFS_LINUX_TEST_SRC([mknod_mnt_idmap], [
#include <linux/fs.h>
#include <linux/sched.h>
int tmp_mknod(struct mnt_idmap *idmap,
struct inode *inode ,struct dentry *dentry,
umode_t u, dev_t d) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.mknod = tmp_mknod,
};
],[])
dnl # dnl #
dnl # 5.12 API change that added the struct user_namespace* arg dnl # 5.12 API change that added the struct user_namespace* arg
dnl # to the front of this function type's arg list. dnl # to the front of this function type's arg list.
@ -19,12 +37,20 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_MKNOD], [
]) ])
AC_DEFUN([ZFS_AC_KERNEL_MKNOD], [ AC_DEFUN([ZFS_AC_KERNEL_MKNOD], [
AC_MSG_CHECKING([whether iops->mknod() takes struct user_namespace*]) AC_MSG_CHECKING([whether iops->mknod() takes struct mnt_idmap*])
ZFS_LINUX_TEST_RESULT([mknod_userns], [ ZFS_LINUX_TEST_RESULT([mknod_mnt_idmap], [
AC_MSG_RESULT(yes) AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_MKNOD_USERNS, 1, AC_DEFINE(HAVE_IOPS_MKNOD_IDMAP, 1,
[iops->mknod() takes struct user_namespace*]) [iops->mknod() takes struct mnt_idmap*])
],[ ],[
AC_MSG_RESULT(no) AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether iops->mknod() takes struct user_namespace*])
ZFS_LINUX_TEST_RESULT([mknod_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_IOPS_MKNOD_USERNS, 1,
[iops->mknod() takes struct user_namespace*])
],[
AC_MSG_RESULT(no)
])
]) ])
]) ])

@@ -71,39 +71,61 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_RENAME], [
             .rename = rename_fn,
         };
     ],[])
+
+    dnl #
+    dnl # 6.3 API change - the first arg is now struct mnt_idmap*
+    dnl #
+    ZFS_LINUX_TEST_SRC([inode_operations_rename_mnt_idmap], [
+        #include <linux/fs.h>
+        int rename_fn(struct mnt_idmap *idmap, struct inode *sip,
+            struct dentry *sdp, struct inode *tip, struct dentry *tdp,
+            unsigned int flags) { return 0; }
+
+        static const struct inode_operations
+            iops __attribute__ ((unused)) = {
+            .rename = rename_fn,
+        };
+    ],[])
 ])

 AC_DEFUN([ZFS_AC_KERNEL_RENAME], [
-    AC_MSG_CHECKING([whether iops->rename() takes struct user_namespace*])
-    ZFS_LINUX_TEST_RESULT([inode_operations_rename_userns], [
+    AC_MSG_CHECKING([whether iops->rename() takes struct mnt_idmap*])
+    ZFS_LINUX_TEST_RESULT([inode_operations_rename_mnt_idmap], [
         AC_MSG_RESULT(yes)
-        AC_DEFINE(HAVE_IOPS_RENAME_USERNS, 1,
-            [iops->rename() takes struct user_namespace*])
+        AC_DEFINE(HAVE_IOPS_RENAME_IDMAP, 1,
+            [iops->rename() takes struct mnt_idmap*])
     ],[
-        AC_MSG_RESULT(no)
-
-        AC_MSG_CHECKING([whether iops->rename2() exists])
-        ZFS_LINUX_TEST_RESULT([inode_operations_rename2], [
+        AC_MSG_CHECKING([whether iops->rename() takes struct user_namespace*])
+        ZFS_LINUX_TEST_RESULT([inode_operations_rename_userns], [
             AC_MSG_RESULT(yes)
-            AC_DEFINE(HAVE_RENAME2, 1, [iops->rename2() exists])
+            AC_DEFINE(HAVE_IOPS_RENAME_USERNS, 1,
+                [iops->rename() takes struct user_namespace*])
         ],[
             AC_MSG_RESULT(no)
-            AC_MSG_CHECKING([whether iops->rename() wants flags])
-            ZFS_LINUX_TEST_RESULT([inode_operations_rename_flags], [
+            AC_MSG_CHECKING([whether iops->rename2() exists])
+            ZFS_LINUX_TEST_RESULT([inode_operations_rename2], [
                 AC_MSG_RESULT(yes)
-                AC_DEFINE(HAVE_RENAME_WANTS_FLAGS, 1,
-                    [iops->rename() wants flags])
+                AC_DEFINE(HAVE_RENAME2, 1, [iops->rename2() exists])
             ],[
                 AC_MSG_RESULT(no)
-                AC_MSG_CHECKING([whether struct inode_operations_wrapper takes .rename2()])
-                ZFS_LINUX_TEST_RESULT([dir_inode_operations_wrapper_rename2], [
+                AC_MSG_CHECKING([whether iops->rename() wants flags])
+                ZFS_LINUX_TEST_RESULT([inode_operations_rename_flags], [
                     AC_MSG_RESULT(yes)
-                    AC_DEFINE(HAVE_RENAME2_OPERATIONS_WRAPPER, 1,
-                        [struct inode_operations_wrapper takes .rename2()])
+                    AC_DEFINE(HAVE_RENAME_WANTS_FLAGS, 1,
+                        [iops->rename() wants flags])
                 ],[
                     AC_MSG_RESULT(no)
+                    AC_MSG_CHECKING([whether struct inode_operations_wrapper takes .rename2()])
+                    ZFS_LINUX_TEST_RESULT([dir_inode_operations_wrapper_rename2], [
+                        AC_MSG_RESULT(yes)
+                        AC_DEFINE(HAVE_RENAME2_OPERATIONS_WRAPPER, 1,
+                            [struct inode_operations_wrapper takes .rename2()])
+                    ],[
+                        AC_MSG_RESULT(no)
+                    ])
                 ])
             ])
         ])
     ])
 ])

@@ -27,26 +27,48 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_SETATTR_PREPARE], [
         int error __attribute__ ((unused)) =
             setattr_prepare(userns, dentry, attr);
     ])
+
+    dnl #
+    dnl # 6.3 API change
+    dnl # The first arg of setattr_prepare() is changed to struct mnt_idmap*
+    dnl #
+    ZFS_LINUX_TEST_SRC([setattr_prepare_mnt_idmap], [
+        #include <linux/fs.h>
+    ], [
+        struct dentry *dentry = NULL;
+        struct iattr *attr = NULL;
+        struct mnt_idmap *idmap = NULL;
+        int error __attribute__ ((unused)) =
+            setattr_prepare(idmap, dentry, attr);
+    ])
 ])

 AC_DEFUN([ZFS_AC_KERNEL_SETATTR_PREPARE], [
-    AC_MSG_CHECKING([whether setattr_prepare() is available and accepts struct user_namespace*])
-    ZFS_LINUX_TEST_RESULT_SYMBOL([setattr_prepare_userns],
+    AC_MSG_CHECKING([whether setattr_prepare() is available and accepts struct mnt_idmap*])
+    ZFS_LINUX_TEST_RESULT_SYMBOL([setattr_prepare_mnt_idmap],
         [setattr_prepare], [fs/attr.c], [
         AC_MSG_RESULT(yes)
-        AC_DEFINE(HAVE_SETATTR_PREPARE_USERNS, 1,
-            [setattr_prepare() accepts user_namespace])
+        AC_DEFINE(HAVE_SETATTR_PREPARE_IDMAP, 1,
+            [setattr_prepare() accepts mnt_idmap])
     ], [
-        AC_MSG_RESULT(no)
-
-        AC_MSG_CHECKING([whether setattr_prepare() is available, doesn't accept user_namespace])
-        ZFS_LINUX_TEST_RESULT_SYMBOL([setattr_prepare],
-            [setattr_prepare], [fs/attr.c], [
+        AC_MSG_CHECKING([whether setattr_prepare() is available and accepts struct user_namespace*])
+        ZFS_LINUX_TEST_RESULT_SYMBOL([setattr_prepare_userns],
+            [setattr_prepare], [fs/attr.c], [
             AC_MSG_RESULT(yes)
-            AC_DEFINE(HAVE_SETATTR_PREPARE_NO_USERNS, 1,
-                [setattr_prepare() is available, doesn't accept user_namespace])
+            AC_DEFINE(HAVE_SETATTR_PREPARE_USERNS, 1,
+                [setattr_prepare() accepts user_namespace])
         ], [
             AC_MSG_RESULT(no)
+            AC_MSG_CHECKING([whether setattr_prepare() is available, doesn't accept user_namespace])
+            ZFS_LINUX_TEST_RESULT_SYMBOL([setattr_prepare],
+                [setattr_prepare], [fs/attr.c], [
+                AC_MSG_RESULT(yes)
+                AC_DEFINE(HAVE_SETATTR_PREPARE_NO_USERNS, 1,
+                    [setattr_prepare() is available, doesn't accept user_namespace])
+            ], [
+                AC_MSG_RESULT(no)
+            ])
         ])
     ])
 ])

@@ -1,4 +1,20 @@
 AC_DEFUN([ZFS_AC_KERNEL_SRC_SYMLINK], [
+    dnl #
+    dnl # 6.3 API change that changed the first arg
+    dnl # to struct mnt_idmap*
+    dnl #
+    ZFS_LINUX_TEST_SRC([symlink_mnt_idmap], [
+        #include <linux/fs.h>
+        #include <linux/sched.h>
+        int tmp_symlink(struct mnt_idmap *idmap,
+            struct inode *inode ,struct dentry *dentry,
+            const char *path) { return 0; }
+
+        static const struct inode_operations
+            iops __attribute__ ((unused)) = {
+            .symlink = tmp_symlink,
+        };
+    ],[])
+
     dnl #
     dnl # 5.12 API change that added the struct user_namespace* arg
     dnl # to the front of this function type's arg list.
@@ -19,12 +35,19 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_SYMLINK], [
 ])

 AC_DEFUN([ZFS_AC_KERNEL_SYMLINK], [
-    AC_MSG_CHECKING([whether iops->symlink() takes struct user_namespace*])
-    ZFS_LINUX_TEST_RESULT([symlink_userns], [
+    AC_MSG_CHECKING([whether iops->symlink() takes struct mnt_idmap*])
+    ZFS_LINUX_TEST_RESULT([symlink_mnt_idmap], [
         AC_MSG_RESULT(yes)
-        AC_DEFINE(HAVE_IOPS_SYMLINK_USERNS, 1,
-            [iops->symlink() takes struct user_namespace*])
+        AC_DEFINE(HAVE_IOPS_SYMLINK_IDMAP, 1,
+            [iops->symlink() takes struct mnt_idmap*])
     ],[
-        AC_MSG_RESULT(no)
+        AC_MSG_CHECKING([whether iops->symlink() takes struct user_namespace*])
+        ZFS_LINUX_TEST_RESULT([symlink_userns], [
+            AC_MSG_RESULT(yes)
+            AC_DEFINE(HAVE_IOPS_SYMLINK_USERNS, 1,
+                [iops->symlink() takes struct user_namespace*])
+        ],[
+            AC_MSG_RESULT(no)
+        ])
     ])
 ])

@@ -4,6 +4,19 @@ dnl # Add support for i_op->tmpfile
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_TMPFILE], [
     dnl #
+    dnl # 6.3 API change
+    dnl # The first arg is now struct mnt_idmap *
+    dnl #
+    ZFS_LINUX_TEST_SRC([inode_operations_tmpfile_mnt_idmap], [
+        #include <linux/fs.h>
+        int tmpfile(struct mnt_idmap *idmap,
+            struct inode *inode, struct file *file,
+            umode_t mode) { return 0; }
+        static struct inode_operations
+            iops __attribute__ ((unused)) = {
+            .tmpfile = tmpfile,
+        };
+    ],[])
+    dnl #
     dnl # 6.1 API change
     dnl # use struct file instead of struct dentry
     dnl #
@@ -44,23 +57,29 @@ AC_DEFUN([ZFS_AC_KERNEL_SRC_TMPFILE], [
 AC_DEFUN([ZFS_AC_KERNEL_TMPFILE], [
     AC_MSG_CHECKING([whether i_op->tmpfile() exists])
-    ZFS_LINUX_TEST_RESULT([inode_operations_tmpfile], [
+    ZFS_LINUX_TEST_RESULT([inode_operations_tmpfile_mnt_idmap], [
         AC_MSG_RESULT(yes)
         AC_DEFINE(HAVE_TMPFILE, 1, [i_op->tmpfile() exists])
-        AC_DEFINE(HAVE_TMPFILE_USERNS, 1, [i_op->tmpfile() has userns])
-    ],[
-        ZFS_LINUX_TEST_RESULT([inode_operations_tmpfile_dentry_userns], [
+        AC_DEFINE(HAVE_TMPFILE_IDMAP, 1, [i_op->tmpfile() has mnt_idmap])
+    ], [
+        ZFS_LINUX_TEST_RESULT([inode_operations_tmpfile], [
             AC_MSG_RESULT(yes)
             AC_DEFINE(HAVE_TMPFILE, 1, [i_op->tmpfile() exists])
             AC_DEFINE(HAVE_TMPFILE_USERNS, 1, [i_op->tmpfile() has userns])
-            AC_DEFINE(HAVE_TMPFILE_DENTRY, 1, [i_op->tmpfile() uses old dentry signature])
         ],[
-            ZFS_LINUX_TEST_RESULT([inode_operations_tmpfile_dentry], [
+            ZFS_LINUX_TEST_RESULT([inode_operations_tmpfile_dentry_userns], [
                 AC_MSG_RESULT(yes)
                 AC_DEFINE(HAVE_TMPFILE, 1, [i_op->tmpfile() exists])
+                AC_DEFINE(HAVE_TMPFILE_USERNS, 1, [i_op->tmpfile() has userns])
                 AC_DEFINE(HAVE_TMPFILE_DENTRY, 1, [i_op->tmpfile() uses old dentry signature])
             ],[
-                ZFS_LINUX_REQUIRE_API([i_op->tmpfile()], [3.11])
+                ZFS_LINUX_TEST_RESULT([inode_operations_tmpfile_dentry], [
+                    AC_MSG_RESULT(yes)
+                    AC_DEFINE(HAVE_TMPFILE, 1, [i_op->tmpfile() exists])
+                    AC_DEFINE(HAVE_TMPFILE_DENTRY, 1, [i_op->tmpfile() uses old dentry signature])
+                ],[
+                    ZFS_LINUX_REQUIRE_API([i_op->tmpfile()], [3.11])
+                ])
             ])
         ])
     ])
 ])

@@ -0,0 +1,26 @@
+AC_DEFUN([ZFS_AC_KERNEL_SRC_WRITEPAGE_T], [
+    dnl #
+    dnl # 6.3 API change
+    dnl # The writepage_t function type now has its first argument as
+    dnl # struct folio* instead of struct page*
+    dnl #
+    ZFS_LINUX_TEST_SRC([writepage_t_folio], [
+        #include <linux/writeback.h>
+        int putpage(struct folio *folio,
+            struct writeback_control *wbc, void *data)
+            { return 0; }
+        writepage_t func = putpage;
+    ],[])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_WRITEPAGE_T], [
+    AC_MSG_CHECKING([whether int (*writepage_t)() takes struct folio*])
+    ZFS_LINUX_TEST_RESULT([writepage_t_folio], [
+        AC_MSG_RESULT(yes)
+        AC_DEFINE(HAVE_WRITEPAGE_T_FOLIO, 1,
+            [int (*writepage_t)() takes struct folio*])
+    ],[
+        AC_MSG_RESULT(no)
+    ])
+])

@@ -179,6 +179,21 @@ dnl #
 dnl # Supported xattr handler set() interfaces checked newest to oldest.
 dnl #
 AC_DEFUN([ZFS_AC_KERNEL_SRC_XATTR_HANDLER_SET], [
+    ZFS_LINUX_TEST_SRC([xattr_handler_set_mnt_idmap], [
+        #include <linux/xattr.h>
+
+        int set(const struct xattr_handler *handler,
+            struct mnt_idmap *idmap,
+            struct dentry *dentry, struct inode *inode,
+            const char *name, const void *buffer,
+            size_t size, int flags)
+            { return 0; }
+        static const struct xattr_handler
+            xops __attribute__ ((unused)) = {
+            .set = set,
+        };
+    ],[])
+
     ZFS_LINUX_TEST_SRC([xattr_handler_set_userns], [
         #include <linux/xattr.h>
@@ -240,53 +255,63 @@ AC_DEFUN([ZFS_AC_KERNEL_XATTR_HANDLER_SET], [
     dnl # The xattr_handler->set() callback was changed to 8 arguments, and
     dnl # struct user_namespace* was inserted as arg #2
     dnl #
-    AC_MSG_CHECKING([whether xattr_handler->set() wants dentry, inode, and user_namespace])
-    ZFS_LINUX_TEST_RESULT([xattr_handler_set_userns], [
+    dnl # 6.3 API change,
+    dnl # The xattr_handler->set() callback 2nd arg is now struct mnt_idmap *
+    dnl #
+    AC_MSG_CHECKING([whether xattr_handler->set() wants dentry, inode, and mnt_idmap])
+    ZFS_LINUX_TEST_RESULT([xattr_handler_set_mnt_idmap], [
         AC_MSG_RESULT(yes)
-        AC_DEFINE(HAVE_XATTR_SET_USERNS, 1,
-            [xattr_handler->set() takes user_namespace])
-    ],[
-        dnl #
-        dnl # 4.7 API change,
-        dnl # The xattr_handler->set() callback was changed to take both
-        dnl # dentry and inode.
-        dnl #
-        AC_MSG_RESULT(no)
-        AC_MSG_CHECKING([whether xattr_handler->set() wants dentry and inode])
-        ZFS_LINUX_TEST_RESULT([xattr_handler_set_dentry_inode], [
+        AC_DEFINE(HAVE_XATTR_SET_IDMAP, 1,
+            [xattr_handler->set() takes mnt_idmap])
+    ], [
+        AC_MSG_CHECKING([whether xattr_handler->set() wants dentry, inode, and user_namespace])
+        ZFS_LINUX_TEST_RESULT([xattr_handler_set_userns], [
             AC_MSG_RESULT(yes)
-            AC_DEFINE(HAVE_XATTR_SET_DENTRY_INODE, 1,
-                [xattr_handler->set() wants both dentry and inode])
+            AC_DEFINE(HAVE_XATTR_SET_USERNS, 1,
+                [xattr_handler->set() takes user_namespace])
         ],[
             dnl #
-            dnl # 4.4 API change,
-            dnl # The xattr_handler->set() callback was changed to take a
-            dnl # xattr_handler, and handler_flags argument was removed and
-            dnl # should be accessed by handler->flags.
+            dnl # 4.7 API change,
+            dnl # The xattr_handler->set() callback was changed to take both
+            dnl # dentry and inode.
             dnl #
             AC_MSG_RESULT(no)
-            AC_MSG_CHECKING(
-                [whether xattr_handler->set() wants xattr_handler])
-            ZFS_LINUX_TEST_RESULT([xattr_handler_set_xattr_handler], [
+            AC_MSG_CHECKING([whether xattr_handler->set() wants dentry and inode])
+            ZFS_LINUX_TEST_RESULT([xattr_handler_set_dentry_inode], [
                 AC_MSG_RESULT(yes)
-                AC_DEFINE(HAVE_XATTR_SET_HANDLER, 1,
-                    [xattr_handler->set() wants xattr_handler])
+                AC_DEFINE(HAVE_XATTR_SET_DENTRY_INODE, 1,
+                    [xattr_handler->set() wants both dentry and inode])
             ],[
                 dnl #
-                dnl # 2.6.33 API change,
-                dnl # The xattr_handler->set() callback was changed
-                dnl # to take a dentry instead of an inode, and a
-                dnl # handler_flags argument was added.
+                dnl # 4.4 API change,
+                dnl # The xattr_handler->set() callback was changed to take a
+                dnl # xattr_handler, and handler_flags argument was removed and
+                dnl # should be accessed by handler->flags.
                 dnl #
                 AC_MSG_RESULT(no)
                 AC_MSG_CHECKING(
-                    [whether xattr_handler->set() wants dentry])
-                ZFS_LINUX_TEST_RESULT([xattr_handler_set_dentry], [
+                    [whether xattr_handler->set() wants xattr_handler])
+                ZFS_LINUX_TEST_RESULT([xattr_handler_set_xattr_handler], [
                     AC_MSG_RESULT(yes)
-                    AC_DEFINE(HAVE_XATTR_SET_DENTRY, 1,
-                        [xattr_handler->set() wants dentry])
+                    AC_DEFINE(HAVE_XATTR_SET_HANDLER, 1,
+                        [xattr_handler->set() wants xattr_handler])
                 ],[
-                    ZFS_LINUX_TEST_ERROR([xattr set()])
+                    dnl #
+                    dnl # 2.6.33 API change,
+                    dnl # The xattr_handler->set() callback was changed
+                    dnl # to take a dentry instead of an inode, and a
+                    dnl # handler_flags argument was added.
+                    dnl #
+                    AC_MSG_RESULT(no)
+                    AC_MSG_CHECKING(
+                        [whether xattr_handler->set() wants dentry])
+                    ZFS_LINUX_TEST_RESULT([xattr_handler_set_dentry], [
+                        AC_MSG_RESULT(yes)
+                        AC_DEFINE(HAVE_XATTR_SET_DENTRY, 1,
+                            [xattr_handler->set() wants dentry])
+                    ],[
+                        ZFS_LINUX_TEST_ERROR([xattr set()])
+                    ])
                 ])
             ])
         ])
     ])
 ])

@@ -71,6 +71,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
     ZFS_AC_KERNEL_SRC_INODE_OWNER_OR_CAPABLE
     ZFS_AC_KERNEL_SRC_XATTR
     ZFS_AC_KERNEL_SRC_ACL
+    ZFS_AC_KERNEL_SRC_INODE_SETATTR
     ZFS_AC_KERNEL_SRC_INODE_GETATTR
     ZFS_AC_KERNEL_SRC_INODE_SET_FLAGS
     ZFS_AC_KERNEL_SRC_INODE_SET_IVERSION
@@ -133,7 +134,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
     ZFS_AC_KERNEL_SRC_KSTRTOUL
     ZFS_AC_KERNEL_SRC_PERCPU
     ZFS_AC_KERNEL_SRC_CPU_HOTPLUG
-    ZFS_AC_KERNEL_SRC_GENERIC_FILLATTR_USERNS
+    ZFS_AC_KERNEL_SRC_GENERIC_FILLATTR
     ZFS_AC_KERNEL_SRC_MKNOD
     ZFS_AC_KERNEL_SRC_SYMLINK
     ZFS_AC_KERNEL_SRC_BIO_MAX_SEGS
@@ -151,6 +152,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
     ZFS_AC_KERNEL_SRC_IDMAP_MNT_API
     ZFS_AC_KERNEL_SRC_IATTR_VFSID
     ZFS_AC_KERNEL_SRC_FILEMAP
+    ZFS_AC_KERNEL_SRC_WRITEPAGE_T

     case "$host_cpu" in
     powerpc*)
         ZFS_AC_KERNEL_SRC_CPU_HAS_FEATURE
@@ -201,6 +203,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
     ZFS_AC_KERNEL_INODE_OWNER_OR_CAPABLE
     ZFS_AC_KERNEL_XATTR
     ZFS_AC_KERNEL_ACL
+    ZFS_AC_KERNEL_INODE_SETATTR
     ZFS_AC_KERNEL_INODE_GETATTR
     ZFS_AC_KERNEL_INODE_SET_FLAGS
     ZFS_AC_KERNEL_INODE_SET_IVERSION
@@ -263,7 +266,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
     ZFS_AC_KERNEL_KSTRTOUL
     ZFS_AC_KERNEL_PERCPU
     ZFS_AC_KERNEL_CPU_HOTPLUG
-    ZFS_AC_KERNEL_GENERIC_FILLATTR_USERNS
+    ZFS_AC_KERNEL_GENERIC_FILLATTR
     ZFS_AC_KERNEL_MKNOD
     ZFS_AC_KERNEL_SYMLINK
     ZFS_AC_KERNEL_BIO_MAX_SEGS
@@ -281,6 +284,7 @@ AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
     ZFS_AC_KERNEL_IDMAP_MNT_API
     ZFS_AC_KERNEL_IATTR_VFSID
     ZFS_AC_KERNEL_FILEMAP
+    ZFS_AC_KERNEL_WRITEPAGE_T

     case "$host_cpu" in
     powerpc*)
         ZFS_AC_KERNEL_CPU_HAS_FEATURE


@ -5,7 +5,7 @@ Maintainer: ZFS on Linux specific mailing list <zfs-discuss@list.zfsonlinux.org>
Build-Depends: debhelper-compat (= 10), Build-Depends: debhelper-compat (= 10),
dkms (>> 2.1.1.2-5), dkms (>> 2.1.1.2-5),
libtool, libtool,
linux-headers-_KVERS_ linux-headers-_KVERS_ | raspberrypi-kernel-headers
Standards-Version: 4.3.0 Standards-Version: 4.3.0
Homepage: http://www.openzfs.org/ Homepage: http://www.openzfs.org/
Vcs-Git: https://github.com/openzfs/zfs.git Vcs-Git: https://github.com/openzfs/zfs.git
@ -14,7 +14,7 @@ Vcs-Browser: https://github.com/openzfs/zfs
Package: openzfs-zfs-modules-_KVERS_ Package: openzfs-zfs-modules-_KVERS_
Architecture: _ARCH_ Architecture: _ARCH_
Provides: openzfs-zfs-modules Provides: openzfs-zfs-modules
Depends: linux-image-_KVERS_ Depends: linux-image-_KVERS_ | raspberrypi-kernel
Recommends: openzfsutils Recommends: openzfsutils
Replaces: zfs-modules-_KVERS_ Replaces: zfs-modules-_KVERS_
Conflicts: zfs-modules-_KVERS_ Conflicts: zfs-modules-_KVERS_


@ -81,6 +81,9 @@ install() {
inst_simple "${moddir}/zfs-env-bootfs.service" "${systemdsystemunitdir}/zfs-env-bootfs.service" inst_simple "${moddir}/zfs-env-bootfs.service" "${systemdsystemunitdir}/zfs-env-bootfs.service"
systemctl -q --root "${initdir}" add-wants zfs-import.target zfs-env-bootfs.service systemctl -q --root "${initdir}" add-wants zfs-import.target zfs-env-bootfs.service
inst_simple "${moddir}/zfs-nonroot-necessities.service" "${systemdsystemunitdir}/zfs-nonroot-necessities.service"
systemctl -q --root "${initdir}" add-requires initrd-root-fs.target zfs-nonroot-necessities.service
# Add user-provided unit overrides: # Add user-provided unit overrides:
# - /etc/systemd/system/${_service} # - /etc/systemd/system/${_service}
# - /etc/systemd/system/${_service}.d/overrides.conf # - /etc/systemd/system/${_service}.d/overrides.conf


@ -1,6 +1,5 @@
[Unit] [Unit]
Description=Set BOOTFS environment for dracut Description=Set BOOTFS and BOOTFSFLAGS environment variables for dracut
Documentation=man:zpool(8)
DefaultDependencies=no DefaultDependencies=no
After=zfs-import-cache.service After=zfs-import-cache.service
After=zfs-import-scan.service After=zfs-import-scan.service
@ -8,7 +7,17 @@ Before=zfs-import.target
[Service] [Service]
Type=oneshot Type=oneshot
ExecStart=/bin/sh -c "exec systemctl set-environment BOOTFS=$(@sbindir@/zpool list -H -o bootfs | grep -m1 -vFx -)" ExecStart=/bin/sh -c ' \
. /lib/dracut-zfs-lib.sh; \
decode_root_args || exit 0; \
[ "$root" = "zfs:AUTO" ] && root="$(@sbindir@/zpool list -H -o bootfs | grep -m1 -vFx -)"; \
rootflags="$(getarg rootflags=)"; \
case ",$rootflags," in \
*,zfsutil,*) ;; \
,,) rootflags=zfsutil ;; \
*) rootflags="zfsutil,$rootflags" ;; \
esac; \
exec systemctl set-environment BOOTFS="$root" BOOTFSFLAGS="$rootflags"'
[Install] [Install]
WantedBy=zfs-import.target WantedBy=zfs-import.target


@ -14,81 +14,24 @@ GENERATOR_DIR="$1"
. /lib/dracut-zfs-lib.sh . /lib/dracut-zfs-lib.sh
decode_root_args || exit 0 decode_root_args || exit 0
[ -z "${rootflags}" ] && rootflags=$(getarg rootflags=)
case ",${rootflags}," in
*,zfsutil,*) ;;
,,) rootflags=zfsutil ;;
*) rootflags="zfsutil,${rootflags}" ;;
esac
[ -n "$debug" ] && echo "zfs-generator: writing extension for sysroot.mount to $GENERATOR_DIR/sysroot.mount.d/zfs-enhancement.conf" >> /dev/kmsg [ -n "$debug" ] && echo "zfs-generator: writing extension for sysroot.mount to $GENERATOR_DIR/sysroot.mount.d/zfs-enhancement.conf" >> /dev/kmsg
mkdir -p "$GENERATOR_DIR"/sysroot.mount.d "$GENERATOR_DIR"/initrd-root-fs.target.requires "$GENERATOR_DIR"/dracut-pre-mount.service.d mkdir -p "$GENERATOR_DIR"/sysroot.mount.d "$GENERATOR_DIR"/dracut-pre-mount.service.d
{ {
echo "[Unit]" echo "[Unit]"
echo "Before=initrd-root-fs.target" echo "Before=initrd-root-fs.target"
echo "After=zfs-import.target" echo "After=zfs-import.target"
echo echo
echo "[Mount]" echo "[Mount]"
if [ "${root}" = "zfs:AUTO" ]; then echo "PassEnvironment=BOOTFS BOOTFSFLAGS"
echo "PassEnvironment=BOOTFS" echo 'What=${BOOTFS}'
echo 'What=${BOOTFS}'
else
echo "What=${root}"
fi
echo "Type=zfs" echo "Type=zfs"
echo "Options=${rootflags}" echo 'Options=${BOOTFSFLAGS}'
} > "$GENERATOR_DIR"/sysroot.mount.d/zfs-enhancement.conf } > "$GENERATOR_DIR"/sysroot.mount.d/zfs-enhancement.conf
ln -fs ../sysroot.mount "$GENERATOR_DIR"/initrd-root-fs.target.requires/sysroot.mount ln -fs ../sysroot.mount "$GENERATOR_DIR"/initrd-root-fs.target.requires/sysroot.mount
if [ "${root}" = "zfs:AUTO" ]; then
{
echo "[Unit]"
echo "Before=initrd-root-fs.target"
echo "After=sysroot.mount"
echo "DefaultDependencies=no"
echo
echo "[Service]"
echo "Type=oneshot"
echo "PassEnvironment=BOOTFS"
echo "ExecStart=/bin/sh -c '" ' \
. /lib/dracut-zfs-lib.sh; \
_zfs_nonroot_necessities_cb() { \
zfs mount | grep -m1 -q "^$1 " && return 0; \
echo "Mounting $1 on /sysroot$2"; \
mount -o zfsutil -t zfs "$1" "/sysroot$2"; \
}; \
for_relevant_root_children "${BOOTFS}" _zfs_nonroot_necessities_cb;' \
"'"
} > "$GENERATOR_DIR"/zfs-nonroot-necessities.service
ln -fs ../zfs-nonroot-necessities.service "$GENERATOR_DIR"/initrd-root-fs.target.requires/zfs-nonroot-necessities.service
else
# We can solve this statically at generation time, so do!
_zfs_generator_cb() {
dset="${1}"
mpnt="${2}"
unit="$(systemd-escape --suffix=mount -p "/sysroot${mpnt}")"
{
echo "[Unit]"
echo "Before=initrd-root-fs.target"
echo "After=sysroot.mount"
echo
echo "[Mount]"
echo "Where=/sysroot${mpnt}"
echo "What=${dset}"
echo "Type=zfs"
echo "Options=zfsutil"
} > "$GENERATOR_DIR/${unit}"
ln -fs ../"${unit}" "$GENERATOR_DIR"/initrd-root-fs.target.requires/"${unit}"
}
for_relevant_root_children "${root}" _zfs_generator_cb
fi
{ {
echo "[Unit]" echo "[Unit]"
echo "After=zfs-import.target" echo "After=zfs-import.target"


@ -39,7 +39,7 @@ mount_dataset() {
# for_relevant_root_children DATASET EXEC # for_relevant_root_children DATASET EXEC
# Runs "EXEC dataset mountpoint" for all children of DATASET that are needed for system bringup # Runs "EXEC dataset mountpoint" for all children of DATASET that are needed for system bringup
# Used by zfs-generator.sh and friends, too! # Used by zfs-nonroot-necessities.service and friends, too!
for_relevant_root_children() { for_relevant_root_children() {
dataset="${1}" dataset="${1}"
exec="${2}" exec="${2}"


@ -0,0 +1,20 @@
[Unit]
Before=initrd-root-fs.target
After=sysroot.mount
DefaultDependencies=no
ConditionEnvironment=BOOTFS
[Service]
Type=oneshot
PassEnvironment=BOOTFS
ExecStart=/bin/sh -c ' \
. /lib/dracut-zfs-lib.sh; \
_zfs_nonroot_necessities_cb() { \
@sbindir@/zfs mount | grep -m1 -q "^$1 " && return 0; \
echo "Mounting $1 on /sysroot$2"; \
mount -o zfsutil -t zfs "$1" "/sysroot$2"; \
}; \
for_relevant_root_children "${BOOTFS}" _zfs_nonroot_necessities_cb'
[Install]
RequiredBy=initrd-root-fs.target


@ -5,8 +5,9 @@ After=zfs-import.target dracut-pre-mount.service zfs-snapshot-bootfs.service
Before=dracut-mount.service Before=dracut-mount.service
DefaultDependencies=no DefaultDependencies=no
ConditionKernelCommandLine=bootfs.rollback ConditionKernelCommandLine=bootfs.rollback
ConditionEnvironment=BOOTFS
[Service] [Service]
Type=oneshot Type=oneshot
ExecStart=/bin/sh -c '. /lib/dracut-zfs-lib.sh; decode_root_args || exit; [ "$root" = "zfs:AUTO" ] && root="$BOOTFS"; SNAPNAME="$(getarg bootfs.rollback)"; exec @sbindir@/zfs rollback -Rf "$root@${SNAPNAME:-%v}"' ExecStart=/bin/sh -c '. /lib/dracut-lib.sh; SNAPNAME="$(getarg bootfs.rollback)"; exec @sbindir@/zfs rollback -Rf "$BOOTFS@${SNAPNAME:-%v}"'
RemainAfterExit=yes RemainAfterExit=yes


@ -5,8 +5,9 @@ After=zfs-import.target dracut-pre-mount.service
Before=dracut-mount.service Before=dracut-mount.service
DefaultDependencies=no DefaultDependencies=no
ConditionKernelCommandLine=bootfs.snapshot ConditionKernelCommandLine=bootfs.snapshot
ConditionEnvironment=BOOTFS
[Service] [Service]
Type=oneshot Type=oneshot
ExecStart=-/bin/sh -c '. /lib/dracut-zfs-lib.sh; decode_root_args || exit; [ "$root" = "zfs:AUTO" ] && root="$BOOTFS"; SNAPNAME="$(getarg bootfs.snapshot)"; exec @sbindir@/zfs snapshot "$root@${SNAPNAME:-%v}"' ExecStart=-/bin/sh -c '. /lib/dracut-lib.sh; SNAPNAME="$(getarg bootfs.snapshot)"; exec @sbindir@/zfs snapshot "$BOOTFS@${SNAPNAME:-%v}"'
RemainAfterExit=yes RemainAfterExit=yes


@ -16,6 +16,7 @@ pkgdracut_90_SCRIPTS = \
pkgdracut_90_DATA = \ pkgdracut_90_DATA = \
%D%/90zfs/zfs-env-bootfs.service \ %D%/90zfs/zfs-env-bootfs.service \
%D%/90zfs/zfs-nonroot-necessities.service \
%D%/90zfs/zfs-rollback-bootfs.service \ %D%/90zfs/zfs-rollback-bootfs.service \
%D%/90zfs/zfs-snapshot-bootfs.service %D%/90zfs/zfs-snapshot-bootfs.service


@ -78,7 +78,30 @@ To use this feature:
1. Install the `dropbear-initramfs` package. You may wish to uninstall the 1. Install the `dropbear-initramfs` package. You may wish to uninstall the
`cryptsetup-initramfs` package to avoid warnings. `cryptsetup-initramfs` package to avoid warnings.
2. Add your SSH key(s) to `/etc/dropbear-initramfs/authorized_keys`. Note 2. Add your SSH key(s) to `/etc/dropbear-initramfs/authorized_keys`. Note
that Dropbear does not support ed25519 keys before version 2020.79; that Dropbear does not support ed25519 keys before version 2020.79;
in that case, use RSA (2048-bit or more) instead. in that case, use RSA (2048-bit or more) instead.
3. Rebuild the initramfs with your keys: `update-initramfs -u` 3. Rebuild the initramfs with your keys: `update-initramfs -u`
4. During the system boot, login via SSH and run: `zfsunlock` 4. During the system boot, login via SSH and run: `zfsunlock`
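Put together, and assuming a Debian-style system where `apt` installs packages and your
public key lives at `~/.ssh/id_rsa.pub` (both assumptions, not stated in the steps above),
the sequence looks roughly like:
```sh
# Sketch of steps 1-3; adjust the key path for your setup.
apt install dropbear-initramfs
# Authorize the key you will use to reach the initramfs over SSH.
cat ~/.ssh/id_rsa.pub >> /etc/dropbear-initramfs/authorized_keys
# Rebuild the initramfs so the key is included.
update-initramfs -u
# At the next boot, SSH in and run: zfsunlock
```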
### Unlocking a ZFS encrypted root via alternate means
If present, a shell program at `/etc/zfs/initramfs-tools-load-key`
and files matching `/etc/zfs/initramfs-tools-load-key.d/*`
will be copied to the initramfs during generation
and sourced to load the key, if required.
The `$ENCRYPTIONROOT` to load the key for and `$KEYLOCATION` variables are set,
and all initramfs-tools functions are available;
use unquoted `$ZPOOL` and `$ZFS` to run `zpool` and `zfs`.
A successful return (and loaded key) stops the search.
A failure return is non-fatal,
and loading keys proceeds as normal if no hook succeeds.
A trivial example of a key-loading drop-in that uses the BLAKE2 checksum
of the file at the `keylocation` as the key follows.
```sh
key="$(b2sum "${KEYLOCATION#file://}")" || return
printf '%s\n' "${key%% *}" | $ZFS load-key -L prompt "$ENCRYPTIONROOT"
```
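As a usage note, such a drop-in only needs to be readable under the directory named above
before the initramfs is rebuilt; a minimal sketch, assuming the hypothetical file name
`b2sum-key`:
```sh
# Hypothetical name; every readable file in this directory is sourced in turn.
install -D -m 0644 b2sum-key /etc/zfs/initramfs-tools-load-key.d/b2sum-key
# Regenerate the initramfs so the hook is copied in.
update-initramfs -u
```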


@ -41,6 +41,9 @@ copy_file cache "@sysconfdir@/zfs/zpool.cache"
copy_file config "@initconfdir@/zfs" copy_file config "@initconfdir@/zfs"
copy_file config "@sysconfdir@/zfs/zfs-functions" copy_file config "@sysconfdir@/zfs/zfs-functions"
copy_file config "@sysconfdir@/zfs/vdev_id.conf" copy_file config "@sysconfdir@/zfs/vdev_id.conf"
for f in "@sysconfdir@/zfs/initramfs-tools-load-key" "@sysconfdir@/zfs/initramfs-tools-load-key.d/"*; do
copy_file config "$f"
done
copy_file rule "@udevruledir@/60-zvol.rules" copy_file rule "@udevruledir@/60-zvol.rules"
copy_file rule "@udevruledir@/69-vdev.rules" copy_file rule "@udevruledir@/69-vdev.rules"


@ -420,6 +420,16 @@ decrypt_fs()
# Continue only if the key needs to be loaded # Continue only if the key needs to be loaded
[ "$KEYSTATUS" = "unavailable" ] || return 0 [ "$KEYSTATUS" = "unavailable" ] || return 0
# Try extensions first
for f in "/etc/zfs/initramfs-tools-load-key" "/etc/zfs/initramfs-tools-load-key.d/"*; do
[ -r "$f" ] || continue
(. "$f") && {
# Successful return and actually-loaded key: we're done
KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)"
[ "$KEYSTATUS" = "unavailable" ] || return 0
}
done
# Do not prompt if key is stored noninteractively, # Do not prompt if key is stored noninteractively,
if ! [ "${KEYLOCATION}" = "prompt" ]; then if ! [ "${KEYLOCATION}" = "prompt" ]; then
$ZFS load-key "${ENCRYPTIONROOT}" $ZFS load-key "${ENCRYPTIONROOT}"


@ -371,7 +371,7 @@ change_key(pam_handle_t *pamh, const char *ds_name,
static int static int
decrypt_mount(pam_handle_t *pamh, const char *ds_name, decrypt_mount(pam_handle_t *pamh, const char *ds_name,
const char *passphrase) const char *passphrase, boolean_t noop)
{ {
zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM); zfs_handle_t *ds = zfs_open(g_zfs, ds_name, ZFS_TYPE_FILESYSTEM);
if (ds == NULL) { if (ds == NULL) {
@ -383,7 +383,7 @@ decrypt_mount(pam_handle_t *pamh, const char *ds_name,
zfs_close(ds); zfs_close(ds);
return (-1); return (-1);
} }
int ret = lzc_load_key(ds_name, B_FALSE, (uint8_t *)key->value, int ret = lzc_load_key(ds_name, noop, (uint8_t *)key->value,
WRAPPING_KEY_LEN); WRAPPING_KEY_LEN);
pw_free(key); pw_free(key);
if (ret) { if (ret) {
@ -391,12 +391,16 @@ decrypt_mount(pam_handle_t *pamh, const char *ds_name,
zfs_close(ds); zfs_close(ds);
return (-1); return (-1);
} }
if (noop) {
goto out;
}
ret = zfs_mount(ds, NULL, 0); ret = zfs_mount(ds, NULL, 0);
if (ret) { if (ret) {
pam_syslog(pamh, LOG_ERR, "mount failed: %d", ret); pam_syslog(pamh, LOG_ERR, "mount failed: %d", ret);
zfs_close(ds); zfs_close(ds);
return (-1); return (-1);
} }
out:
zfs_close(ds); zfs_close(ds);
return (0); return (0);
} }
@ -443,13 +447,13 @@ zfs_key_config_load(pam_handle_t *pamh, zfs_key_config_t *config,
config->homes_prefix = strdup("rpool/home"); config->homes_prefix = strdup("rpool/home");
if (config->homes_prefix == NULL) { if (config->homes_prefix == NULL) {
pam_syslog(pamh, LOG_ERR, "strdup failure"); pam_syslog(pamh, LOG_ERR, "strdup failure");
return (-1); return (PAM_SERVICE_ERR);
} }
config->runstatedir = strdup(RUNSTATEDIR "/pam_zfs_key"); config->runstatedir = strdup(RUNSTATEDIR "/pam_zfs_key");
if (config->runstatedir == NULL) { if (config->runstatedir == NULL) {
pam_syslog(pamh, LOG_ERR, "strdup failure"); pam_syslog(pamh, LOG_ERR, "strdup failure");
free(config->homes_prefix); free(config->homes_prefix);
return (-1); return (PAM_SERVICE_ERR);
} }
const char *name; const char *name;
if (pam_get_user(pamh, &name, NULL) != PAM_SUCCESS) { if (pam_get_user(pamh, &name, NULL) != PAM_SUCCESS) {
@ -457,13 +461,13 @@ zfs_key_config_load(pam_handle_t *pamh, zfs_key_config_t *config,
"couldn't get username from PAM stack"); "couldn't get username from PAM stack");
free(config->runstatedir); free(config->runstatedir);
free(config->homes_prefix); free(config->homes_prefix);
return (-1); return (PAM_SERVICE_ERR);
} }
struct passwd *entry = getpwnam(name); struct passwd *entry = getpwnam(name);
if (!entry) { if (!entry) {
free(config->runstatedir); free(config->runstatedir);
free(config->homes_prefix); free(config->homes_prefix);
return (-1); return (PAM_USER_UNKNOWN);
} }
config->uid = entry->pw_uid; config->uid = entry->pw_uid;
config->username = name; config->username = name;
@ -484,7 +488,7 @@ zfs_key_config_load(pam_handle_t *pamh, zfs_key_config_t *config,
config->homedir = strdup(entry->pw_dir); config->homedir = strdup(entry->pw_dir);
} }
} }
return (0); return (PAM_SUCCESS);
} }
static void static void
@ -535,8 +539,8 @@ zfs_key_config_get_dataset(zfs_key_config_t *config)
return (NULL); return (NULL);
} }
(void) zfs_iter_filesystems(zhp, 0, find_dsname_by_prop_value, (void) zfs_iter_filesystems_v2(zhp, 0,
config); find_dsname_by_prop_value, config);
zfs_close(zhp); zfs_close(zhp);
char *dsname = config->dsname; char *dsname = config->dsname;
config->dsname = NULL; config->dsname = NULL;
@ -644,12 +648,43 @@ PAM_EXTERN int
pam_sm_authenticate(pam_handle_t *pamh, int flags, pam_sm_authenticate(pam_handle_t *pamh, int flags,
int argc, const char **argv) int argc, const char **argv)
{ {
(void) flags, (void) argc, (void) argv; (void) flags;
if (pw_fetch_lazy(pamh) == NULL) { if (geteuid() != 0) {
return (PAM_AUTH_ERR); pam_syslog(pamh, LOG_ERR,
"Cannot zfs_mount when not being root.");
return (PAM_SERVICE_ERR);
}
zfs_key_config_t config;
int config_err = zfs_key_config_load(pamh, &config, argc, argv);
if (config_err != PAM_SUCCESS) {
return (config_err);
} }
const pw_password_t *token = pw_fetch_lazy(pamh);
if (token == NULL) {
zfs_key_config_free(&config);
return (PAM_AUTH_ERR);
}
if (pam_zfs_init(pamh) != 0) {
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
char *dataset = zfs_key_config_get_dataset(&config);
if (!dataset) {
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SERVICE_ERR);
}
if (decrypt_mount(pamh, dataset, token->value, B_TRUE) == -1) {
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_AUTH_ERR);
}
free(dataset);
pam_zfs_free();
zfs_key_config_free(&config);
return (PAM_SUCCESS); return (PAM_SUCCESS);
} }
@ -673,7 +708,7 @@ pam_sm_chauthtok(pam_handle_t *pamh, int flags,
return (PAM_PERM_DENIED); return (PAM_PERM_DENIED);
} }
zfs_key_config_t config; zfs_key_config_t config;
if (zfs_key_config_load(pamh, &config, argc, argv) == -1) { if (zfs_key_config_load(pamh, &config, argc, argv) != PAM_SUCCESS) {
return (PAM_SERVICE_ERR); return (PAM_SERVICE_ERR);
} }
if (config.uid < 1000) { if (config.uid < 1000) {
@ -754,7 +789,7 @@ pam_sm_open_session(pam_handle_t *pamh, int flags,
return (PAM_SUCCESS); return (PAM_SUCCESS);
} }
zfs_key_config_t config; zfs_key_config_t config;
if (zfs_key_config_load(pamh, &config, argc, argv) != 0) { if (zfs_key_config_load(pamh, &config, argc, argv) != PAM_SUCCESS) {
return (PAM_SESSION_ERR); return (PAM_SESSION_ERR);
} }
@ -784,7 +819,7 @@ pam_sm_open_session(pam_handle_t *pamh, int flags,
zfs_key_config_free(&config); zfs_key_config_free(&config);
return (PAM_SERVICE_ERR); return (PAM_SERVICE_ERR);
} }
if (decrypt_mount(pamh, dataset, token->value) == -1) { if (decrypt_mount(pamh, dataset, token->value, B_FALSE) == -1) {
free(dataset); free(dataset);
pam_zfs_free(); pam_zfs_free();
zfs_key_config_free(&config); zfs_key_config_free(&config);
@ -813,7 +848,7 @@ pam_sm_close_session(pam_handle_t *pamh, int flags,
return (PAM_SUCCESS); return (PAM_SUCCESS);
} }
zfs_key_config_t config; zfs_key_config_t config;
if (zfs_key_config_load(pamh, &config, argc, argv) != 0) { if (zfs_key_config_load(pamh, &config, argc, argv) != PAM_SUCCESS) {
return (PAM_SESSION_ERR); return (PAM_SESSION_ERR);
} }
if (config.uid < 1000) { if (config.uid < 1000) {


@ -658,17 +658,29 @@ _LIBZFS_H void zprop_print_one_property(const char *, zprop_get_cbdata_t *,
typedef int (*zfs_iter_f)(zfs_handle_t *, void *); typedef int (*zfs_iter_f)(zfs_handle_t *, void *);
_LIBZFS_H int zfs_iter_root(libzfs_handle_t *, zfs_iter_f, void *); _LIBZFS_H int zfs_iter_root(libzfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_children(zfs_handle_t *, int, zfs_iter_f, void *); _LIBZFS_H int zfs_iter_children(zfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_dependents(zfs_handle_t *, int, boolean_t, zfs_iter_f, _LIBZFS_H int zfs_iter_dependents(zfs_handle_t *, boolean_t, zfs_iter_f,
void *); void *);
_LIBZFS_H int zfs_iter_filesystems(zfs_handle_t *, int, zfs_iter_f, void *); _LIBZFS_H int zfs_iter_filesystems(zfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_snapshots(zfs_handle_t *, int, zfs_iter_f, void *, _LIBZFS_H int zfs_iter_snapshots(zfs_handle_t *, boolean_t, zfs_iter_f, void *,
uint64_t, uint64_t); uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapshots_sorted(zfs_handle_t *, int, zfs_iter_f, void *, _LIBZFS_H int zfs_iter_snapshots_sorted(zfs_handle_t *, zfs_iter_f, void *,
uint64_t, uint64_t); uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapspec(zfs_handle_t *, int, const char *, zfs_iter_f, _LIBZFS_H int zfs_iter_snapspec(zfs_handle_t *, const char *, zfs_iter_f,
void *); void *);
_LIBZFS_H int zfs_iter_bookmarks(zfs_handle_t *, int, zfs_iter_f, void *); _LIBZFS_H int zfs_iter_bookmarks(zfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_children_v2(zfs_handle_t *, int, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_dependents_v2(zfs_handle_t *, int, boolean_t, zfs_iter_f,
void *);
_LIBZFS_H int zfs_iter_filesystems_v2(zfs_handle_t *, int, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_snapshots_v2(zfs_handle_t *, int, zfs_iter_f, void *,
uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapshots_sorted_v2(zfs_handle_t *, int, zfs_iter_f,
void *, uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapspec_v2(zfs_handle_t *, int, const char *,
zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_bookmarks_v2(zfs_handle_t *, int, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_mounted(zfs_handle_t *, zfs_iter_f, void *); _LIBZFS_H int zfs_iter_mounted(zfs_handle_t *, zfs_iter_f, void *);
typedef struct get_all_cb { typedef struct get_all_cb {


@ -182,6 +182,7 @@ struct zfs_cmd;
#define ANSI_RESET "\033[0m" #define ANSI_RESET "\033[0m"
#define ANSI_BOLD "\033[1m" #define ANSI_BOLD "\033[1m"
_LIBZUTIL_H int use_color(void);
_LIBZUTIL_H void color_start(const char *color); _LIBZUTIL_H void color_start(const char *color);
_LIBZUTIL_H void color_end(void); _LIBZUTIL_H void color_end(void);
_LIBZUTIL_H int printf_color(const char *color, const char *format, ...); _LIBZUTIL_H int printf_color(const char *color, const char *format, ...);


@ -62,12 +62,12 @@
#define kfpu_begin() do { \ #define kfpu_begin() do { \
if (__predict_false(!is_fpu_kern_thread(0))) \ if (__predict_false(!is_fpu_kern_thread(0))) \
fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX); \ fpu_kern_enter(curthread, NULL, FPU_KERN_NOCTX); \
} while(0) } while (0)
#define kfpu_end() do { \ #define kfpu_end() do { \
if (__predict_false(curthread->td_pcb->pcb_fpflags & PCB_FP_NOSAVE)) \ if (__predict_false(curthread->td_pcb->pcb_fpflags & PCB_FP_NOSAVE)) \
fpu_kern_leave(curthread, NULL); \ fpu_kern_leave(curthread, NULL); \
} while(0) } while (0)
#endif #endif


@ -105,7 +105,7 @@ typedef u_longlong_t len_t;
typedef longlong_t diskaddr_t; typedef longlong_t diskaddr_t;
typedef void zuserns_t; typedef void zidmap_t;
#include <sys/debug.h> #include <sys/debug.h>
#endif /* !_OPENSOLARIS_SYS_TYPES_H_ */ #endif /* !_OPENSOLARIS_SYS_TYPES_H_ */


@ -35,23 +35,23 @@ int dmu_read_pages(objset_t *os, uint64_t object, vm_page_t *ma, int count,
int *rbehind, int *rahead, int last_size); int *rbehind, int *rahead, int last_size);
extern int zfs_remove(znode_t *dzp, const char *name, cred_t *cr, int flags); extern int zfs_remove(znode_t *dzp, const char *name, cred_t *cr, int flags);
extern int zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, extern int zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap,
znode_t **zpp, cred_t *cr, int flags, vsecattr_t *vsecp, zuserns_t *mnt_ns); znode_t **zpp, cred_t *cr, int flags, vsecattr_t *vsecp, zidmap_t *mnt_ns);
extern int zfs_rmdir(znode_t *dzp, const char *name, znode_t *cwd, extern int zfs_rmdir(znode_t *dzp, const char *name, znode_t *cwd,
cred_t *cr, int flags); cred_t *cr, int flags);
extern int zfs_setattr(znode_t *zp, vattr_t *vap, int flag, cred_t *cr, extern int zfs_setattr(znode_t *zp, vattr_t *vap, int flag, cred_t *cr,
zuserns_t *mnt_ns); zidmap_t *mnt_ns);
extern int zfs_rename(znode_t *sdzp, const char *snm, znode_t *tdzp, extern int zfs_rename(znode_t *sdzp, const char *snm, znode_t *tdzp,
const char *tnm, cred_t *cr, int flags, uint64_t rflags, vattr_t *wo_vap, const char *tnm, cred_t *cr, int flags, uint64_t rflags, vattr_t *wo_vap,
zuserns_t *mnt_ns); zidmap_t *mnt_ns);
extern int zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap, extern int zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
const char *link, znode_t **zpp, cred_t *cr, int flags, zuserns_t *mnt_ns); const char *link, znode_t **zpp, cred_t *cr, int flags, zidmap_t *mnt_ns);
extern int zfs_link(znode_t *tdzp, znode_t *sp, extern int zfs_link(znode_t *tdzp, znode_t *sp,
const char *name, cred_t *cr, int flags); const char *name, cred_t *cr, int flags);
extern int zfs_space(znode_t *zp, int cmd, struct flock *bfp, int flag, extern int zfs_space(znode_t *zp, int cmd, struct flock *bfp, int flag,
offset_t offset, cred_t *cr); offset_t offset, cred_t *cr);
extern int zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl, extern int zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl,
int mode, znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp, int mode, znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp,
zuserns_t *mnt_ns); zidmap_t *mnt_ns);
extern int zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag, extern int zfs_setsecattr(znode_t *zp, vsecattr_t *vsecp, int flag,
cred_t *cr); cred_t *cr);
extern int zfs_write_simple(znode_t *zp, const void *data, size_t len, extern int zfs_write_simple(znode_t *zp, const void *data, size_t len,


@ -426,7 +426,7 @@ static inline void
bio_set_flush(struct bio *bio) bio_set_flush(struct bio *bio)
{ {
#if defined(HAVE_REQ_PREFLUSH) /* >= 4.10 */ #if defined(HAVE_REQ_PREFLUSH) /* >= 4.10 */
bio_set_op_attrs(bio, 0, REQ_PREFLUSH); bio_set_op_attrs(bio, 0, REQ_PREFLUSH | REQ_OP_WRITE);
#elif defined(WRITE_FLUSH_FUA) /* >= 2.6.37 and <= 4.9 */ #elif defined(WRITE_FLUSH_FUA) /* >= 2.6.37 and <= 4.9 */
bio_set_op_attrs(bio, 0, WRITE_FLUSH_FUA); bio_set_op_attrs(bio, 0, WRITE_FLUSH_FUA);
#else #else


@ -341,7 +341,8 @@ static inline void zfs_gid_write(struct inode *ip, gid_t gid)
* 4.9 API change * 4.9 API change
*/ */
#if !(defined(HAVE_SETATTR_PREPARE_NO_USERNS) || \ #if !(defined(HAVE_SETATTR_PREPARE_NO_USERNS) || \
defined(HAVE_SETATTR_PREPARE_USERNS)) defined(HAVE_SETATTR_PREPARE_USERNS) || \
defined(HAVE_SETATTR_PREPARE_IDMAP))
static inline int static inline int
setattr_prepare(struct dentry *dentry, struct iattr *ia) setattr_prepare(struct dentry *dentry, struct iattr *ia)
{ {
@ -396,6 +397,15 @@ func(struct user_namespace *user_ns, const struct path *path, \
return (func##_impl(user_ns, path, stat, request_mask, \ return (func##_impl(user_ns, path, stat, request_mask, \
query_flags)); \ query_flags)); \
} }
#elif defined(HAVE_IDMAP_IOPS_GETATTR)
#define ZPL_GETATTR_WRAPPER(func) \
static int \
func(struct mnt_idmap *user_ns, const struct path *path, \
struct kstat *stat, u32 request_mask, unsigned int query_flags) \
{ \
return (func##_impl(user_ns, path, stat, request_mask, \
query_flags)); \
}
#else #else
#error #error
#endif #endif
@ -447,8 +457,15 @@ zpl_is_32bit_api(void)
* 5.12 API change * 5.12 API change
* To support id-mapped mounts, generic_fillattr() was modified to * To support id-mapped mounts, generic_fillattr() was modified to
* accept a new struct user_namespace* as its first arg. * accept a new struct user_namespace* as its first arg.
*
* 6.3 API change
* generic_fillattr() first arg is changed to struct mnt_idmap *
*
*/ */
#ifdef HAVE_GENERIC_FILLATTR_USERNS #ifdef HAVE_GENERIC_FILLATTR_IDMAP
#define zpl_generic_fillattr(idmap, ip, sp) \
generic_fillattr(idmap, ip, sp)
#elif defined(HAVE_GENERIC_FILLATTR_USERNS)
#define zpl_generic_fillattr(user_ns, ip, sp) \ #define zpl_generic_fillattr(user_ns, ip, sp) \
generic_fillattr(user_ns, ip, sp) generic_fillattr(user_ns, ip, sp)
#else #else


@ -133,13 +133,28 @@ fn(const struct xattr_handler *handler, struct dentry *dentry, \
#error "Unsupported kernel" #error "Unsupported kernel"
#endif #endif
/*
* 6.3 API change,
* The xattr_handler->set() callback was changed to take the
* struct mnt_idmap* as the first arg, to support idmapped
* mounts.
*/
#if defined(HAVE_XATTR_SET_IDMAP)
#define ZPL_XATTR_SET_WRAPPER(fn) \
static int \
fn(const struct xattr_handler *handler, struct mnt_idmap *user_ns, \
struct dentry *dentry, struct inode *inode, const char *name, \
const void *buffer, size_t size, int flags) \
{ \
return (__ ## fn(user_ns, inode, name, buffer, size, flags)); \
}
/* /*
* 5.12 API change, * 5.12 API change,
* The xattr_handler->set() callback was changed to take the * The xattr_handler->set() callback was changed to take the
* struct user_namespace* as the first arg, to support idmapped * struct user_namespace* as the first arg, to support idmapped
* mounts. * mounts.
*/ */
#if defined(HAVE_XATTR_SET_USERNS) #elif defined(HAVE_XATTR_SET_USERNS)
#define ZPL_XATTR_SET_WRAPPER(fn) \ #define ZPL_XATTR_SET_WRAPPER(fn) \
static int \ static int \
fn(const struct xattr_handler *handler, struct user_namespace *user_ns, \ fn(const struct xattr_handler *handler, struct user_namespace *user_ns, \


@ -48,6 +48,8 @@ extern struct task_struct init_task;
#define SGID_TO_KGID(x) (KGIDT_INIT(x)) #define SGID_TO_KGID(x) (KGIDT_INIT(x))
#define KGIDP_TO_SGIDP(x) (&(x)->val) #define KGIDP_TO_SGIDP(x) (&(x)->val)
extern zidmap_t *zfs_get_init_idmap(void);
/* Check if the user ns is the initial one */ /* Check if the user ns is the initial one */
static inline boolean_t static inline boolean_t
zfs_is_init_userns(struct user_namespace *user_ns) zfs_is_init_userns(struct user_namespace *user_ns)
@ -74,36 +76,39 @@ static inline boolean_t zfs_no_idmapping(struct user_namespace *mnt_userns,
return (zfs_is_init_userns(mnt_userns) || mnt_userns == fs_userns); return (zfs_is_init_userns(mnt_userns) || mnt_userns == fs_userns);
} }
static inline uid_t zfs_uid_to_vfsuid(struct user_namespace *mnt_userns, static inline uid_t zfs_uid_to_vfsuid(zidmap_t *mnt_userns,
struct user_namespace *fs_userns, uid_t uid) struct user_namespace *fs_userns, uid_t uid)
{ {
if (zfs_no_idmapping(mnt_userns, fs_userns)) struct user_namespace *owner = idmap_owner(mnt_userns);
if (zfs_no_idmapping(owner, fs_userns))
return (uid); return (uid);
if (!zfs_is_init_userns(fs_userns)) if (!zfs_is_init_userns(fs_userns))
uid = from_kuid(fs_userns, KUIDT_INIT(uid)); uid = from_kuid(fs_userns, KUIDT_INIT(uid));
if (uid == (uid_t)-1) if (uid == (uid_t)-1)
return (uid); return (uid);
return (__kuid_val(make_kuid(mnt_userns, uid))); return (__kuid_val(make_kuid(owner, uid)));
} }
static inline gid_t zfs_gid_to_vfsgid(struct user_namespace *mnt_userns, static inline gid_t zfs_gid_to_vfsgid(zidmap_t *mnt_userns,
struct user_namespace *fs_userns, gid_t gid) struct user_namespace *fs_userns, gid_t gid)
{ {
if (zfs_no_idmapping(mnt_userns, fs_userns)) struct user_namespace *owner = idmap_owner(mnt_userns);
if (zfs_no_idmapping(owner, fs_userns))
return (gid); return (gid);
if (!zfs_is_init_userns(fs_userns)) if (!zfs_is_init_userns(fs_userns))
gid = from_kgid(fs_userns, KGIDT_INIT(gid)); gid = from_kgid(fs_userns, KGIDT_INIT(gid));
if (gid == (gid_t)-1) if (gid == (gid_t)-1)
return (gid); return (gid);
return (__kgid_val(make_kgid(mnt_userns, gid))); return (__kgid_val(make_kgid(owner, gid)));
} }
static inline uid_t zfs_vfsuid_to_uid(struct user_namespace *mnt_userns, static inline uid_t zfs_vfsuid_to_uid(zidmap_t *mnt_userns,
struct user_namespace *fs_userns, uid_t uid) struct user_namespace *fs_userns, uid_t uid)
{ {
if (zfs_no_idmapping(mnt_userns, fs_userns)) struct user_namespace *owner = idmap_owner(mnt_userns);
if (zfs_no_idmapping(owner, fs_userns))
return (uid); return (uid);
uid = from_kuid(mnt_userns, KUIDT_INIT(uid)); uid = from_kuid(owner, KUIDT_INIT(uid));
if (uid == (uid_t)-1) if (uid == (uid_t)-1)
return (uid); return (uid);
if (zfs_is_init_userns(fs_userns)) if (zfs_is_init_userns(fs_userns))
@ -111,12 +116,13 @@ static inline uid_t zfs_vfsuid_to_uid(struct user_namespace *mnt_userns,
return (__kuid_val(make_kuid(fs_userns, uid))); return (__kuid_val(make_kuid(fs_userns, uid)));
} }
static inline gid_t zfs_vfsgid_to_gid(struct user_namespace *mnt_userns, static inline gid_t zfs_vfsgid_to_gid(zidmap_t *mnt_userns,
struct user_namespace *fs_userns, gid_t gid) struct user_namespace *fs_userns, gid_t gid)
{ {
if (zfs_no_idmapping(mnt_userns, fs_userns)) struct user_namespace *owner = idmap_owner(mnt_userns);
if (zfs_no_idmapping(owner, fs_userns))
return (gid); return (gid);
gid = from_kgid(mnt_userns, KGIDT_INIT(gid)); gid = from_kgid(owner, KGIDT_INIT(gid));
if (gid == (gid_t)-1) if (gid == (gid_t)-1)
return (gid); return (gid);
if (zfs_is_init_userns(fs_userns)) if (zfs_is_init_userns(fs_userns))


@ -195,10 +195,26 @@
#define _SUNOS_VTOC_16 #define _SUNOS_VTOC_16
/*
* LoongArch arch specific defines
* only LoongArch64 is supported yet
*/
#elif defined(__loongarch__) && defined(__loongarch_lp64)
#if !defined(_LP64)
#define _LP64
#endif
#define _ZFS_LITTLE_ENDIAN
#define _SUNOS_VTOC_16
/* not all LoongArch cores support unaligned accesses in hardware */
#define _ALIGNMENT_REQUIRED 1
#else #else
/* /*
* Currently supported: * Currently supported:
* x86_64, x32, i386, arm, powerpc, s390, sparc, mips, and RV64G * x86_64, x32, i386, arm, powerpc, s390, sparc, mips, RV64G, and LoongArch64
*/ */
#error "Unsupported ISA type" #error "Unsupported ISA type"
#endif #endif


@ -55,6 +55,19 @@ typedef int major_t;
typedef int minor_t; typedef int minor_t;
struct user_namespace; struct user_namespace;
typedef struct user_namespace zuserns_t; #ifdef HAVE_IOPS_CREATE_IDMAP
#include <linux/refcount.h>
struct mnt_idmap {
struct user_namespace *owner;
refcount_t count;
};
typedef struct mnt_idmap zidmap_t;
#define idmap_owner(p) (((struct mnt_idmap *)p)->owner)
#else
typedef struct user_namespace zidmap_t;
#define idmap_owner(p) ((struct user_namespace *)p)
#endif
extern zidmap_t *zfs_init_idmap;
#endif /* _SPL_TYPES_H */ #endif /* _SPL_TYPES_H */


@ -47,14 +47,14 @@ int secpolicy_vnode_create_gid(const cred_t *);
int secpolicy_vnode_remove(const cred_t *); int secpolicy_vnode_remove(const cred_t *);
int secpolicy_vnode_setdac(const cred_t *, uid_t); int secpolicy_vnode_setdac(const cred_t *, uid_t);
int secpolicy_vnode_setid_retain(struct znode *, const cred_t *, boolean_t); int secpolicy_vnode_setid_retain(struct znode *, const cred_t *, boolean_t);
int secpolicy_vnode_setids_setgids(const cred_t *, gid_t, zuserns_t *, int secpolicy_vnode_setids_setgids(const cred_t *, gid_t, zidmap_t *,
zuserns_t *); struct user_namespace *);
int secpolicy_zinject(const cred_t *); int secpolicy_zinject(const cred_t *);
int secpolicy_zfs(const cred_t *); int secpolicy_zfs(const cred_t *);
int secpolicy_zfs_proc(const cred_t *, proc_t *); int secpolicy_zfs_proc(const cred_t *, proc_t *);
void secpolicy_setid_clear(vattr_t *, cred_t *); void secpolicy_setid_clear(vattr_t *, cred_t *);
int secpolicy_setid_setsticky_clear(struct inode *, vattr_t *, int secpolicy_setid_setsticky_clear(struct inode *, vattr_t *,
const vattr_t *, cred_t *, zuserns_t *, zuserns_t *); const vattr_t *, cred_t *, zidmap_t *, struct user_namespace *);
int secpolicy_xvattr(xvattr_t *, uid_t, cred_t *, mode_t); int secpolicy_xvattr(xvattr_t *, uid_t, cred_t *, mode_t);
int secpolicy_vnode_setattr(cred_t *, struct inode *, struct vattr *, int secpolicy_vnode_setattr(cred_t *, struct inode *, struct vattr *,
const struct vattr *, int, int (void *, int, cred_t *), void *); const struct vattr *, int, int (void *, int, cred_t *), void *);


@ -152,6 +152,11 @@
* zilog_t *, ..., * zilog_t *, ...,
* itx_t *, ...); * itx_t *, ...);
*/ */
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wordered-compare-function-pointers"
#endif
/* BEGIN CSTYLED */ /* BEGIN CSTYLED */
DECLARE_EVENT_CLASS(zfs_zil_process_itx_class, DECLARE_EVENT_CLASS(zfs_zil_process_itx_class,
TP_PROTO(zilog_t *zilog, itx_t *itx), TP_PROTO(zilog_t *zilog, itx_t *itx),
@ -169,6 +174,9 @@ DECLARE_EVENT_CLASS(zfs_zil_process_itx_class,
ZILOG_TP_PRINTK_ARGS, ITX_TP_PRINTK_ARGS) ZILOG_TP_PRINTK_ARGS, ITX_TP_PRINTK_ARGS)
); );
/* END CSTYLED */ /* END CSTYLED */
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
#define DEFINE_ZIL_PROCESS_ITX_EVENT(name) \ #define DEFINE_ZIL_PROCESS_ITX_EVENT(name) \
DEFINE_EVENT(zfs_zil_process_itx_class, name, \ DEFINE_EVENT(zfs_zil_process_itx_class, name, \


@ -46,25 +46,24 @@ extern int zfs_lookup(znode_t *dzp, char *nm, znode_t **zpp, int flags,
cred_t *cr, int *direntflags, pathname_t *realpnp); cred_t *cr, int *direntflags, pathname_t *realpnp);
extern int zfs_create(znode_t *dzp, char *name, vattr_t *vap, int excl, extern int zfs_create(znode_t *dzp, char *name, vattr_t *vap, int excl,
int mode, znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp, int mode, znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp,
zuserns_t *mnt_ns); zidmap_t *mnt_ns);
extern int zfs_tmpfile(struct inode *dip, vattr_t *vapzfs, int excl, extern int zfs_tmpfile(struct inode *dip, vattr_t *vapzfs, int excl,
int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp, int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp,
zuserns_t *mnt_ns); zidmap_t *mnt_ns);
extern int zfs_remove(znode_t *dzp, char *name, cred_t *cr, int flags); extern int zfs_remove(znode_t *dzp, char *name, cred_t *cr, int flags);
extern int zfs_mkdir(znode_t *dzp, char *dirname, vattr_t *vap, extern int zfs_mkdir(znode_t *dzp, char *dirname, vattr_t *vap,
znode_t **zpp, cred_t *cr, int flags, vsecattr_t *vsecp, zuserns_t *mnt_ns); znode_t **zpp, cred_t *cr, int flags, vsecattr_t *vsecp, zidmap_t *mnt_ns);
extern int zfs_rmdir(znode_t *dzp, char *name, znode_t *cwd, extern int zfs_rmdir(znode_t *dzp, char *name, znode_t *cwd,
cred_t *cr, int flags); cred_t *cr, int flags);
extern int zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr); extern int zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr);
extern int zfs_getattr_fast(struct user_namespace *, struct inode *ip, extern int zfs_getattr_fast(zidmap_t *, struct inode *ip, struct kstat *sp);
struct kstat *sp);
extern int zfs_setattr(znode_t *zp, vattr_t *vap, int flag, cred_t *cr, extern int zfs_setattr(znode_t *zp, vattr_t *vap, int flag, cred_t *cr,
zuserns_t *mnt_ns); zidmap_t *mnt_ns);
extern int zfs_rename(znode_t *sdzp, char *snm, znode_t *tdzp, extern int zfs_rename(znode_t *sdzp, char *snm, znode_t *tdzp,
char *tnm, cred_t *cr, int flags, uint64_t rflags, vattr_t *wo_vap, char *tnm, cred_t *cr, int flags, uint64_t rflags, vattr_t *wo_vap,
zuserns_t *mnt_ns); zidmap_t *mnt_ns);
extern int zfs_symlink(znode_t *dzp, char *name, vattr_t *vap, extern int zfs_symlink(znode_t *dzp, char *name, vattr_t *vap,
char *link, znode_t **zpp, cred_t *cr, int flags, zuserns_t *mnt_ns); char *link, znode_t **zpp, cred_t *cr, int flags, zidmap_t *mnt_ns);
extern int zfs_readlink(struct inode *ip, zfs_uio_t *uio, cred_t *cr); extern int zfs_readlink(struct inode *ip, zfs_uio_t *uio, cred_t *cr);
extern int zfs_link(znode_t *tdzp, znode_t *szp, extern int zfs_link(znode_t *tdzp, znode_t *szp,
char *name, cred_t *cr, int flags); char *name, cred_t *cr, int flags);


@ -39,7 +39,7 @@
/* zpl_inode.c */ /* zpl_inode.c */
extern void zpl_vap_init(vattr_t *vap, struct inode *dir, extern void zpl_vap_init(vattr_t *vap, struct inode *dir,
umode_t mode, cred_t *cr, zuserns_t *mnt_ns); umode_t mode, cred_t *cr, zidmap_t *mnt_ns);
extern const struct inode_operations zpl_inode_operations; extern const struct inode_operations zpl_inode_operations;
#ifdef HAVE_RENAME2_OPERATIONS_WRAPPER #ifdef HAVE_RENAME2_OPERATIONS_WRAPPER
@ -68,7 +68,10 @@ extern int zpl_xattr_security_init(struct inode *ip, struct inode *dip,
const struct qstr *qstr); const struct qstr *qstr);
#if defined(CONFIG_FS_POSIX_ACL) #if defined(CONFIG_FS_POSIX_ACL)
#if defined(HAVE_SET_ACL) #if defined(HAVE_SET_ACL)
#if defined(HAVE_SET_ACL_USERNS) #if defined(HAVE_SET_ACL_IDMAP_DENTRY)
extern int zpl_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
struct posix_acl *acl, int type);
#elif defined(HAVE_SET_ACL_USERNS)
extern int zpl_set_acl(struct user_namespace *userns, struct inode *ip, extern int zpl_set_acl(struct user_namespace *userns, struct inode *ip,
struct posix_acl *acl, int type); struct posix_acl *acl, int type);
#elif defined(HAVE_SET_ACL_USERNS_DENTRY_ARG2) #elif defined(HAVE_SET_ACL_USERNS_DENTRY_ARG2)
@ -189,13 +192,15 @@ zpl_dir_emit_dots(struct file *file, zpl_dir_context_t *ctx)
#if defined(HAVE_INODE_OWNER_OR_CAPABLE) #if defined(HAVE_INODE_OWNER_OR_CAPABLE)
#define zpl_inode_owner_or_capable(ns, ip) inode_owner_or_capable(ip) #define zpl_inode_owner_or_capable(ns, ip) inode_owner_or_capable(ip)
#elif defined(HAVE_INODE_OWNER_OR_CAPABLE_IDMAPPED) #elif defined(HAVE_INODE_OWNER_OR_CAPABLE_USERNS)
#define zpl_inode_owner_or_capable(ns, ip) inode_owner_or_capable(ns, ip) #define zpl_inode_owner_or_capable(ns, ip) inode_owner_or_capable(ns, ip)
#elif defined(HAVE_INODE_OWNER_OR_CAPABLE_IDMAP)
#define zpl_inode_owner_or_capable(idmap, ip) inode_owner_or_capable(idmap, ip)
#else #else
#error "Unsupported kernel" #error "Unsupported kernel"
#endif #endif
#ifdef HAVE_SETATTR_PREPARE_USERNS #if defined(HAVE_SETATTR_PREPARE_USERNS) || defined(HAVE_SETATTR_PREPARE_IDMAP)
#define zpl_setattr_prepare(ns, dentry, ia) setattr_prepare(ns, dentry, ia) #define zpl_setattr_prepare(ns, dentry, ia) setattr_prepare(ns, dentry, ia)
#else #else
/* /*


@ -72,6 +72,10 @@ struct dmu_tx;
*/ */
#define OBJSET_CRYPT_PORTABLE_FLAGS_MASK (0) #define OBJSET_CRYPT_PORTABLE_FLAGS_MASK (0)
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wgnu-variable-sized-type-not-at-end"
#endif
typedef struct objset_phys { typedef struct objset_phys {
dnode_phys_t os_meta_dnode; dnode_phys_t os_meta_dnode;
zil_header_t os_zil_header; zil_header_t os_zil_header;
@ -88,6 +92,9 @@ typedef struct objset_phys {
char os_pad1[OBJSET_PHYS_SIZE_V3 - OBJSET_PHYS_SIZE_V2 - char os_pad1[OBJSET_PHYS_SIZE_V3 - OBJSET_PHYS_SIZE_V2 -
sizeof (dnode_phys_t)]; sizeof (dnode_phys_t)];
} objset_phys_t; } objset_phys_t;
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
typedef int (*dmu_objset_upgrade_cb_t)(objset_t *); typedef int (*dmu_objset_upgrade_cb_t)(objset_t *);


@ -120,7 +120,11 @@ extern "C" {
#define DN_MAX_LEVELS (DIV_ROUND_UP(DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT, \ #define DN_MAX_LEVELS (DIV_ROUND_UP(DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT, \
DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT) + 1) DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT) + 1)
#define DN_BONUS(dnp) ((void*)((dnp)->dn_bonus + \ /*
* Use the flexible array instead of the fixed length one dn_bonus
* to address memcpy/memmove fortify error
*/
#define DN_BONUS(dnp) ((void*)((dnp)->dn_bonus_flexible + \
(((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t)))) (((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t))))
#define DN_MAX_BONUS_LEN(dnp) \ #define DN_MAX_BONUS_LEN(dnp) \
((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ? \ ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ? \
@ -266,6 +270,10 @@ typedef struct dnode_phys {
sizeof (blkptr_t)]; sizeof (blkptr_t)];
blkptr_t dn_spill; blkptr_t dn_spill;
}; };
struct {
blkptr_t __dn_ignore4;
uint8_t dn_bonus_flexible[];
};
}; };
} dnode_phys_t; } dnode_phys_t;


@ -816,6 +816,7 @@ typedef struct zpool_load_policy {
#define ZPOOL_CONFIG_FEATURES_FOR_READ "features_for_read" #define ZPOOL_CONFIG_FEATURES_FOR_READ "features_for_read"
#define ZPOOL_CONFIG_FEATURE_STATS "feature_stats" /* not stored on disk */ #define ZPOOL_CONFIG_FEATURE_STATS "feature_stats" /* not stored on disk */
#define ZPOOL_CONFIG_ERRATA "errata" /* not stored on disk */ #define ZPOOL_CONFIG_ERRATA "errata" /* not stored on disk */
#define ZPOOL_CONFIG_VDEV_ROOT_ZAP "com.klarasystems:vdev_zap_root"
#define ZPOOL_CONFIG_VDEV_TOP_ZAP "com.delphix:vdev_zap_top" #define ZPOOL_CONFIG_VDEV_TOP_ZAP "com.delphix:vdev_zap_top"
#define ZPOOL_CONFIG_VDEV_LEAF_ZAP "com.delphix:vdev_zap_leaf" #define ZPOOL_CONFIG_VDEV_LEAF_ZAP "com.delphix:vdev_zap_leaf"
#define ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS "com.delphix:has_per_vdev_zaps" #define ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS "com.delphix:has_per_vdev_zaps"


@ -787,6 +787,7 @@ extern int bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx);
#define SPA_ASYNC_L2CACHE_REBUILD 0x800 #define SPA_ASYNC_L2CACHE_REBUILD 0x800
#define SPA_ASYNC_L2CACHE_TRIM 0x1000 #define SPA_ASYNC_L2CACHE_TRIM 0x1000
#define SPA_ASYNC_REBUILD_DONE 0x2000 #define SPA_ASYNC_REBUILD_DONE 0x2000
#define SPA_ASYNC_DETACH_SPARE 0x4000
/* device manipulation */ /* device manipulation */
extern int spa_vdev_add(spa_t *spa, nvlist_t *nvroot); extern int spa_vdev_add(spa_t *spa, nvlist_t *nvroot);
@ -976,6 +977,8 @@ extern int spa_import_progress_set_state(uint64_t pool_guid,
extern int spa_config_tryenter(spa_t *spa, int locks, const void *tag, extern int spa_config_tryenter(spa_t *spa, int locks, const void *tag,
krw_t rw); krw_t rw);
extern void spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw); extern void spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw);
extern void spa_config_enter_mmp(spa_t *spa, int locks, const void *tag,
krw_t rw);
extern void spa_config_exit(spa_t *spa, int locks, const void *tag); extern void spa_config_exit(spa_t *spa, int locks, const void *tag);
extern int spa_config_held(spa_t *spa, int locks, krw_t rw); extern int spa_config_held(spa_t *spa, int locks, krw_t rw);


@ -277,6 +277,7 @@ struct vdev {
kthread_t *vdev_open_thread; /* thread opening children */ kthread_t *vdev_open_thread; /* thread opening children */
kthread_t *vdev_validate_thread; /* thread validating children */ kthread_t *vdev_validate_thread; /* thread validating children */
uint64_t vdev_crtxg; /* txg when top-level was added */ uint64_t vdev_crtxg; /* txg when top-level was added */
uint64_t vdev_root_zap;
/* /*
* Top-level vdev state. * Top-level vdev state.


@ -206,7 +206,7 @@ struct zfsvfs;
#ifdef _KERNEL #ifdef _KERNEL
int zfs_acl_ids_create(struct znode *, int, vattr_t *, int zfs_acl_ids_create(struct znode *, int, vattr_t *,
cred_t *, vsecattr_t *, zfs_acl_ids_t *, zuserns_t *); cred_t *, vsecattr_t *, zfs_acl_ids_t *, zidmap_t *);
void zfs_acl_ids_free(zfs_acl_ids_t *); void zfs_acl_ids_free(zfs_acl_ids_t *);
boolean_t zfs_acl_ids_overquota(struct zfsvfs *, zfs_acl_ids_t *, uint64_t); boolean_t zfs_acl_ids_overquota(struct zfsvfs *, zfs_acl_ids_t *, uint64_t);
int zfs_getacl(struct znode *, vsecattr_t *, boolean_t, cred_t *); int zfs_getacl(struct znode *, vsecattr_t *, boolean_t, cred_t *);
@ -216,15 +216,15 @@ void zfs_oldace_byteswap(ace_t *, int);
void zfs_ace_byteswap(void *, size_t, boolean_t); void zfs_ace_byteswap(void *, size_t, boolean_t);
extern boolean_t zfs_has_access(struct znode *zp, cred_t *cr); extern boolean_t zfs_has_access(struct znode *zp, cred_t *cr);
extern int zfs_zaccess(struct znode *, int, int, boolean_t, cred_t *, extern int zfs_zaccess(struct znode *, int, int, boolean_t, cred_t *,
zuserns_t *); zidmap_t *);
int zfs_fastaccesschk_execute(struct znode *, cred_t *); int zfs_fastaccesschk_execute(struct znode *, cred_t *);
extern int zfs_zaccess_rwx(struct znode *, mode_t, int, cred_t *, zuserns_t *); extern int zfs_zaccess_rwx(struct znode *, mode_t, int, cred_t *, zidmap_t *);
extern int zfs_zaccess_unix(void *, int, cred_t *); extern int zfs_zaccess_unix(void *, int, cred_t *);
extern int zfs_acl_access(struct znode *, int, cred_t *); extern int zfs_acl_access(struct znode *, int, cred_t *);
int zfs_acl_chmod_setattr(struct znode *, zfs_acl_t **, uint64_t); int zfs_acl_chmod_setattr(struct znode *, zfs_acl_t **, uint64_t);
int zfs_zaccess_delete(struct znode *, struct znode *, cred_t *, zuserns_t *); int zfs_zaccess_delete(struct znode *, struct znode *, cred_t *, zidmap_t *);
int zfs_zaccess_rename(struct znode *, struct znode *, int zfs_zaccess_rename(struct znode *, struct znode *,
struct znode *, struct znode *, cred_t *cr, zuserns_t *mnt_ns); struct znode *, struct znode *, cred_t *cr, zidmap_t *mnt_ns);
void zfs_acl_free(zfs_acl_t *); void zfs_acl_free(zfs_acl_t *);
int zfs_vsec_2_aclp(struct zfsvfs *, umode_t, vsecattr_t *, cred_t *, int zfs_vsec_2_aclp(struct zfsvfs *, umode_t, vsecattr_t *, cred_t *,
struct zfs_fuid_info **, zfs_acl_t **); struct zfs_fuid_info **, zfs_acl_t **);


@ -79,6 +79,7 @@ typedef enum spa_feature {
SPA_FEATURE_HEAD_ERRLOG, SPA_FEATURE_HEAD_ERRLOG,
SPA_FEATURE_BLAKE3, SPA_FEATURE_BLAKE3,
SPA_FEATURE_BLOCK_CLONING, SPA_FEATURE_BLOCK_CLONING,
SPA_FEATURE_AVZ_V2,
SPA_FEATURES SPA_FEATURES
} spa_feature_t; } spa_feature_t;


@ -246,10 +246,26 @@ extern "C" {
#define _SUNOS_VTOC_16 #define _SUNOS_VTOC_16
/*
* LoongArch arch specific defines
* only LoongArch64 is supported yet
*/
#elif defined(__loongarch__) && defined(__loongarch_lp64)
#if !defined(_LP64)
#define _LP64
#endif
#define _ZFS_LITTLE_ENDIAN
#define _SUNOS_VTOC_16
/* not all LoongArch cores support unaligned accesses in hardware */
#define _ALIGNMENT_REQUIRED 1
#else #else
/* /*
* Currently supported: * Currently supported:
* x86_64, x32, i386, arm, powerpc, s390, sparc, mips, and RV64G * x86_64, x32, i386, arm, powerpc, s390, sparc, mips, RV64G, and LoongArch64
*/ */
#error "Unsupported ISA type" #error "Unsupported ISA type"
#endif #endif


@ -260,6 +260,7 @@
<elf-symbol name='tpool_suspended' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='tpool_suspended' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='tpool_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='update_vdev_config_dev_strs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='update_vdev_config_dev_strs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='use_color' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_expand_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='vdev_expand_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='vdev_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='vdev_prop_align_right' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='vdev_prop_align_right' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@ -338,14 +339,21 @@
<elf-symbol name='zfs_is_shared' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_is_shared' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_isnumber' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_isnumber' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_bookmarks' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_iter_bookmarks' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_bookmarks_v2' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_children' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_iter_children' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_children_v2' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_dependents' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_iter_dependents' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_dependents_v2' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_filesystems' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_iter_filesystems' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_filesystems_v2' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_iter_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_root' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_iter_root' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_iter_snapshots' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots_sorted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_iter_snapshots_sorted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots_sorted_v2' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots_v2' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapspec' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_iter_snapspec' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapspec_v2' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mod_supported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_mod_supported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_mount' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount_at' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_mount_at' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@ -588,7 +596,7 @@
<elf-symbol name='fletcher_4_superscalar_ops' size='128' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='fletcher_4_superscalar_ops' size='128' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_config_ops' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='libzfs_config_ops' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_protocol_names' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='sa_protocol_names' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='spa_feature_table' size='2128' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='spa_feature_table' size='2184' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_checks_disable' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfeature_checks_disable' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_perm_tab' size='512' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_deleg_perm_tab' size='512' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_history_event_names' size='328' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/> <elf-symbol name='zfs_history_event_names' size='328' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
@ -2260,14 +2268,14 @@
<parameter type-id='58603c44'/> <parameter type-id='58603c44'/>
<return type-id='9c313c2d'/> <return type-id='9c313c2d'/>
</function-decl> </function-decl>
<function-decl name='zfs_iter_children' mangled-name='zfs_iter_children' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_children'> <function-decl name='zfs_iter_children_v2' mangled-name='zfs_iter_children_v2' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_children_v2'>
<parameter type-id='9200a744'/> <parameter type-id='9200a744'/>
<parameter type-id='95e97e5e'/> <parameter type-id='95e97e5e'/>
<parameter type-id='d8e49ab9'/> <parameter type-id='d8e49ab9'/>
<parameter type-id='eaa32e2f'/> <parameter type-id='eaa32e2f'/>
<return type-id='95e97e5e'/> <return type-id='95e97e5e'/>
</function-decl> </function-decl>
<function-decl name='zfs_iter_dependents' mangled-name='zfs_iter_dependents' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_dependents'> <function-decl name='zfs_iter_dependents_v2' mangled-name='zfs_iter_dependents_v2' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_dependents_v2'>
<parameter type-id='9200a744'/> <parameter type-id='9200a744'/>
<parameter type-id='95e97e5e'/> <parameter type-id='95e97e5e'/>
<parameter type-id='c19b74c3'/> <parameter type-id='c19b74c3'/>
@ -3304,7 +3312,7 @@
<parameter type-id='58603c44'/> <parameter type-id='58603c44'/>
<return type-id='80f4b756'/> <return type-id='80f4b756'/>
</function-decl> </function-decl>
<function-decl name='zfs_iter_filesystems' mangled-name='zfs_iter_filesystems' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_filesystems'> <function-decl name='zfs_iter_filesystems_v2' mangled-name='zfs_iter_filesystems_v2' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_filesystems_v2'>
<parameter type-id='9200a744'/> <parameter type-id='9200a744'/>
<parameter type-id='95e97e5e'/> <parameter type-id='95e97e5e'/>
<parameter type-id='d8e49ab9'/> <parameter type-id='d8e49ab9'/>
@ -3881,7 +3889,7 @@
<parameter type-id='b59d7dce'/> <parameter type-id='b59d7dce'/>
<return type-id='95e97e5e'/> <return type-id='95e97e5e'/>
</function-decl> </function-decl>
<function-decl name='zfs_iter_snapshots' mangled-name='zfs_iter_snapshots' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots'> <function-decl name='zfs_iter_snapshots_v2' mangled-name='zfs_iter_snapshots_v2' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots_v2'>
<parameter type-id='9200a744'/> <parameter type-id='9200a744'/>
<parameter type-id='95e97e5e'/> <parameter type-id='95e97e5e'/>
<parameter type-id='d8e49ab9'/> <parameter type-id='d8e49ab9'/>
@ -3890,7 +3898,7 @@
<parameter type-id='9c313c2d'/> <parameter type-id='9c313c2d'/>
<return type-id='95e97e5e'/> <return type-id='95e97e5e'/>
</function-decl> </function-decl>
<function-decl name='zfs_iter_bookmarks' mangled-name='zfs_iter_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_bookmarks'> <function-decl name='zfs_iter_bookmarks_v2' mangled-name='zfs_iter_bookmarks_v2' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_bookmarks_v2'>
<parameter type-id='9200a744'/> <parameter type-id='9200a744'/>
<parameter type-id='95e97e5e'/> <parameter type-id='95e97e5e'/>
<parameter type-id='d8e49ab9'/> <parameter type-id='d8e49ab9'/>
@ -5090,7 +5098,36 @@
<parameter type-id='5ce45b60'/> <parameter type-id='5ce45b60'/>
<return type-id='9200a744'/> <return type-id='9200a744'/>
</function-decl> </function-decl>
<function-decl name='zfs_iter_filesystems' mangled-name='zfs_iter_filesystems' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_filesystems'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_snapshots' mangled-name='zfs_iter_snapshots' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='c19b74c3' name='simple'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<parameter type-id='9c313c2d' name='min_txg'/>
<parameter type-id='9c313c2d' name='max_txg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_bookmarks' mangled-name='zfs_iter_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_bookmarks'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_snapshots_sorted' mangled-name='zfs_iter_snapshots_sorted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots_sorted'> <function-decl name='zfs_iter_snapshots_sorted' mangled-name='zfs_iter_snapshots_sorted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots_sorted'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='d8e49ab9' name='callback'/>
<parameter type-id='eaa32e2f' name='data'/>
<parameter type-id='9c313c2d' name='min_txg'/>
<parameter type-id='9c313c2d' name='max_txg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_snapshots_sorted_v2' mangled-name='zfs_iter_snapshots_sorted_v2' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots_sorted_v2'>
<parameter type-id='9200a744' name='zhp'/> <parameter type-id='9200a744' name='zhp'/>
<parameter type-id='95e97e5e' name='flags'/> <parameter type-id='95e97e5e' name='flags'/>
<parameter type-id='d8e49ab9' name='callback'/> <parameter type-id='d8e49ab9' name='callback'/>
@ -5100,6 +5137,13 @@
<return type-id='95e97e5e'/> <return type-id='95e97e5e'/>
</function-decl> </function-decl>
<function-decl name='zfs_iter_snapspec' mangled-name='zfs_iter_snapspec' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapspec'> <function-decl name='zfs_iter_snapspec' mangled-name='zfs_iter_snapspec' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapspec'>
<parameter type-id='9200a744' name='fs_zhp'/>
<parameter type-id='80f4b756' name='spec_orig'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='arg'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_snapspec_v2' mangled-name='zfs_iter_snapspec_v2' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapspec_v2'>
<parameter type-id='9200a744' name='fs_zhp'/> <parameter type-id='9200a744' name='fs_zhp'/>
<parameter type-id='95e97e5e' name='flags'/> <parameter type-id='95e97e5e' name='flags'/>
<parameter type-id='80f4b756' name='spec_orig'/> <parameter type-id='80f4b756' name='spec_orig'/>
@ -5107,6 +5151,19 @@
<parameter type-id='eaa32e2f' name='arg'/> <parameter type-id='eaa32e2f' name='arg'/>
<return type-id='95e97e5e'/> <return type-id='95e97e5e'/>
</function-decl> </function-decl>
<function-decl name='zfs_iter_children' mangled-name='zfs_iter_children' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_children'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='zfs_iter_dependents' mangled-name='zfs_iter_dependents' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_dependents'>
<parameter type-id='9200a744' name='zhp'/>
<parameter type-id='c19b74c3' name='allowrecursion'/>
<parameter type-id='d8e49ab9' name='func'/>
<parameter type-id='eaa32e2f' name='data'/>
<return type-id='95e97e5e'/>
</function-decl>
</abi-instr> </abi-instr>
<abi-instr address-size='64' path='lib/libzfs/libzfs_mount.c' language='LANG_C99'> <abi-instr address-size='64' path='lib/libzfs/libzfs_mount.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='6028cbfe' size-in-bits='256' id='b39b9aa7'> <array-type-def dimensions='1' type-id='6028cbfe' size-in-bits='256' id='b39b9aa7'>
@ -5749,7 +5806,8 @@
<enumerator name='SPA_FEATURE_HEAD_ERRLOG' value='35'/> <enumerator name='SPA_FEATURE_HEAD_ERRLOG' value='35'/>
<enumerator name='SPA_FEATURE_BLAKE3' value='36'/> <enumerator name='SPA_FEATURE_BLAKE3' value='36'/>
<enumerator name='SPA_FEATURE_BLOCK_CLONING' value='37'/> <enumerator name='SPA_FEATURE_BLOCK_CLONING' value='37'/>
<enumerator name='SPA_FEATURES' value='38'/> <enumerator name='SPA_FEATURE_AVZ_V2' value='38'/>
<enumerator name='SPA_FEATURES' value='39'/>
</enum-decl> </enum-decl>
<typedef-decl name='spa_feature_t' type-id='33ecb627' id='d6618c78'/> <typedef-decl name='spa_feature_t' type-id='33ecb627' id='d6618c78'/>
<qualified-type-def type-id='22cce67b' const='yes' id='d2816df0'/> <qualified-type-def type-id='22cce67b' const='yes' id='d2816df0'/>
@ -7800,6 +7858,9 @@
<function-decl name='zfs_version_print' mangled-name='zfs_version_print' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_print'> <function-decl name='zfs_version_print' mangled-name='zfs_version_print' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_print'>
<return type-id='95e97e5e'/> <return type-id='95e97e5e'/>
</function-decl> </function-decl>
<function-decl name='use_color' mangled-name='use_color' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='use_color'>
<return type-id='95e97e5e'/>
</function-decl>
<function-decl name='printf_color' mangled-name='printf_color' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='printf_color'> <function-decl name='printf_color' mangled-name='printf_color' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='printf_color'>
<parameter type-id='80f4b756' name='color'/> <parameter type-id='80f4b756' name='color'/>
<parameter type-id='80f4b756' name='format'/> <parameter type-id='80f4b756' name='format'/>
@ -8643,8 +8704,8 @@
</function-decl> </function-decl>
</abi-instr> </abi-instr>
<abi-instr address-size='64' path='module/zcommon/zfeature_common.c' language='LANG_C99'> <abi-instr address-size='64' path='module/zcommon/zfeature_common.c' language='LANG_C99'>
<array-type-def dimensions='1' type-id='83f29ca2' size-in-bits='17024' id='9944fffc'> <array-type-def dimensions='1' type-id='83f29ca2' size-in-bits='17472' id='dd432c71'>
<subrange length='38' type-id='7359adad' id='aa4ccdac'/> <subrange length='39' type-id='7359adad' id='ae4a9561'/>
</array-type-def> </array-type-def>
<enum-decl name='zfeature_flags' id='6db816a4'> <enum-decl name='zfeature_flags' id='6db816a4'>
<underlying-type type-id='9cac1fee'/> <underlying-type type-id='9cac1fee'/>
@ -8721,7 +8782,7 @@
<pointer-type-def type-id='611586a1' size-in-bits='64' id='2e243169'/> <pointer-type-def type-id='611586a1' size-in-bits='64' id='2e243169'/>
<qualified-type-def type-id='eaa32e2f' const='yes' id='83be723c'/> <qualified-type-def type-id='eaa32e2f' const='yes' id='83be723c'/>
<pointer-type-def type-id='83be723c' size-in-bits='64' id='7acd98a2'/> <pointer-type-def type-id='83be723c' size-in-bits='64' id='7acd98a2'/>
<var-decl name='spa_feature_table' type-id='9944fffc' mangled-name='spa_feature_table' visibility='default' elf-symbol-id='spa_feature_table'/> <var-decl name='spa_feature_table' type-id='dd432c71' mangled-name='spa_feature_table' visibility='default' elf-symbol-id='spa_feature_table'/>
<var-decl name='zfeature_checks_disable' type-id='c19b74c3' mangled-name='zfeature_checks_disable' visibility='default' elf-symbol-id='zfeature_checks_disable'/> <var-decl name='zfeature_checks_disable' type-id='c19b74c3' mangled-name='zfeature_checks_disable' visibility='default' elf-symbol-id='zfeature_checks_disable'/>
<function-decl name='opendir' visibility='default' binding='global' size-in-bits='64'> <function-decl name='opendir' visibility='default' binding='global' size-in-bits='64'>
<parameter type-id='80f4b756'/> <parameter type-id='80f4b756'/>


@ -552,7 +552,7 @@ change_one(zfs_handle_t *zhp, void *data)
} }
if (!clp->cl_alldependents) if (!clp->cl_alldependents)
ret = zfs_iter_children(zhp, 0, change_one, data); ret = zfs_iter_children_v2(zhp, 0, change_one, data);
/* /*
* If we added the handle to the changelist, we will re-use it * If we added the handle to the changelist, we will re-use it
@ -721,11 +721,12 @@ changelist_gather(zfs_handle_t *zhp, zfs_prop_t prop, int gather_flags,
return (NULL); return (NULL);
} }
} else if (clp->cl_alldependents) { } else if (clp->cl_alldependents) {
if (zfs_iter_dependents(zhp, 0, B_TRUE, change_one, clp) != 0) { if (zfs_iter_dependents_v2(zhp, 0, B_TRUE, change_one,
clp) != 0) {
changelist_free(clp); changelist_free(clp);
return (NULL); return (NULL);
} }
} else if (zfs_iter_children(zhp, 0, change_one, clp) != 0) { } else if (zfs_iter_children_v2(zhp, 0, change_one, clp) != 0) {
changelist_free(clp); changelist_free(clp);
return (NULL); return (NULL);
} }


@ -1226,7 +1226,7 @@ load_keys_cb(zfs_handle_t *zhp, void *arg)
cb->cb_numfailed++; cb->cb_numfailed++;
out: out:
(void) zfs_iter_filesystems(zhp, 0, load_keys_cb, cb); (void) zfs_iter_filesystems_v2(zhp, 0, load_keys_cb, cb);
zfs_close(zhp); zfs_close(zhp);
/* always return 0, since this function is best effort */ /* always return 0, since this function is best effort */


@ -757,7 +757,7 @@ zfs_open(libzfs_handle_t *hdl, const char *path, int types)
* Iterate bookmarks to find the right one. * Iterate bookmarks to find the right one.
*/ */
errno = 0; errno = 0;
if ((zfs_iter_bookmarks(pzhp, 0, zfs_open_bookmarks_cb, if ((zfs_iter_bookmarks_v2(pzhp, 0, zfs_open_bookmarks_cb,
&cb_data) == 0) && (cb_data.zhp == NULL)) { &cb_data) == 0) && (cb_data.zhp == NULL)) {
(void) zfs_error(hdl, EZFS_NOENT, errbuf); (void) zfs_error(hdl, EZFS_NOENT, errbuf);
zfs_close(pzhp); zfs_close(pzhp);
@ -2476,7 +2476,7 @@ get_clones_cb(zfs_handle_t *zhp, void *arg)
} }
out: out:
(void) zfs_iter_children(zhp, 0, get_clones_cb, gca); (void) zfs_iter_children_v2(zhp, 0, get_clones_cb, gca);
zfs_close(zhp); zfs_close(zhp);
return (0); return (0);
} }
@ -3925,7 +3925,7 @@ zfs_check_snap_cb(zfs_handle_t *zhp, void *arg)
if (lzc_exists(name)) if (lzc_exists(name))
fnvlist_add_boolean(dd->nvl, name); fnvlist_add_boolean(dd->nvl, name);
rv = zfs_iter_filesystems(zhp, 0, zfs_check_snap_cb, dd); rv = zfs_iter_filesystems_v2(zhp, 0, zfs_check_snap_cb, dd);
zfs_close(zhp); zfs_close(zhp);
return (rv); return (rv);
} }
@ -4163,7 +4163,7 @@ zfs_snapshot_cb(zfs_handle_t *zhp, void *arg)
fnvlist_add_boolean(sd->sd_nvl, name); fnvlist_add_boolean(sd->sd_nvl, name);
rv = zfs_iter_filesystems(zhp, 0, zfs_snapshot_cb, sd); rv = zfs_iter_filesystems_v2(zhp, 0, zfs_snapshot_cb, sd);
} }
zfs_close(zhp); zfs_close(zhp);
@ -4340,7 +4340,7 @@ rollback_destroy(zfs_handle_t *zhp, void *data)
rollback_data_t *cbp = data; rollback_data_t *cbp = data;
if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) { if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) {
cbp->cb_error |= zfs_iter_dependents(zhp, 0, B_FALSE, cbp->cb_error |= zfs_iter_dependents_v2(zhp, 0, B_FALSE,
rollback_destroy_dependent, cbp); rollback_destroy_dependent, cbp);
cbp->cb_error |= zfs_destroy(zhp, B_FALSE); cbp->cb_error |= zfs_destroy(zhp, B_FALSE);
@ -4380,10 +4380,10 @@ zfs_rollback(zfs_handle_t *zhp, zfs_handle_t *snap, boolean_t force)
if (cb.cb_create > 0) if (cb.cb_create > 0)
min_txg = cb.cb_create; min_txg = cb.cb_create;
(void) zfs_iter_snapshots(zhp, 0, rollback_destroy, &cb, (void) zfs_iter_snapshots_v2(zhp, 0, rollback_destroy, &cb,
min_txg, 0); min_txg, 0);
(void) zfs_iter_bookmarks(zhp, 0, rollback_destroy, &cb); (void) zfs_iter_bookmarks_v2(zhp, 0, rollback_destroy, &cb);
if (cb.cb_error) if (cb.cb_error)
return (-1); return (-1);
@ -4964,7 +4964,7 @@ zfs_hold_one(zfs_handle_t *zhp, void *arg)
fnvlist_add_string(ha->nvl, name, ha->tag); fnvlist_add_string(ha->nvl, name, ha->tag);
if (ha->recursive) if (ha->recursive)
rv = zfs_iter_filesystems(zhp, 0, zfs_hold_one, ha); rv = zfs_iter_filesystems_v2(zhp, 0, zfs_hold_one, ha);
zfs_close(zhp); zfs_close(zhp);
return (rv); return (rv);
} }
@ -5095,7 +5095,7 @@ zfs_release_one(zfs_handle_t *zhp, void *arg)
} }
if (ha->recursive) if (ha->recursive)
rv = zfs_iter_filesystems(zhp, 0, zfs_release_one, ha); rv = zfs_iter_filesystems_v2(zhp, 0, zfs_release_one, ha);
zfs_close(zhp); zfs_close(zhp);
return (rv); return (rv);
} }


@ -103,7 +103,14 @@ zfs_do_list_ioctl(zfs_handle_t *zhp, int arg, zfs_cmd_t *zc)
* Iterate over all child filesystems * Iterate over all child filesystems
*/ */
int int
zfs_iter_filesystems(zfs_handle_t *zhp, int flags, zfs_iter_f func, void *data) zfs_iter_filesystems(zfs_handle_t *zhp, zfs_iter_f func, void *data)
{
return (zfs_iter_filesystems_v2(zhp, 0, func, data));
}
int
zfs_iter_filesystems_v2(zfs_handle_t *zhp, int flags, zfs_iter_f func,
void *data)
{ {
zfs_cmd_t zc = {"\0"}; zfs_cmd_t zc = {"\0"};
zfs_handle_t *nzhp; zfs_handle_t *nzhp;
@ -143,7 +150,15 @@ zfs_iter_filesystems(zfs_handle_t *zhp, int flags, zfs_iter_f func, void *data)
* Iterate over all snapshots * Iterate over all snapshots
*/ */
int int
zfs_iter_snapshots(zfs_handle_t *zhp, int flags, zfs_iter_f func, zfs_iter_snapshots(zfs_handle_t *zhp, boolean_t simple, zfs_iter_f func,
void *data, uint64_t min_txg, uint64_t max_txg)
{
return (zfs_iter_snapshots_v2(zhp, simple ? ZFS_ITER_SIMPLE : 0, func,
data, min_txg, max_txg));
}
int
zfs_iter_snapshots_v2(zfs_handle_t *zhp, int flags, zfs_iter_f func,
void *data, uint64_t min_txg, uint64_t max_txg) void *data, uint64_t min_txg, uint64_t max_txg)
{ {
zfs_cmd_t zc = {"\0"}; zfs_cmd_t zc = {"\0"};
@ -197,7 +212,13 @@ zfs_iter_snapshots(zfs_handle_t *zhp, int flags, zfs_iter_f func,
* Iterate over all bookmarks * Iterate over all bookmarks
*/ */
int int
zfs_iter_bookmarks(zfs_handle_t *zhp, int flags __maybe_unused, zfs_iter_bookmarks(zfs_handle_t *zhp, zfs_iter_f func, void *data)
{
return (zfs_iter_bookmarks_v2(zhp, 0, func, data));
}
int
zfs_iter_bookmarks_v2(zfs_handle_t *zhp, int flags __maybe_unused,
zfs_iter_f func, void *data) zfs_iter_f func, void *data)
{ {
zfs_handle_t *nzhp; zfs_handle_t *nzhp;
@ -305,7 +326,15 @@ zfs_snapshot_compare(const void *larg, const void *rarg)
} }
int int
zfs_iter_snapshots_sorted(zfs_handle_t *zhp, int flags, zfs_iter_f callback, zfs_iter_snapshots_sorted(zfs_handle_t *zhp, zfs_iter_f callback,
void *data, uint64_t min_txg, uint64_t max_txg)
{
return (zfs_iter_snapshots_sorted_v2(zhp, 0, callback, data,
min_txg, max_txg));
}
int
zfs_iter_snapshots_sorted_v2(zfs_handle_t *zhp, int flags, zfs_iter_f callback,
void *data, uint64_t min_txg, uint64_t max_txg) void *data, uint64_t min_txg, uint64_t max_txg)
{ {
int ret = 0; int ret = 0;
@ -316,7 +345,7 @@ zfs_iter_snapshots_sorted(zfs_handle_t *zhp, int flags, zfs_iter_f callback,
avl_create(&avl, zfs_snapshot_compare, avl_create(&avl, zfs_snapshot_compare,
sizeof (zfs_node_t), offsetof(zfs_node_t, zn_avlnode)); sizeof (zfs_node_t), offsetof(zfs_node_t, zn_avlnode));
ret = zfs_iter_snapshots(zhp, flags, zfs_sort_snaps, &avl, min_txg, ret = zfs_iter_snapshots_v2(zhp, flags, zfs_sort_snaps, &avl, min_txg,
max_txg); max_txg);
for (node = avl_first(&avl); node != NULL; node = AVL_NEXT(&avl, node)) for (node = avl_first(&avl); node != NULL; node = AVL_NEXT(&avl, node))
@ -379,7 +408,14 @@ snapspec_cb(zfs_handle_t *zhp, void *arg)
* return ENOENT at the end. * return ENOENT at the end.
*/ */
int int
zfs_iter_snapspec(zfs_handle_t *fs_zhp, int flags, const char *spec_orig, zfs_iter_snapspec(zfs_handle_t *fs_zhp, const char *spec_orig,
zfs_iter_f func, void *arg)
{
return (zfs_iter_snapspec_v2(fs_zhp, 0, spec_orig, func, arg));
}
int
zfs_iter_snapspec_v2(zfs_handle_t *fs_zhp, int flags, const char *spec_orig,
zfs_iter_f func, void *arg) zfs_iter_f func, void *arg)
{ {
char *buf, *comma_separated, *cp; char *buf, *comma_separated, *cp;
@ -419,7 +455,7 @@ zfs_iter_snapspec(zfs_handle_t *fs_zhp, int flags, const char *spec_orig,
} }
} }
err = zfs_iter_snapshots_sorted(fs_zhp, flags, err = zfs_iter_snapshots_sorted_v2(fs_zhp, flags,
snapspec_cb, &ssa, 0, 0); snapspec_cb, &ssa, 0, 0);
if (ret == 0) if (ret == 0)
ret = err; ret = err;
@ -456,14 +492,20 @@ zfs_iter_snapspec(zfs_handle_t *fs_zhp, int flags, const char *spec_orig,
* and as close as possible. * and as close as possible.
*/ */
int int
zfs_iter_children(zfs_handle_t *zhp, int flags, zfs_iter_f func, void *data) zfs_iter_children(zfs_handle_t *zhp, zfs_iter_f func, void *data)
{
return (zfs_iter_children_v2(zhp, 0, func, data));
}
int
zfs_iter_children_v2(zfs_handle_t *zhp, int flags, zfs_iter_f func, void *data)
{ {
int ret; int ret;
if ((ret = zfs_iter_snapshots(zhp, flags, func, data, 0, 0)) != 0) if ((ret = zfs_iter_snapshots_v2(zhp, flags, func, data, 0, 0)) != 0)
return (ret); return (ret);
return (zfs_iter_filesystems(zhp, flags, func, data)); return (zfs_iter_filesystems_v2(zhp, flags, func, data));
} }
@ -524,10 +566,10 @@ iter_dependents_cb(zfs_handle_t *zhp, void *arg)
isf.zhp = zhp; isf.zhp = zhp;
isf.next = ida->stack; isf.next = ida->stack;
ida->stack = &isf; ida->stack = &isf;
err = zfs_iter_filesystems(zhp, ida->flags, err = zfs_iter_filesystems_v2(zhp, ida->flags,
iter_dependents_cb, ida); iter_dependents_cb, ida);
if (err == 0) if (err == 0)
err = zfs_iter_snapshots(zhp, ida->flags, err = zfs_iter_snapshots_v2(zhp, ida->flags,
iter_dependents_cb, ida, 0, 0); iter_dependents_cb, ida, 0, 0);
ida->stack = isf.next; ida->stack = isf.next;
} }
@ -541,7 +583,14 @@ iter_dependents_cb(zfs_handle_t *zhp, void *arg)
} }
int int
zfs_iter_dependents(zfs_handle_t *zhp, int flags, boolean_t allowrecursion, zfs_iter_dependents(zfs_handle_t *zhp, boolean_t allowrecursion,
zfs_iter_f func, void *data)
{
return (zfs_iter_dependents_v2(zhp, 0, allowrecursion, func, data));
}
int
zfs_iter_dependents_v2(zfs_handle_t *zhp, int flags, boolean_t allowrecursion,
zfs_iter_f func, void *data) zfs_iter_f func, void *data)
{ {
iter_dependents_arg_t ida; iter_dependents_arg_t ida;
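The wrappers above all follow one pattern: each legacy entry point keeps its old signature and simply forwards to a new _v2 variant that takes an explicit flags argument, where 0 (or ZFS_ITER_SIMPLE when the old boolean simple argument was set) reproduces the old behaviour. A minimal caller-side sketch, with an invented callback and dataset name, is shown below; it is illustrative only and not part of this commit.

#include <stdio.h>
#include <libzfs.h>

/* Invented visitor: print each child filesystem, then release its handle. */
static int
print_fs_cb(zfs_handle_t *zhp, void *data)
{
	(void) data;
	(void) printf("%s\n", zfs_get_name(zhp));
	zfs_close(zhp);
	return (0);
}

static int
list_children(libzfs_handle_t *g_zfs, const char *dsname)
{
	zfs_handle_t *zhp = zfs_open(g_zfs, dsname, ZFS_TYPE_FILESYSTEM);
	int err;

	if (zhp == NULL)
		return (-1);
	/* Legacy entry point: no flags word, unchanged for old consumers. */
	err = zfs_iter_filesystems(zhp, print_fs_cb, NULL);
	/* New entry point: flags == 0 behaves exactly like the call above. */
	if (err == 0)
		err = zfs_iter_filesystems_v2(zhp, 0, print_fs_cb, NULL);
	zfs_close(zhp);
	return (err);
}

Both calls walk the same children; they are shown together only to contrast the two signatures.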


@ -940,7 +940,7 @@ zfs_iter_cb(zfs_handle_t *zhp, void *data)
} }
libzfs_add_handle(cbp, zhp); libzfs_add_handle(cbp, zhp);
if (zfs_iter_filesystems(zhp, 0, zfs_iter_cb, cbp) != 0) { if (zfs_iter_filesystems_v2(zhp, 0, zfs_iter_cb, cbp) != 0) {
zfs_close(zhp); zfs_close(zhp);
return (-1); return (-1);
} }
@ -1289,7 +1289,7 @@ zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags)
* over all child filesystems. * over all child filesystems.
*/ */
libzfs_add_handle(&cb, zfsp); libzfs_add_handle(&cb, zfsp);
if (zfs_iter_filesystems(zfsp, 0, zfs_iter_cb, &cb) != 0) if (zfs_iter_filesystems_v2(zfsp, 0, zfs_iter_cb, &cb) != 0)
goto out; goto out;
/* /*


@ -2954,6 +2954,7 @@ zpool_vdev_is_interior(const char *name)
strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 || strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
strncmp(name, strncmp(name,
VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 || VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
strncmp(name, VDEV_TYPE_ROOT, strlen(VDEV_TYPE_ROOT)) == 0 ||
strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0) strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
return (B_TRUE); return (B_TRUE);


@ -288,7 +288,7 @@ send_iterate_prop(zfs_handle_t *zhp, boolean_t received_only, nvlist_t *nv);
/* /*
* Collect guid, valid props, optionally holds, etc. of a snapshot. * Collect guid, valid props, optionally holds, etc. of a snapshot.
* This interface is intended for use as a zfs_iter_snapshots_sorted visitor. * This interface is intended for use as a zfs_iter_snapshots_v2_sorted visitor.
*/ */
static int static int
send_iterate_snap(zfs_handle_t *zhp, void *arg) send_iterate_snap(zfs_handle_t *zhp, void *arg)
@ -619,8 +619,8 @@ send_iterate_fs(zfs_handle_t *zhp, void *arg)
min_txg = fromsnap_txg; min_txg = fromsnap_txg;
if (!sd->replicate && tosnap_txg != 0) if (!sd->replicate && tosnap_txg != 0)
max_txg = tosnap_txg; max_txg = tosnap_txg;
(void) zfs_iter_snapshots_sorted(zhp, 0, send_iterate_snap, sd, (void) zfs_iter_snapshots_sorted_v2(zhp, 0, send_iterate_snap,
min_txg, max_txg); sd, min_txg, max_txg);
} else { } else {
char snapname[MAXPATHLEN] = { 0 }; char snapname[MAXPATHLEN] = { 0 };
zfs_handle_t *snap; zfs_handle_t *snap;
@ -662,7 +662,7 @@ send_iterate_fs(zfs_handle_t *zhp, void *arg)
/* Iterate over children. */ /* Iterate over children. */
if (sd->recursive) if (sd->recursive)
rv = zfs_iter_filesystems(zhp, 0, send_iterate_fs, sd); rv = zfs_iter_filesystems_v2(zhp, 0, send_iterate_fs, sd);
out: out:
/* Restore saved fields. */ /* Restore saved fields. */
@ -1083,7 +1083,7 @@ send_print_verbose(FILE *fout, const char *tosnap, const char *fromsnap,
/* /*
* Send a single filesystem snapshot, updating the send dump data. * Send a single filesystem snapshot, updating the send dump data.
* This interface is intended for use as a zfs_iter_snapshots_sorted visitor. * This interface is intended for use as a zfs_iter_snapshots_v2_sorted visitor.
*/ */
static int static int
dump_snapshot(zfs_handle_t *zhp, void *arg) dump_snapshot(zfs_handle_t *zhp, void *arg)
@ -1293,7 +1293,7 @@ dump_filesystem(zfs_handle_t *zhp, send_dump_data_t *sdd)
zhp->zfs_name, sdd->tosnap); zhp->zfs_name, sdd->tosnap);
} }
} }
rv = zfs_iter_snapshots_sorted(zhp, 0, dump_snapshot, sdd, rv = zfs_iter_snapshots_sorted_v2(zhp, 0, dump_snapshot, sdd,
min_txg, max_txg); min_txg, max_txg);
} else { } else {
char snapname[MAXPATHLEN] = { 0 }; char snapname[MAXPATHLEN] = { 0 };
@ -3162,9 +3162,9 @@ guid_to_name_cb(zfs_handle_t *zhp, void *arg)
return (EEXIST); return (EEXIST);
} }
err = zfs_iter_children(zhp, 0, guid_to_name_cb, gtnd); err = zfs_iter_children_v2(zhp, 0, guid_to_name_cb, gtnd);
if (err != EEXIST && gtnd->bookmark_ok) if (err != EEXIST && gtnd->bookmark_ok)
err = zfs_iter_bookmarks(zhp, 0, guid_to_name_cb, gtnd); err = zfs_iter_bookmarks_v2(zhp, 0, guid_to_name_cb, gtnd);
zfs_close(zhp); zfs_close(zhp);
return (err); return (err);
} }
@ -3218,9 +3218,10 @@ guid_to_name_redact_snaps(libzfs_handle_t *hdl, const char *parent,
continue; continue;
int err = guid_to_name_cb(zfs_handle_dup(zhp), &gtnd); int err = guid_to_name_cb(zfs_handle_dup(zhp), &gtnd);
if (err != EEXIST) if (err != EEXIST)
err = zfs_iter_children(zhp, 0, guid_to_name_cb, &gtnd); err = zfs_iter_children_v2(zhp, 0, guid_to_name_cb,
&gtnd);
if (err != EEXIST && bookmark_ok) if (err != EEXIST && bookmark_ok)
err = zfs_iter_bookmarks(zhp, 0, guid_to_name_cb, err = zfs_iter_bookmarks_v2(zhp, 0, guid_to_name_cb,
&gtnd); &gtnd);
zfs_close(zhp); zfs_close(zhp);
if (err == EEXIST) if (err == EEXIST)


@ -1966,7 +1966,7 @@ zfs_version_print(void)
* Return 1 if the user requested ANSI color output, and our terminal supports * Return 1 if the user requested ANSI color output, and our terminal supports
* it. Return 0 for no color. * it. Return 0 for no color.
*/ */
static int int
use_color(void) use_color(void)
{ {
static int use_color = -1; static int use_color = -1;
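With use_color() no longer static (and now exported, per the ABI change above), code outside libzfs_util.c can gate its own output on the same ANSI-color policy that printf_color() follows. A hedged sketch of such a caller, assuming the declarations are visible through the libzfs headers and using a literal escape string rather than any particular color macro:

#include <stdio.h>
#include <libzfs.h>	/* assumed to declare use_color() and printf_color() */

/* Invented example: emit a red warning only when color output is wanted. */
static void
warn_degraded(const char *pool)
{
	if (use_color())
		printf_color("\033[0;31m", "pool '%s' is DEGRADED\n", pool);
	else
		(void) printf("pool '%s' is DEGRADED\n", pool);
}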


@ -1927,9 +1927,8 @@ for_each_vdev_cb(void *zhp, nvlist_t *nv, pool_vdev_iter_f func,
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0) if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (ret); return (ret);
/* Don't run our function on root or indirect vdevs */ /* Don't run our function on indirect vdevs */
if ((strcmp(type, VDEV_TYPE_ROOT) != 0) && if (strcmp(type, VDEV_TYPE_INDIRECT) != 0) {
(strcmp(type, VDEV_TYPE_INDIRECT) != 0)) {
ret |= func(zhp, nv, data); ret |= func(zhp, nv, data);
} }
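Dropping the VDEV_TYPE_ROOT check means vdev-iteration callbacks are now invoked for the root vdev as well; only indirect vdevs are still skipped, in line with the root-vdev ZAP and property work elsewhere in this merge. An invented callback of the pool_vdev_iter_f shape, only to show what such a visitor looks like (not from the commit):

#include <libnvpair.h>

/* Invented example: count every vdev the iterator visits, root included. */
static int
count_vdevs_cb(void *zhp, nvlist_t *nv, void *data)
{
	(void) zhp, (void) nv;
	unsigned int *count = data;

	(*count)++;
	return (0);
}

Handed to the iteration driver, the resulting count is now one higher than before, because the root vdev is no longer filtered out.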


@ -357,7 +357,10 @@ and the allocation can't actually be satisfied
When a vdev is added, target this number of metaslabs per top-level vdev. When a vdev is added, target this number of metaslabs per top-level vdev.
. .
.It Sy zfs_vdev_default_ms_shift Ns = Ns Sy 29 Po 512 MiB Pc Pq uint .It Sy zfs_vdev_default_ms_shift Ns = Ns Sy 29 Po 512 MiB Pc Pq uint
Default limit for metaslab size. Default lower limit for metaslab size.
.
.It Sy zfs_vdev_max_ms_shift Ns = Ns Sy 34 Po 16 GiB Pc Pq uint
Default upper limit for metaslab size.
. .
.It Sy zfs_vdev_max_auto_ashift Ns = Ns Sy 14 Pq uint .It Sy zfs_vdev_max_auto_ashift Ns = Ns Sy 14 Pq uint
Maximum ashift used when optimizing for logical \[->] physical sector size on Maximum ashift used when optimizing for logical \[->] physical sector size on
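The new zfs_vdev_max_ms_shift entry pairs with zfs_vdev_default_ms_shift to bound metaslab sizing for a newly added top-level vdev. A rough sketch of that clamping idea, with invented helper and parameter names (this is not the vdev_metaslab_set_size() code itself):

#include <stdint.h>

/*
 * Illustrative only: aim for roughly the target metaslab count, but keep
 * the shift within [zfs_vdev_default_ms_shift, zfs_vdev_max_ms_shift].
 */
static unsigned int
pick_ms_shift(uint64_t vdev_asize, uint64_t ms_count_target,
    unsigned int default_ms_shift, unsigned int max_ms_shift)
{
	unsigned int shift = default_ms_shift;

	/* Grow metaslabs while at least the target count still fits. */
	while (shift < max_ms_shift &&
	    (vdev_asize >> (shift + 1)) >= ms_count_target)
		shift++;

	return (shift);	/* e.g. 2 TiB vdev, target 200 -> shift 33 (8 GiB) */
}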
@ -1292,6 +1295,11 @@ as fuller devices will tend to be slower than empty devices.
Also see Also see
.Sy zio_dva_throttle_enabled . .Sy zio_dva_throttle_enabled .
. .
.It Sy zfs_vdev_def_queue_depth Ns = Ns Sy 32 Pq uint
Default queue depth for each vdev IO allocator.
Higher values allow for better coalescing of sequential writes before sending
them to the disk, but can increase transaction commit times.
.
.It Sy zfs_vdev_failfast_mask Ns = Ns Sy 1 Pq uint .It Sy zfs_vdev_failfast_mask Ns = Ns Sy 1 Pq uint
Defines if the driver should retire on a given error type. Defines if the driver should retire on a given error type.
The following options may be bitwise-ored together: The following options may be bitwise-ored together:


@ -1,6 +1,6 @@
.\" SPDX-License-Identifier: 0BSD .\" SPDX-License-Identifier: 0BSD
.\" .\"
.Dd April 4, 2022 .Dd March 28, 2023
.Dt DRACUT.ZFS 7 .Dt DRACUT.ZFS 7
.Os .Os
. .
@ -28,13 +28,13 @@ zfs-import-scan.service \(da \(da | zfs-import-c
zfs-import.target \(-> dracut-pre-mount.service zfs-import.target \(-> dracut-pre-mount.service
| \(ua | | \(ua |
| dracut-zfs-generator | | dracut-zfs-generator |
| ____________________/| | _____________________/|
|/ \(da |/ \(da
| sysroot.mount \(<-\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em dracut-zfs-generator | sysroot.mount \(<-\(em\(em\(em dracut-zfs-generator
| | \(da | | |
| \(da sysroot-{usr,etc,lib,&c.}.mount | | \(da
| initrd-root-fs.target \(<-\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em or \(da | initrd-root-fs.target \(<-\(em zfs-nonroot-necessities.service
| | zfs-nonroot-necessities.service | | |
| \(da | | \(da |
\(da dracut-mount.service | \(da dracut-mount.service |
zfs-snapshot-bootfs.service | | zfs-snapshot-bootfs.service | |
@ -42,7 +42,7 @@ zfs-import-scan.service \(da \(da | zfs-import-c
\(da … | \(da … |
zfs-rollback-bootfs.service | | zfs-rollback-bootfs.service | |
| \(da | | \(da |
| sysroot-usr.mount \(<-\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em/ | /sysroot/{usr,etc,lib,&c.} \(<-\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em\(em/
| | | |
| \(da | \(da
| initrd-fs.target | initrd-fs.target


@ -38,7 +38,7 @@
.\" Copyright (c) 2019, Kjeld Schouten-Lebbing .\" Copyright (c) 2019, Kjeld Schouten-Lebbing
.\" Copyright (c) 2022 Hewlett Packard Enterprise Development LP. .\" Copyright (c) 2022 Hewlett Packard Enterprise Development LP.
.\" .\"
.Dd July 21, 2022 .Dd April 18, 2023
.Dt ZFSPROPS 7 .Dt ZFSPROPS 7
.Os .Os
. .
@ -80,7 +80,9 @@ for zettabyte
The following are all valid The following are all valid
.Pq and equal .Pq and equal
specifications: specifications:
.Li 1536M, 1.5g, 1.50GB . .Li 1536M ,
.Li 1.5g ,
.Li 1.50GB .
.Pp .Pp
The values of non-numeric properties are case sensitive and must be lowercase, The values of non-numeric properties are case sensitive and must be lowercase,
except for except for
@ -1254,10 +1256,12 @@ location.
Controls whether the file system should be mounted with Controls whether the file system should be mounted with
.Sy nbmand .Sy nbmand
.Pq Non-blocking mandatory locks . .Pq Non-blocking mandatory locks .
This is used for SMB clients.
Changes to this property only take effect when the file system is umounted and Changes to this property only take effect when the file system is umounted and
remounted. remounted.
Support for these locks is scarce and not described by POSIX. This was only supported by Linux prior to 5.15, and was buggy there,
and is not supported by
.Fx .
On Solaris it's used for SMB clients.
.It Sy overlay Ns = Ns Sy on Ns | Ns Sy off .It Sy overlay Ns = Ns Sy on Ns | Ns Sy off
Allow mounting on a busy directory or a directory which already contains Allow mounting on a busy directory or a directory which already contains
files or directories. files or directories.


@ -858,6 +858,22 @@ by user and group.
\*[instant-never] \*[instant-never]
\*[remount-upgrade] \*[remount-upgrade]
. .
.feature com.klarasystems vdev_zaps_v2 no
This feature creates a ZAP object for the root vdev.
.Pp
This feature becomes active after the next
.Nm zpool Cm import
or
.Nm zpool Cm reguid .
.
Properties can be retrieved or set on the root vdev using
.Nm zpool Cm get
and
.Nm zpool Cm set
with
.Sy root
as the vdev name which is an alias for
.Sy root-0 .
.feature org.openzfs zilsaxattr yes extensible_dataset .feature org.openzfs zilsaxattr yes extensible_dataset
This feature enables This feature enables
.Sy xattr Ns = Ns Sy sa .Sy xattr Ns = Ns Sy sa


@ -26,7 +26,7 @@
.\" Copyright 2017 Nexenta Systems, Inc. .\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved. .\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\" .\"
.Dd June 2, 2021 .Dd April 7, 2023
.Dt ZPOOLCONCEPTS 7 .Dt ZPOOLCONCEPTS 7
.Os .Os
. .
@ -36,7 +36,7 @@
. .
.Sh DESCRIPTION .Sh DESCRIPTION
.Ss Virtual Devices (vdevs) .Ss Virtual Devices (vdevs)
A "virtual device" describes a single device or a collection of devices A "virtual device" describes a single device or a collection of devices,
organized according to certain performance and fault characteristics. organized according to certain performance and fault characteristics.
The following virtual devices are supported: The following virtual devices are supported:
.Bl -tag -width "special" .Bl -tag -width "special"
@ -66,13 +66,14 @@ A mirror of two or more devices.
Data is replicated in an identical fashion across all components of a mirror. Data is replicated in an identical fashion across all components of a mirror.
A mirror with A mirror with
.Em N No disks of size Em X No can hold Em X No bytes and can withstand Em N-1 .Em N No disks of size Em X No can hold Em X No bytes and can withstand Em N-1
devices failing without losing data. devices failing, without losing data.
.It Sy raidz , raidz1 , raidz2 , raidz3 .It Sy raidz , raidz1 , raidz2 , raidz3
A variation on RAID-5 that allows for better distribution of parity and A distributed-parity layout, similar to RAID-5/6, with improved distribution of
eliminates the RAID-5 parity, and which does not suffer from the RAID-5/6
.Qq write hole .Qq write hole ,
.Pq in which data and parity become inconsistent after a power loss . .Pq in which data and parity become inconsistent after a power loss .
Data and parity is striped across all disks within a raidz group. Data and parity is striped across all disks within a raidz group, though not
necessarily in a consistent stripe width.
.Pp .Pp
A raidz group can have single, double, or triple parity, meaning that the A raidz group can have single, double, or triple parity, meaning that the
raidz group can sustain one, two, or three failures, respectively, without raidz group can sustain one, two, or three failures, respectively, without
@ -96,8 +97,8 @@ The minimum number of devices in a raidz group is one more than the number of
parity disks. parity disks.
The recommended number is between 3 and 9 to help increase performance. The recommended number is between 3 and 9 to help increase performance.
.It Sy draid , draid1 , draid2 , draid3 .It Sy draid , draid1 , draid2 , draid3
A variant of raidz that provides integrated distributed hot spares which A variant of raidz that provides integrated distributed hot spares, allowing
allows for faster resilvering while retaining the benefits of raidz. for faster resilvering, while retaining the benefits of raidz.
A dRAID vdev is constructed from multiple internal raidz groups, each with A dRAID vdev is constructed from multiple internal raidz groups, each with
.Em D No data devices and Em P No parity devices . .Em D No data devices and Em P No parity devices .
These groups are distributed over all of the children in order to fully These groups are distributed over all of the children in order to fully
@ -105,12 +106,12 @@ utilize the available disk performance.
.Pp .Pp
Unlike raidz, dRAID uses a fixed stripe width (padding as necessary with Unlike raidz, dRAID uses a fixed stripe width (padding as necessary with
zeros) to allow fully sequential resilvering. zeros) to allow fully sequential resilvering.
This fixed stripe width significantly effects both usable capacity and IOPS. This fixed stripe width significantly affects both usable capacity and IOPS.
For example, with the default For example, with the default
.Em D=8 No and Em 4 KiB No disk sectors the minimum allocation size is Em 32 KiB . .Em D=8 No and Em 4 KiB No disk sectors the minimum allocation size is Em 32 KiB .
If using compression, this relatively large allocation size can reduce the If using compression, this relatively large allocation size can reduce the
effective compression ratio. effective compression ratio.
When using ZFS volumes and dRAID, the default of the When using ZFS volumes (zvols) and dRAID, the default of the
.Sy volblocksize .Sy volblocksize
property is increased to account for the allocation size. property is increased to account for the allocation size.
If a dRAID pool will hold a significant amount of small blocks, it is If a dRAID pool will hold a significant amount of small blocks, it is
@ -118,7 +119,7 @@ recommended to also add a mirrored
.Sy special .Sy special
vdev to store those blocks. vdev to store those blocks.
.Pp .Pp
In regards to I/O, performance is similar to raidz since for any read all In regards to I/O, performance is similar to raidz since, for any read, all
.Em D No data disks must be accessed . .Em D No data disks must be accessed .
Delivered random IOPS can be reasonably approximated as Delivered random IOPS can be reasonably approximated as
.Sy floor((N-S)/(D+P))*single_drive_IOPS . .Sy floor((N-S)/(D+P))*single_drive_IOPS .
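For readers skimming the formula above, here is the arithmetic spelled out for one hypothetical layout; all numbers are invented for illustration:

#include <stdio.h>

/*
 * Example: a 20-child dRAID2 vdev (N=20) with 2 distributed spares (S=2)
 * and 8 data + 2 parity disks per redundancy group (D=8, P=2).
 */
int
main(void)
{
	int N = 20, S = 2, D = 8, P = 2;
	int single_drive_iops = 250;	/* assumed per-disk random-read IOPS */
	int groups = (N - S) / (D + P);	/* floor(18/10) == 1 */

	(void) printf("approximate random IOPS: %d\n",
	    groups * single_drive_iops);	/* prints 250 */
	return (0);
}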
@ -178,7 +179,7 @@ For more information, see the
.Sx Intent Log .Sx Intent Log
section. section.
.It Sy dedup .It Sy dedup
A device dedicated solely for deduplication tables. A device solely dedicated for deduplication tables.
The redundancy of this device should match the redundancy of the other normal The redundancy of this device should match the redundancy of the other normal
devices in the pool. devices in the pool.
If more than one dedup device is specified, then If more than one dedup device is specified, then
@ -230,7 +231,7 @@ each a mirror of two disks:
ZFS supports a rich set of mechanisms for handling device failure and data ZFS supports a rich set of mechanisms for handling device failure and data
corruption. corruption.
All metadata and data is checksummed, and ZFS automatically repairs bad data All metadata and data is checksummed, and ZFS automatically repairs bad data
from a good copy when corruption is detected. from a good copy, when corruption is detected.
.Pp .Pp
In order to take advantage of these features, a pool must make use of some form In order to take advantage of these features, a pool must make use of some form
of redundancy, using either mirrored or raidz groups. of redundancy, using either mirrored or raidz groups.
@ -247,7 +248,7 @@ A faulted pool has corrupted metadata, or one or more faulted devices, and
insufficient replicas to continue functioning. insufficient replicas to continue functioning.
.Pp .Pp
The health of the top-level vdev, such as a mirror or raidz device, The health of the top-level vdev, such as a mirror or raidz device,
is potentially impacted by the state of its associated vdevs, is potentially impacted by the state of its associated vdevs
or component devices. or component devices.
A top-level vdev or component device is in one of the following states: A top-level vdev or component device is in one of the following states:
.Bl -tag -width "DEGRADED" .Bl -tag -width "DEGRADED"
@ -319,14 +320,15 @@ In this case, checksum errors are reported for all disks on which the block
is stored. is stored.
.Pp .Pp
If a device is removed and later re-attached to the system, If a device is removed and later re-attached to the system,
ZFS attempts online the device automatically. ZFS attempts to bring the device online automatically.
Device attachment detection is hardware-dependent Device attachment detection is hardware-dependent
and might not be supported on all platforms. and might not be supported on all platforms.
. .
.Ss Hot Spares .Ss Hot Spares
ZFS allows devices to be associated with pools as ZFS allows devices to be associated with pools as
.Qq hot spares . .Qq hot spares .
These devices are not actively used in the pool, but when an active device These devices are not actively used in the pool.
But, when an active device
fails, it is automatically replaced by a hot spare. fails, it is automatically replaced by a hot spare.
To create a pool with hot spares, specify a To create a pool with hot spares, specify a
.Sy spare .Sy spare
@ -343,10 +345,10 @@ Once a spare replacement is initiated, a new
.Sy spare .Sy spare
vdev is created within the configuration that will remain there until the vdev is created within the configuration that will remain there until the
original device is replaced. original device is replaced.
At this point, the hot spare becomes available again if another device fails. At this point, the hot spare becomes available again, if another device fails.
.Pp .Pp
If a pool has a shared spare that is currently being used, the pool can not be If a pool has a shared spare that is currently being used, the pool cannot be
exported since other pools may use this shared spare, which may lead to exported, since other pools may use this shared spare, which may lead to
potential data corruption. potential data corruption.
.Pp .Pp
Shared spares add some risk. Shared spares add some risk.
@ -390,7 +392,7 @@ See the
.Sx EXAMPLES .Sx EXAMPLES
section for an example of mirroring multiple log devices. section for an example of mirroring multiple log devices.
.Pp .Pp
Log devices can be added, replaced, attached, detached and removed. Log devices can be added, replaced, attached, detached, and removed.
In addition, log devices are imported and exported as part of the pool In addition, log devices are imported and exported as part of the pool
that contains them. that contains them.
Mirrored devices can be removed by specifying the top-level mirror vdev. Mirrored devices can be removed by specifying the top-level mirror vdev.
@ -423,8 +425,8 @@ This can be disabled by setting
.Sy l2arc_rebuild_enabled Ns = Ns Sy 0 . .Sy l2arc_rebuild_enabled Ns = Ns Sy 0 .
For cache devices smaller than For cache devices smaller than
.Em 1 GiB , .Em 1 GiB ,
we do not write the metadata structures ZFS does not write the metadata structures
required for rebuilding the L2ARC in order not to waste space. required for rebuilding the L2ARC, to conserve space.
This can be changed with This can be changed with
.Sy l2arc_rebuild_blocks_min_l2size . .Sy l2arc_rebuild_blocks_min_l2size .
The cache device header The cache device header
@ -435,21 +437,21 @@ Setting
will result in scanning the full-length ARC lists for cacheable content to be will result in scanning the full-length ARC lists for cacheable content to be
written in L2ARC (persistent ARC). written in L2ARC (persistent ARC).
If a cache device is added with If a cache device is added with
.Nm zpool Cm add .Nm zpool Cm add ,
its label and header will be overwritten and its contents are not going to be its label and header will be overwritten and its contents will not be
restored in L2ARC, even if the device was previously part of the pool. restored in L2ARC, even if the device was previously part of the pool.
If a cache device is onlined with If a cache device is onlined with
.Nm zpool Cm online .Nm zpool Cm online ,
its contents will be restored in L2ARC. its contents will be restored in L2ARC.
This is useful in case of memory pressure This is useful in case of memory pressure,
where the contents of the cache device are not fully restored in L2ARC. where the contents of the cache device are not fully restored in L2ARC.
The user can off- and online the cache device when there is less memory pressure The user can off- and online the cache device when there is less memory
in order to fully restore its contents to L2ARC. pressure, to fully restore its contents to L2ARC.
. .
.Ss Pool checkpoint .Ss Pool checkpoint
Before starting critical procedures that include destructive actions Before starting critical procedures that include destructive actions
.Pq like Nm zfs Cm destroy , .Pq like Nm zfs Cm destroy ,
an administrator can checkpoint the pool's state and in the case of a an administrator can checkpoint the pool's state and, in the case of a
mistake or failure, rewind the entire pool back to the checkpoint. mistake or failure, rewind the entire pool back to the checkpoint.
Otherwise, the checkpoint can be discarded when the procedure has completed Otherwise, the checkpoint can be discarded when the procedure has completed
successfully. successfully.
@ -485,7 +487,7 @@ current state of the pool won't be scanned during a scrub.
. .
.Ss Special Allocation Class .Ss Special Allocation Class
Allocations in the special class are dedicated to specific block types. Allocations in the special class are dedicated to specific block types.
By default this includes all metadata, the indirect blocks of user data, and By default, this includes all metadata, the indirect blocks of user data, and
any deduplication tables. any deduplication tables.
The class can also be provisioned to accept small file blocks. The class can also be provisioned to accept small file blocks.
.Pp .Pp


@ -507,6 +507,16 @@ CFLAGS.zstd_lazy.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_ldm.c+= ${__ZFS_ZSTD_AARCH64_FLAGS} CFLAGS.zstd_ldm.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
CFLAGS.zstd_opt.c+= ${__ZFS_ZSTD_AARCH64_FLAGS} CFLAGS.zstd_opt.c+= ${__ZFS_ZSTD_AARCH64_FLAGS}
sha256-armv8.o: sha256-armv8.S
${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC} \
-o ${.TARGET}
${CTFCONVERT_CMD}
sha512-armv8.o: sha512-armv8.S
${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC} \
-o ${.TARGET}
${CTFCONVERT_CMD}
b3_aarch64_sse2.o: b3_aarch64_sse2.S b3_aarch64_sse2.o: b3_aarch64_sse2.S
${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC} \ ${CC} -c ${CFLAGS:N-mgeneral-regs-only} ${WERROR} ${.IMPSRC} \
-o ${.TARGET} -o ${.TARGET}


@ -84,6 +84,8 @@ static intptr_t stack_remaining(void) {
#define JMP_BUF_CNT 18 #define JMP_BUF_CNT 18
#elif defined(__riscv) #elif defined(__riscv)
#define JMP_BUF_CNT 64 #define JMP_BUF_CNT 64
#elif defined(__loongarch_lp64)
#define JMP_BUF_CNT 64
#else #else
#define JMP_BUF_CNT 1 #define JMP_BUF_CNT 1
#endif #endif


@ -16,4 +16,6 @@
#include "setjmp_s390x.S" #include "setjmp_s390x.S"
#elif defined(__riscv) #elif defined(__riscv)
#include "setjmp_rv64g.S" #include "setjmp_rv64g.S"
#elif defined(__loongarch_lp64)
#include "setjmp_loongarch64.S"
#endif #endif


@ -0,0 +1,82 @@
/*-
* Copyright 2022 Han Gao <gaohan@uniontech.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#if __loongarch_lp64
#define ENTRY(symbol) \
.text; \
.globl symbol; \
.align 3; \
.type symbol, @function; \
symbol:
#define END(function) \
.size function, .- function;
ENTRY(setjmp)
st.d $ra, $a0, 0*8
st.d $sp, $a0, 1*8
st.d $r21, $a0, 2*8
st.d $fp, $a0, 3*8
st.d $s0, $a0, 4*8
st.d $s1, $a0, 5*8
st.d $s2, $a0, 6*8
st.d $s3, $a0, 7*8
st.d $s4, $a0, 8*8
st.d $s5, $a0, 9*8
st.d $s6, $a0, 10*8
st.d $s7, $a0, 11*8
st.d $s8, $a0, 12*8
li.w $a0, 0
jr $ra
END(setjmp)
ENTRY(longjmp)
ld.d $ra, $a0, 0*8
ld.d $sp, $a0, 1*8
ld.d $r21, $a0, 2*8
ld.d $fp, $a0, 3*8
ld.d $s0, $a0, 4*8
ld.d $s1, $a0, 5*8
ld.d $s2, $a0, 6*8
ld.d $s3, $a0, 7*8
ld.d $s4, $a0, 8*8
ld.d $s5, $a0, 9*8
ld.d $s6, $a0, 10*8
ld.d $s7, $a0, 11*8
ld.d $s8, $a0, 12*8
sltui $a0, $a1, 1
add.d $a0, $a0, $a1 // a0 = (a1 == 0) ? 1 : a1
jr $ra
END(longjmp)
#ifdef __ELF__
.section .note.GNU-stack,"",%progbits
#endif
#endif


@ -887,14 +887,6 @@ SYSCTL_UINT(_vfs_zfs, OID_AUTO, top_maxinflight,
" (LEGACY)"); " (LEGACY)");
/* END CSTYLED */ /* END CSTYLED */
extern uint_t zfs_vdev_def_queue_depth;
/* BEGIN CSTYLED */
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, def_queue_depth,
CTLFLAG_RWTUN, &zfs_vdev_def_queue_depth, 0,
"Default queue depth for each allocator");
/* END CSTYLED */
/* zio.c */ /* zio.c */
/* BEGIN CSTYLED */ /* BEGIN CSTYLED */


@ -1619,7 +1619,7 @@ zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp,
*/ */
int int
zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr, zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids, zuserns_t *mnt_ns) vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids, zidmap_t *mnt_ns)
{ {
int error; int error;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs; zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
@ -2341,7 +2341,7 @@ zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
*/ */
int int
zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr, zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr,
zuserns_t *mnt_ns) zidmap_t *mnt_ns)
{ {
uint32_t working_mode; uint32_t working_mode;
int error; int error;
@ -2471,7 +2471,7 @@ zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr,
*/ */
int int
zfs_zaccess_rwx(znode_t *zp, mode_t mode, int flags, cred_t *cr, zfs_zaccess_rwx(znode_t *zp, mode_t mode, int flags, cred_t *cr,
zuserns_t *mnt_ns) zidmap_t *mnt_ns)
{ {
return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr, return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr,
mnt_ns)); mnt_ns));
@ -2541,7 +2541,7 @@ zfs_delete_final_check(znode_t *zp, znode_t *dzp,
* *
*/ */
int int
zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr, zuserns_t *mnt_ns) zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr, zidmap_t *mnt_ns)
{ {
uint32_t dzp_working_mode = 0; uint32_t dzp_working_mode = 0;
uint32_t zp_working_mode = 0; uint32_t zp_working_mode = 0;
@ -2628,7 +2628,7 @@ zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr, zuserns_t *mnt_ns)
int int
zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp, zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp,
znode_t *tzp, cred_t *cr, zuserns_t *mnt_ns) znode_t *tzp, cred_t *cr, zidmap_t *mnt_ns)
{ {
int add_perm; int add_perm;
int error; int error;


@ -85,7 +85,6 @@
#include <sys/zfs_vnops.h> #include <sys/zfs_vnops.h>
#include <sys/module.h> #include <sys/module.h>
#include <sys/sysent.h> #include <sys/sysent.h>
#include <sys/dmu_impl.h> #include <sys/dmu_impl.h>
#include <sys/brt.h> #include <sys/brt.h>
#include <sys/zfeature.h> #include <sys/zfeature.h>
@ -1054,7 +1053,7 @@ zfs_lookup(vnode_t *dvp, const char *nm, vnode_t **vpp,
*/ */
int int
zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl, int mode, zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl, int mode,
znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp, zuserns_t *mnt_ns) znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp, zidmap_t *mnt_ns)
{ {
(void) excl, (void) mode, (void) flag; (void) excl, (void) mode, (void) flag;
znode_t *zp; znode_t *zp;
@ -1406,7 +1405,7 @@ zfs_remove(znode_t *dzp, const char *name, cred_t *cr, int flags)
*/ */
int int
zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, znode_t **zpp, zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, znode_t **zpp,
cred_t *cr, int flags, vsecattr_t *vsecp, zuserns_t *mnt_ns) cred_t *cr, int flags, vsecattr_t *vsecp, zidmap_t *mnt_ns)
{ {
(void) flags, (void) vsecp; (void) flags, (void) vsecp;
znode_t *zp; znode_t *zp;
@ -2160,7 +2159,7 @@ zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr)
* vp - ctime updated, mtime updated if size changed. * vp - ctime updated, mtime updated if size changed.
*/ */
int int
zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr, zuserns_t *mnt_ns) zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr, zidmap_t *mnt_ns)
{ {
vnode_t *vp = ZTOV(zp); vnode_t *vp = ZTOV(zp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs; zfsvfs_t *zfsvfs = zp->z_zfsvfs;
@ -3421,7 +3420,7 @@ zfs_do_rename_impl(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
int int
zfs_rename(znode_t *sdzp, const char *sname, znode_t *tdzp, const char *tname, zfs_rename(znode_t *sdzp, const char *sname, znode_t *tdzp, const char *tname,
cred_t *cr, int flags, uint64_t rflags, vattr_t *wo_vap, zuserns_t *mnt_ns) cred_t *cr, int flags, uint64_t rflags, vattr_t *wo_vap, zidmap_t *mnt_ns)
{ {
struct componentname scn, tcn; struct componentname scn, tcn;
vnode_t *sdvp, *tdvp; vnode_t *sdvp, *tdvp;
@ -3478,7 +3477,7 @@ zfs_rename(znode_t *sdzp, const char *sname, znode_t *tdzp, const char *tname,
*/ */
int int
zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap, zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
const char *link, znode_t **zpp, cred_t *cr, int flags, zuserns_t *mnt_ns) const char *link, znode_t **zpp, cred_t *cr, int flags, zidmap_t *mnt_ns)
{ {
(void) flags; (void) flags;
znode_t *zp; znode_t *zp;
@ -6269,8 +6268,12 @@ zfs_freebsd_copy_file_range(struct vop_copy_file_range_args *ap)
goto bad_write_fallback; goto bad_write_fallback;
} }
} else { } else {
#if __FreeBSD_version >= 1400086
vn_lock_pair(invp, false, LK_EXCLUSIVE, outvp, false, vn_lock_pair(invp, false, LK_EXCLUSIVE, outvp, false,
LK_EXCLUSIVE); LK_EXCLUSIVE);
#else
vn_lock_pair(invp, false, outvp, false);
#endif
if (VN_IS_DOOMED(invp) || VN_IS_DOOMED(outvp)) { if (VN_IS_DOOMED(invp) || VN_IS_DOOMED(outvp)) {
goto bad_locked_fallback; goto bad_locked_fallback;
} }


@ -145,6 +145,18 @@ crgetgid(const cred_t *cr)
return (KGID_TO_SGID(cr->fsgid)); return (KGID_TO_SGID(cr->fsgid));
} }
/* Return the initial user ns or nop_mnt_idmap */
zidmap_t *
zfs_get_init_idmap(void)
{
#ifdef HAVE_IOPS_CREATE_IDMAP
return ((zidmap_t *)&nop_mnt_idmap);
#else
return ((zidmap_t *)&init_user_ns);
#endif
}
EXPORT_SYMBOL(zfs_get_init_idmap);
EXPORT_SYMBOL(crhold); EXPORT_SYMBOL(crhold);
EXPORT_SYMBOL(crfree); EXPORT_SYMBOL(crfree);
EXPORT_SYMBOL(crgetuid); EXPORT_SYMBOL(crgetuid);


@ -124,7 +124,7 @@ secpolicy_vnode_any_access(const cred_t *cr, struct inode *ip, uid_t owner)
if (crgetuid(cr) == owner) if (crgetuid(cr) == owner)
return (0); return (0);
if (zpl_inode_owner_or_capable(kcred->user_ns, ip)) if (zpl_inode_owner_or_capable(zfs_init_idmap, ip))
return (0); return (0);
#if defined(CONFIG_USER_NS) #if defined(CONFIG_USER_NS)
@ -214,8 +214,8 @@ secpolicy_vnode_setid_retain(struct znode *zp __maybe_unused, const cred_t *cr,
* Determine that subject can set the file setgid flag. * Determine that subject can set the file setgid flag.
*/ */
int int
secpolicy_vnode_setids_setgids(const cred_t *cr, gid_t gid, zuserns_t *mnt_ns, secpolicy_vnode_setids_setgids(const cred_t *cr, gid_t gid, zidmap_t *mnt_ns,
zuserns_t *fs_ns) struct user_namespace *fs_ns)
{ {
gid = zfs_gid_to_vfsgid(mnt_ns, fs_ns, gid); gid = zfs_gid_to_vfsgid(mnt_ns, fs_ns, gid);
#if defined(CONFIG_USER_NS) #if defined(CONFIG_USER_NS)
@ -286,8 +286,8 @@ secpolicy_setid_clear(vattr_t *vap, cred_t *cr)
* Determine that subject can set the file setid flags. * Determine that subject can set the file setid flags.
*/ */
static int static int
secpolicy_vnode_setid_modify(const cred_t *cr, uid_t owner, zuserns_t *mnt_ns, secpolicy_vnode_setid_modify(const cred_t *cr, uid_t owner, zidmap_t *mnt_ns,
zuserns_t *fs_ns) struct user_namespace *fs_ns)
{ {
owner = zfs_uid_to_vfsuid(mnt_ns, fs_ns, owner); owner = zfs_uid_to_vfsuid(mnt_ns, fs_ns, owner);
@ -315,7 +315,8 @@ secpolicy_vnode_stky_modify(const cred_t *cr)
int int
secpolicy_setid_setsticky_clear(struct inode *ip, vattr_t *vap, secpolicy_setid_setsticky_clear(struct inode *ip, vattr_t *vap,
const vattr_t *ovap, cred_t *cr, zuserns_t *mnt_ns, zuserns_t *fs_ns) const vattr_t *ovap, cred_t *cr, zidmap_t *mnt_ns,
struct user_namespace *fs_ns)
{ {
int error; int error;

View File

@ -1802,7 +1802,7 @@ zfs_acl_inherit(zfsvfs_t *zfsvfs, umode_t va_mode, zfs_acl_t *paclp,
*/ */
int int
zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr, zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids, zuserns_t *mnt_ns) vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids, zidmap_t *mnt_ns)
{ {
int error; int error;
zfsvfs_t *zfsvfs = ZTOZSB(dzp); zfsvfs_t *zfsvfs = ZTOZSB(dzp);
@ -1981,7 +1981,7 @@ zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
return (SET_ERROR(ENOSYS)); return (SET_ERROR(ENOSYS));
if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr, if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr,
kcred->user_ns))) zfs_init_idmap)))
return (error); return (error);
mutex_enter(&zp->z_acl_lock); mutex_enter(&zp->z_acl_lock);
@ -2141,7 +2141,7 @@ zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
return (SET_ERROR(EPERM)); return (SET_ERROR(EPERM));
if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr, if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr,
kcred->user_ns))) zfs_init_idmap)))
return (error); return (error);
error = zfs_vsec_2_aclp(zfsvfs, ZTOI(zp)->i_mode, vsecp, cr, &fuidp, error = zfs_vsec_2_aclp(zfsvfs, ZTOI(zp)->i_mode, vsecp, cr, &fuidp,
@ -2286,7 +2286,7 @@ zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
*/ */
static int static int
zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode, zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
boolean_t anyaccess, cred_t *cr, zuserns_t *mnt_ns) boolean_t anyaccess, cred_t *cr, zidmap_t *mnt_ns)
{ {
zfsvfs_t *zfsvfs = ZTOZSB(zp); zfsvfs_t *zfsvfs = ZTOZSB(zp);
zfs_acl_t *aclp; zfs_acl_t *aclp;
@ -2420,7 +2420,7 @@ zfs_has_access(znode_t *zp, cred_t *cr)
uint32_t have = ACE_ALL_PERMS; uint32_t have = ACE_ALL_PERMS;
if (zfs_zaccess_aces_check(zp, &have, B_TRUE, cr, if (zfs_zaccess_aces_check(zp, &have, B_TRUE, cr,
kcred->user_ns) != 0) { zfs_init_idmap) != 0) {
uid_t owner; uid_t owner;
owner = zfs_fuid_map_id(ZTOZSB(zp), owner = zfs_fuid_map_id(ZTOZSB(zp),
@ -2451,7 +2451,7 @@ zfs_has_access(znode_t *zp, cred_t *cr)
*/ */
static int static int
zfs_zaccess_trivial(znode_t *zp, uint32_t *working_mode, cred_t *cr, zfs_zaccess_trivial(znode_t *zp, uint32_t *working_mode, cred_t *cr,
zuserns_t *mnt_ns) zidmap_t *mnt_ns)
{ {
int err, mask; int err, mask;
int unmapped = 0; int unmapped = 0;
@ -2464,11 +2464,9 @@ zfs_zaccess_trivial(znode_t *zp, uint32_t *working_mode, cred_t *cr,
return (unmapped ? SET_ERROR(EPERM) : 0); return (unmapped ? SET_ERROR(EPERM) : 0);
} }
#if defined(HAVE_IOPS_PERMISSION_USERNS) #if (defined(HAVE_IOPS_PERMISSION_USERNS) || \
if (mnt_ns) defined(HAVE_IOPS_PERMISSION_IDMAP))
err = generic_permission(mnt_ns, ZTOI(zp), mask); err = generic_permission(mnt_ns, ZTOI(zp), mask);
else
err = generic_permission(cr->user_ns, ZTOI(zp), mask);
#else #else
err = generic_permission(ZTOI(zp), mask); err = generic_permission(ZTOI(zp), mask);
#endif #endif
@ -2483,7 +2481,7 @@ zfs_zaccess_trivial(znode_t *zp, uint32_t *working_mode, cred_t *cr,
static int static int
zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode, zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr, zuserns_t *mnt_ns) boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr, zidmap_t *mnt_ns)
{ {
zfsvfs_t *zfsvfs = ZTOZSB(zp); zfsvfs_t *zfsvfs = ZTOZSB(zp);
int err; int err;
@ -2540,7 +2538,7 @@ zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
static int static int
zfs_zaccess_append(znode_t *zp, uint32_t *working_mode, boolean_t *check_privs, zfs_zaccess_append(znode_t *zp, uint32_t *working_mode, boolean_t *check_privs,
cred_t *cr, zuserns_t *mnt_ns) cred_t *cr, zidmap_t *mnt_ns)
{ {
if (*working_mode != ACE_WRITE_DATA) if (*working_mode != ACE_WRITE_DATA)
return (SET_ERROR(EACCES)); return (SET_ERROR(EACCES));
@ -2612,7 +2610,7 @@ zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
if ((error = zfs_enter(ZTOZSB(zdp), FTAG)) != 0) if ((error = zfs_enter(ZTOZSB(zdp), FTAG)) != 0)
return (error); return (error);
error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr, error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr,
kcred->user_ns); zfs_init_idmap);
zfs_exit(ZTOZSB(zdp), FTAG); zfs_exit(ZTOZSB(zdp), FTAG);
return (error); return (error);
} }
@ -2625,7 +2623,7 @@ zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
*/ */
int int
zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr, zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr,
zuserns_t *mnt_ns) zidmap_t *mnt_ns)
{ {
uint32_t working_mode; uint32_t working_mode;
int error; int error;
@ -2774,7 +2772,7 @@ zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr,
*/ */
int int
zfs_zaccess_rwx(znode_t *zp, mode_t mode, int flags, cred_t *cr, zfs_zaccess_rwx(znode_t *zp, mode_t mode, int flags, cred_t *cr,
zuserns_t *mnt_ns) zidmap_t *mnt_ns)
{ {
return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr, return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr,
mnt_ns)); mnt_ns));
@ -2788,7 +2786,7 @@ zfs_zaccess_unix(void *zp, int mode, cred_t *cr)
{ {
int v4_mode = zfs_unix_to_v4(mode >> 6); int v4_mode = zfs_unix_to_v4(mode >> 6);
return (zfs_zaccess(zp, v4_mode, 0, B_FALSE, cr, kcred->user_ns)); return (zfs_zaccess(zp, v4_mode, 0, B_FALSE, cr, zfs_init_idmap));
} }
/* See zfs_zaccess_delete() */ /* See zfs_zaccess_delete() */
@ -2865,7 +2863,7 @@ static const boolean_t zfs_write_implies_delete_child = B_TRUE;
* zfs_write_implies_delete_child * zfs_write_implies_delete_child
*/ */
int int
zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr, zuserns_t *mnt_ns) zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr, zidmap_t *mnt_ns)
{ {
uint32_t wanted_dirperms; uint32_t wanted_dirperms;
uint32_t dzp_working_mode = 0; uint32_t dzp_working_mode = 0;
@ -2996,7 +2994,7 @@ zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr, zuserns_t *mnt_ns)
int int
zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp, zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp,
znode_t *tzp, cred_t *cr, zuserns_t *mnt_ns) znode_t *tzp, cred_t *cr, zidmap_t *mnt_ns)
{ {
int add_perm; int add_perm;
int error; int error;

View File

@ -1120,7 +1120,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, znode_t **xzpp, cred_t *cr)
*xzpp = NULL; *xzpp = NULL;
if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL, if ((error = zfs_acl_ids_create(zp, IS_XATTR, vap, cr, NULL,
&acl_ids, kcred->user_ns)) != 0) &acl_ids, zfs_init_idmap)) != 0)
return (error); return (error);
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zp->z_projid)) { if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zp->z_projid)) {
zfs_acl_ids_free(&acl_ids); zfs_acl_ids_free(&acl_ids);
@ -1269,7 +1269,7 @@ zfs_sticky_remove_access(znode_t *zdp, znode_t *zp, cred_t *cr)
if ((uid = crgetuid(cr)) == downer || uid == fowner || if ((uid = crgetuid(cr)) == downer || uid == fowner ||
zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr, zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr,
kcred->user_ns) == 0) zfs_init_idmap) == 0)
return (0); return (0);
else else
return (secpolicy_vnode_remove(cr)); return (secpolicy_vnode_remove(cr));

View File

@ -282,6 +282,8 @@ zfsdev_detach(void)
#define ZFS_DEBUG_STR "" #define ZFS_DEBUG_STR ""
#endif #endif
zidmap_t *zfs_init_idmap;
static int static int
openzfs_init_os(void) openzfs_init_os(void)
{ {
@ -305,6 +307,8 @@ openzfs_init_os(void)
printk(KERN_NOTICE "ZFS: Posix ACLs disabled by kernel\n"); printk(KERN_NOTICE "ZFS: Posix ACLs disabled by kernel\n");
#endif /* CONFIG_FS_POSIX_ACL */ #endif /* CONFIG_FS_POSIX_ACL */
zfs_init_idmap = (zidmap_t *)zfs_get_init_idmap();
return (0); return (0);
} }

View File

@ -1194,7 +1194,7 @@ zfs_prune_aliases(zfsvfs_t *zfsvfs, unsigned long nr_to_scan)
int objects = 0; int objects = 0;
int i = 0, j = 0; int i = 0, j = 0;
zp_array = kmem_zalloc(max_array * sizeof (znode_t *), KM_SLEEP); zp_array = vmem_zalloc(max_array * sizeof (znode_t *), KM_SLEEP);
mutex_enter(&zfsvfs->z_znodes_lock); mutex_enter(&zfsvfs->z_znodes_lock);
while ((zp = list_head(&zfsvfs->z_all_znodes)) != NULL) { while ((zp = list_head(&zfsvfs->z_all_znodes)) != NULL) {
@ -1230,7 +1230,7 @@ zfs_prune_aliases(zfsvfs_t *zfsvfs, unsigned long nr_to_scan)
zrele(zp); zrele(zp);
} }
kmem_free(zp_array, max_array * sizeof (znode_t *)); vmem_free(zp_array, max_array * sizeof (znode_t *));
return (objects); return (objects);
} }
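
The switch from kmem_zalloc()/kmem_free() to vmem_zalloc()/vmem_free() above reflects that the array is sized by the caller-supplied nr_to_scan, so it can grow far beyond what the SPL's slab-backed kmem interface is meant to serve; vmem allocations are vmalloc-backed and tolerate large, non-contiguous sizes. A small userspace analogue, with calloc()/free() standing in for the SPL allocators and a purely hypothetical request size:

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
        /* Hypothetical shrinker request; the real nr_to_scan is chosen
         * by the kernel and can be very large. */
        unsigned long nr_to_scan = 1UL << 20;
        unsigned long max_array = nr_to_scan;
        void **zp_array;

        /* In the kernel this is vmem_zalloc(): a multi-megabyte,
         * count-scaled request is a poor fit for contiguous kmem slabs. */
        zp_array = calloc(max_array, sizeof (void *));
        if (zp_array == NULL)
                return (1);

        printf("staged %lu pointer slots (%lu bytes)\n", max_array,
            (unsigned long)(max_array * sizeof (void *)));

        /* Released with the matching allocator, mirroring vmem_free(). */
        free(zp_array);
        return (0);
}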

View File

@ -487,7 +487,7 @@ zfs_lookup(znode_t *zdp, char *nm, znode_t **zpp, int flags, cred_t *cr,
*/ */
if ((error = zfs_zaccess(*zpp, ACE_EXECUTE, 0, if ((error = zfs_zaccess(*zpp, ACE_EXECUTE, 0,
B_TRUE, cr, kcred->user_ns))) { B_TRUE, cr, zfs_init_idmap))) {
zrele(*zpp); zrele(*zpp);
*zpp = NULL; *zpp = NULL;
} }
@ -506,7 +506,7 @@ zfs_lookup(znode_t *zdp, char *nm, znode_t **zpp, int flags, cred_t *cr,
*/ */
if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr, if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr,
kcred->user_ns))) { zfs_init_idmap))) {
zfs_exit(zfsvfs, FTAG); zfs_exit(zfsvfs, FTAG);
return (error); return (error);
} }
@ -551,7 +551,7 @@ zfs_lookup(znode_t *zdp, char *nm, znode_t **zpp, int flags, cred_t *cr,
int int
zfs_create(znode_t *dzp, char *name, vattr_t *vap, int excl, zfs_create(znode_t *dzp, char *name, vattr_t *vap, int excl,
int mode, znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp, int mode, znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp,
zuserns_t *mnt_ns) zidmap_t *mnt_ns)
{ {
znode_t *zp; znode_t *zp;
zfsvfs_t *zfsvfs = ZTOZSB(dzp); zfsvfs_t *zfsvfs = ZTOZSB(dzp);
@ -799,7 +799,7 @@ zfs_create(znode_t *dzp, char *name, vattr_t *vap, int excl,
int int
zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl, zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp, int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp,
zuserns_t *mnt_ns) zidmap_t *mnt_ns)
{ {
(void) excl, (void) mode, (void) flag; (void) excl, (void) mode, (void) flag;
znode_t *zp = NULL, *dzp = ITOZ(dip); znode_t *zp = NULL, *dzp = ITOZ(dip);
@ -984,7 +984,7 @@ zfs_remove(znode_t *dzp, char *name, cred_t *cr, int flags)
return (error); return (error);
} }
if ((error = zfs_zaccess_delete(dzp, zp, cr, kcred->user_ns))) { if ((error = zfs_zaccess_delete(dzp, zp, cr, zfs_init_idmap))) {
goto out; goto out;
} }
@ -1179,7 +1179,7 @@ zfs_remove(znode_t *dzp, char *name, cred_t *cr, int flags)
*/ */
int int
zfs_mkdir(znode_t *dzp, char *dirname, vattr_t *vap, znode_t **zpp, zfs_mkdir(znode_t *dzp, char *dirname, vattr_t *vap, znode_t **zpp,
cred_t *cr, int flags, vsecattr_t *vsecp, zuserns_t *mnt_ns) cred_t *cr, int flags, vsecattr_t *vsecp, zidmap_t *mnt_ns)
{ {
znode_t *zp; znode_t *zp;
zfsvfs_t *zfsvfs = ZTOZSB(dzp); zfsvfs_t *zfsvfs = ZTOZSB(dzp);
@ -1400,7 +1400,7 @@ zfs_rmdir(znode_t *dzp, char *name, znode_t *cwd, cred_t *cr,
return (error); return (error);
} }
if ((error = zfs_zaccess_delete(dzp, zp, cr, kcred->user_ns))) { if ((error = zfs_zaccess_delete(dzp, zp, cr, zfs_init_idmap))) {
goto out; goto out;
} }
@ -1652,8 +1652,7 @@ zfs_readdir(struct inode *ip, zpl_dir_context_t *ctx, cred_t *cr)
* RETURN: 0 (always succeeds) * RETURN: 0 (always succeeds)
*/ */
int int
zfs_getattr_fast(struct user_namespace *user_ns, struct inode *ip, zfs_getattr_fast(zidmap_t *user_ns, struct inode *ip, struct kstat *sp)
struct kstat *sp)
{ {
znode_t *zp = ITOZ(ip); znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip); zfsvfs_t *zfsvfs = ITOZSB(ip);
@ -1841,7 +1840,7 @@ zfs_setattr_dir(znode_t *dzp)
* ip - ctime updated, mtime updated if size changed. * ip - ctime updated, mtime updated if size changed.
*/ */
int int
zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr, zuserns_t *mnt_ns) zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr, zidmap_t *mnt_ns)
{ {
struct inode *ip; struct inode *ip;
zfsvfs_t *zfsvfs = ZTOZSB(zp); zfsvfs_t *zfsvfs = ZTOZSB(zp);
@ -2038,10 +2037,10 @@ zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr, zuserns_t *mnt_ns)
* Take ownership or chgrp to group we are a member of * Take ownership or chgrp to group we are a member of
*/ */
uid = zfs_uid_to_vfsuid((struct user_namespace *)mnt_ns, uid = zfs_uid_to_vfsuid(mnt_ns, zfs_i_user_ns(ip),
zfs_i_user_ns(ip), vap->va_uid); vap->va_uid);
gid = zfs_gid_to_vfsgid((struct user_namespace *)mnt_ns, gid = zfs_gid_to_vfsgid(mnt_ns, zfs_i_user_ns(ip),
zfs_i_user_ns(ip), vap->va_gid); vap->va_gid);
take_owner = (mask & ATTR_UID) && (uid == crgetuid(cr)); take_owner = (mask & ATTR_UID) && (uid == crgetuid(cr));
take_group = (mask & ATTR_GID) && take_group = (mask & ATTR_GID) &&
zfs_groupmember(zfsvfs, gid, cr); zfs_groupmember(zfsvfs, gid, cr);
@ -2680,7 +2679,7 @@ zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
*/ */
int int
zfs_rename(znode_t *sdzp, char *snm, znode_t *tdzp, char *tnm, zfs_rename(znode_t *sdzp, char *snm, znode_t *tdzp, char *tnm,
cred_t *cr, int flags, uint64_t rflags, vattr_t *wo_vap, zuserns_t *mnt_ns) cred_t *cr, int flags, uint64_t rflags, vattr_t *wo_vap, zidmap_t *mnt_ns)
{ {
znode_t *szp, *tzp; znode_t *szp, *tzp;
zfsvfs_t *zfsvfs = ZTOZSB(sdzp); zfsvfs_t *zfsvfs = ZTOZSB(sdzp);
@ -3213,7 +3212,7 @@ zfs_rename(znode_t *sdzp, char *snm, znode_t *tdzp, char *tnm,
*/ */
int int
zfs_symlink(znode_t *dzp, char *name, vattr_t *vap, char *link, zfs_symlink(znode_t *dzp, char *name, vattr_t *vap, char *link,
znode_t **zpp, cred_t *cr, int flags, zuserns_t *mnt_ns) znode_t **zpp, cred_t *cr, int flags, zidmap_t *mnt_ns)
{ {
znode_t *zp; znode_t *zp;
zfs_dirlock_t *dl; zfs_dirlock_t *dl;
@ -3521,7 +3520,7 @@ zfs_link(znode_t *tdzp, znode_t *szp, char *name, cred_t *cr,
} }
if ((error = zfs_zaccess(tdzp, ACE_ADD_FILE, 0, B_FALSE, cr, if ((error = zfs_zaccess(tdzp, ACE_ADD_FILE, 0, B_FALSE, cr,
kcred->user_ns))) { zfs_init_idmap))) {
zfs_exit(zfsvfs, FTAG); zfs_exit(zfsvfs, FTAG);
return (error); return (error);
} }
@ -4136,7 +4135,7 @@ zfs_space(znode_t *zp, int cmd, flock64_t *bfp, int flag,
* operates directly on inodes, so we need to check access rights. * operates directly on inodes, so we need to check access rights.
*/ */
if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr, if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr,
kcred->user_ns))) { zfs_init_idmap))) {
zfs_exit(zfsvfs, FTAG); zfs_exit(zfsvfs, FTAG);
return (error); return (error);
} }

View File

@ -1963,7 +1963,7 @@ zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
} }
VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr, VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
cr, NULL, &acl_ids, kcred->user_ns)); cr, NULL, &acl_ids, zfs_init_idmap));
zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids); zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
ASSERT3P(zp, ==, rootzp); ASSERT3P(zp, ==, rootzp);
error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx); error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);

View File

@ -103,7 +103,11 @@ zpl_root_readdir(struct file *filp, void *dirent, filldir_t filldir)
* Get root directory attributes. * Get root directory attributes.
*/ */
static int static int
#ifdef HAVE_USERNS_IOPS_GETATTR #ifdef HAVE_IDMAP_IOPS_GETATTR
zpl_root_getattr_impl(struct mnt_idmap *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#elif defined(HAVE_USERNS_IOPS_GETATTR)
zpl_root_getattr_impl(struct user_namespace *user_ns, zpl_root_getattr_impl(struct user_namespace *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask, const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags) unsigned int query_flags)
@ -115,9 +119,11 @@ zpl_root_getattr_impl(const struct path *path, struct kstat *stat,
(void) request_mask, (void) query_flags; (void) request_mask, (void) query_flags;
struct inode *ip = path->dentry->d_inode; struct inode *ip = path->dentry->d_inode;
#ifdef HAVE_USERNS_IOPS_GETATTR #if (defined(HAVE_USERNS_IOPS_GETATTR) || defined(HAVE_IDMAP_IOPS_GETATTR))
#ifdef HAVE_GENERIC_FILLATTR_USERNS #ifdef HAVE_GENERIC_FILLATTR_USERNS
generic_fillattr(user_ns, ip, stat); generic_fillattr(user_ns, ip, stat);
#elif defined(HAVE_GENERIC_FILLATTR_IDMAP)
generic_fillattr(user_ns, ip, stat);
#else #else
(void) user_ns; (void) user_ns;
#endif #endif
@ -312,6 +318,10 @@ static int
zpl_snapdir_rename2(struct user_namespace *user_ns, struct inode *sdip, zpl_snapdir_rename2(struct user_namespace *user_ns, struct inode *sdip,
struct dentry *sdentry, struct inode *tdip, struct dentry *tdentry, struct dentry *sdentry, struct inode *tdip, struct dentry *tdentry,
unsigned int flags) unsigned int flags)
#elif defined(HAVE_IOPS_RENAME_IDMAP)
zpl_snapdir_rename2(struct mnt_idmap *user_ns, struct inode *sdip,
struct dentry *sdentry, struct inode *tdip, struct dentry *tdentry,
unsigned int flags)
#else #else
zpl_snapdir_rename2(struct inode *sdip, struct dentry *sdentry, zpl_snapdir_rename2(struct inode *sdip, struct dentry *sdentry,
struct inode *tdip, struct dentry *tdentry, unsigned int flags) struct inode *tdip, struct dentry *tdentry, unsigned int flags)
@ -333,7 +343,9 @@ zpl_snapdir_rename2(struct inode *sdip, struct dentry *sdentry,
return (error); return (error);
} }
#if !defined(HAVE_RENAME_WANTS_FLAGS) && !defined(HAVE_IOPS_RENAME_USERNS) #if (!defined(HAVE_RENAME_WANTS_FLAGS) && \
!defined(HAVE_IOPS_RENAME_USERNS) && \
!defined(HAVE_IOPS_RENAME_IDMAP))
static int static int
zpl_snapdir_rename(struct inode *sdip, struct dentry *sdentry, zpl_snapdir_rename(struct inode *sdip, struct dentry *sdentry,
struct inode *tdip, struct dentry *tdentry) struct inode *tdip, struct dentry *tdentry)
@ -360,6 +372,9 @@ static int
#ifdef HAVE_IOPS_MKDIR_USERNS #ifdef HAVE_IOPS_MKDIR_USERNS
zpl_snapdir_mkdir(struct user_namespace *user_ns, struct inode *dip, zpl_snapdir_mkdir(struct user_namespace *user_ns, struct inode *dip,
struct dentry *dentry, umode_t mode) struct dentry *dentry, umode_t mode)
#elif defined(HAVE_IOPS_MKDIR_IDMAP)
zpl_snapdir_mkdir(struct mnt_idmap *user_ns, struct inode *dip,
struct dentry *dentry, umode_t mode)
#else #else
zpl_snapdir_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode) zpl_snapdir_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
#endif #endif
@ -371,10 +386,10 @@ zpl_snapdir_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
crhold(cr); crhold(cr);
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP); vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
#ifdef HAVE_IOPS_MKDIR_USERNS #if (defined(HAVE_IOPS_MKDIR_USERNS) || defined(HAVE_IOPS_MKDIR_IDMAP))
zpl_vap_init(vap, dip, mode | S_IFDIR, cr, user_ns); zpl_vap_init(vap, dip, mode | S_IFDIR, cr, user_ns);
#else #else
zpl_vap_init(vap, dip, mode | S_IFDIR, cr, kcred->user_ns); zpl_vap_init(vap, dip, mode | S_IFDIR, cr, zfs_init_idmap);
#endif #endif
error = -zfsctl_snapdir_mkdir(dip, dname(dentry), vap, &ip, cr, 0); error = -zfsctl_snapdir_mkdir(dip, dname(dentry), vap, &ip, cr, 0);
@ -395,7 +410,11 @@ zpl_snapdir_mkdir(struct inode *dip, struct dentry *dentry, umode_t mode)
* Get snapshot directory attributes. * Get snapshot directory attributes.
*/ */
static int static int
#ifdef HAVE_USERNS_IOPS_GETATTR #ifdef HAVE_IDMAP_IOPS_GETATTR
zpl_snapdir_getattr_impl(struct mnt_idmap *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#elif defined(HAVE_USERNS_IOPS_GETATTR)
zpl_snapdir_getattr_impl(struct user_namespace *user_ns, zpl_snapdir_getattr_impl(struct user_namespace *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask, const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags) unsigned int query_flags)
@ -411,9 +430,11 @@ zpl_snapdir_getattr_impl(const struct path *path, struct kstat *stat,
if ((error = zpl_enter(zfsvfs, FTAG)) != 0) if ((error = zpl_enter(zfsvfs, FTAG)) != 0)
return (error); return (error);
#ifdef HAVE_USERNS_IOPS_GETATTR #if (defined(HAVE_USERNS_IOPS_GETATTR) || defined(HAVE_IDMAP_IOPS_GETATTR))
#ifdef HAVE_GENERIC_FILLATTR_USERNS #ifdef HAVE_GENERIC_FILLATTR_USERNS
generic_fillattr(user_ns, ip, stat); generic_fillattr(user_ns, ip, stat);
#elif defined(HAVE_GENERIC_FILLATTR_IDMAP)
generic_fillattr(user_ns, ip, stat);
#else #else
(void) user_ns; (void) user_ns;
#endif #endif
@ -471,7 +492,9 @@ const struct file_operations zpl_fops_snapdir = {
const struct inode_operations zpl_ops_snapdir = { const struct inode_operations zpl_ops_snapdir = {
.lookup = zpl_snapdir_lookup, .lookup = zpl_snapdir_lookup,
.getattr = zpl_snapdir_getattr, .getattr = zpl_snapdir_getattr,
#if defined(HAVE_RENAME_WANTS_FLAGS) || defined(HAVE_IOPS_RENAME_USERNS) #if (defined(HAVE_RENAME_WANTS_FLAGS) || \
defined(HAVE_IOPS_RENAME_USERNS) || \
defined(HAVE_IOPS_RENAME_IDMAP))
.rename = zpl_snapdir_rename2, .rename = zpl_snapdir_rename2,
#else #else
.rename = zpl_snapdir_rename, .rename = zpl_snapdir_rename,
@ -562,6 +585,10 @@ static int
zpl_shares_getattr_impl(struct user_namespace *user_ns, zpl_shares_getattr_impl(struct user_namespace *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask, const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags) unsigned int query_flags)
#elif defined(HAVE_IDMAP_IOPS_GETATTR)
zpl_shares_getattr_impl(struct mnt_idmap *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#else #else
zpl_shares_getattr_impl(const struct path *path, struct kstat *stat, zpl_shares_getattr_impl(const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int query_flags) u32 request_mask, unsigned int query_flags)
@ -577,9 +604,11 @@ zpl_shares_getattr_impl(const struct path *path, struct kstat *stat,
return (error); return (error);
if (zfsvfs->z_shares_dir == 0) { if (zfsvfs->z_shares_dir == 0) {
#ifdef HAVE_USERNS_IOPS_GETATTR #if (defined(HAVE_USERNS_IOPS_GETATTR) || defined(HAVE_IDMAP_IOPS_GETATTR))
#ifdef HAVE_GENERIC_FILLATTR_USERNS #ifdef HAVE_GENERIC_FILLATTR_USERNS
generic_fillattr(user_ns, path->dentry->d_inode, stat); generic_fillattr(user_ns, path->dentry->d_inode, stat);
#elif defined(HAVE_GENERIC_FILLATTR_IDMAP)
generic_fillattr(user_ns, path->dentry->d_inode, stat);
#else #else
(void) user_ns; (void) user_ns;
#endif #endif
@ -594,12 +623,8 @@ zpl_shares_getattr_impl(const struct path *path, struct kstat *stat,
error = -zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp); error = -zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp);
if (error == 0) { if (error == 0) {
#ifdef HAVE_USERNS_IOPS_GETATTR #if (defined(HAVE_USERNS_IOPS_GETATTR) || defined(HAVE_IDMAP_IOPS_GETATTR))
#ifdef HAVE_GENERIC_FILLATTR_USERNS
error = -zfs_getattr_fast(user_ns, ZTOI(dzp), stat); error = -zfs_getattr_fast(user_ns, ZTOI(dzp), stat);
#else
(void) user_ns;
#endif
#else #else
error = -zfs_getattr_fast(kcred->user_ns, ZTOI(dzp), stat); error = -zfs_getattr_fast(kcred->user_ns, ZTOI(dzp), stat);
#endif #endif

View File

@ -736,6 +736,29 @@ zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
return (0); return (0);
} }
#ifdef HAVE_WRITEPAGE_T_FOLIO
static int
zpl_putfolio(struct folio *pp, struct writeback_control *wbc, void *data)
{
(void) zpl_putpage(&pp->page, wbc, data);
return (0);
}
#endif
static inline int
zpl_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc, void *data)
{
int result;
#ifdef HAVE_WRITEPAGE_T_FOLIO
result = write_cache_pages(mapping, wbc, zpl_putfolio, data);
#else
result = write_cache_pages(mapping, wbc, zpl_putpage, data);
#endif
return (result);
}
static int static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc) zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{ {
@ -760,7 +783,7 @@ zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
*/ */
boolean_t for_sync = (sync_mode == WB_SYNC_ALL); boolean_t for_sync = (sync_mode == WB_SYNC_ALL);
wbc->sync_mode = WB_SYNC_NONE; wbc->sync_mode = WB_SYNC_NONE;
result = write_cache_pages(mapping, wbc, zpl_putpage, &for_sync); result = zpl_write_cache_pages(mapping, wbc, &for_sync);
if (sync_mode != wbc->sync_mode) { if (sync_mode != wbc->sync_mode) {
if ((result = zpl_enter_verify_zp(zfsvfs, zp, FTAG)) != 0) if ((result = zpl_enter_verify_zp(zfsvfs, zp, FTAG)) != 0)
return (result); return (result);
@ -776,8 +799,7 @@ zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
* details). That being said, this is a no-op in most cases. * details). That being said, this is a no-op in most cases.
*/ */
wbc->sync_mode = sync_mode; wbc->sync_mode = sync_mode;
result = write_cache_pages(mapping, wbc, zpl_putpage, result = zpl_write_cache_pages(mapping, wbc, &for_sync);
&for_sync);
} }
return (result); return (result);
} }
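
The zpl_write_cache_pages() helper added above exists because newer kernels changed writepage_t to take a struct folio rather than a struct page, so the existing zpl_putpage() callback is reached through the one-line zpl_putfolio() adapter when HAVE_WRITEPAGE_T_FOLIO is set. A userspace sketch of that dispatch, using dummy stand-in types instead of the real kernel structures:

#include <stdio.h>

/* Dummy stand-ins for the kernel structures. */
struct page { int id; };
struct folio { struct page page; };
struct writeback_control { int sync_mode; };

static int
putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
        (void) wbc, (void) data;
        printf("writing back page %d\n", pp->id);
        return (0);
}

#ifdef HAVE_WRITEPAGE_T_FOLIO
/* Folio-taking kernels: unwrap the folio, forward to the old callback. */
static int
putfolio(struct folio *fp, struct writeback_control *wbc, void *data)
{
        return (putpage(&fp->page, wbc, data));
}
#endif

/* Stand-in for write_cache_pages(): hand one dirty "page" to whichever
 * callback signature this build expects. */
static int
write_cache_pages_demo(struct writeback_control *wbc, void *data)
{
#ifdef HAVE_WRITEPAGE_T_FOLIO
        struct folio f = { .page = { .id = 7 } };
        return (putfolio(&f, wbc, data));
#else
        struct page p = { .id = 7 };
        return (putpage(&p, wbc, data));
#endif
}

int
main(void)
{
        struct writeback_control wbc = { .sync_mode = 1 };

        return (write_cache_pages_demo(&wbc, NULL));
}
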
@ -1027,7 +1049,7 @@ __zpl_ioctl_setflags(struct inode *ip, uint32_t ioctl_flags, xvattr_t *xva)
!capable(CAP_LINUX_IMMUTABLE)) !capable(CAP_LINUX_IMMUTABLE))
return (-EPERM); return (-EPERM);
if (!zpl_inode_owner_or_capable(kcred->user_ns, ip)) if (!zpl_inode_owner_or_capable(zfs_init_idmap, ip))
return (-EACCES); return (-EACCES);
xva_init(xva); xva_init(xva);
@ -1074,7 +1096,7 @@ zpl_ioctl_setflags(struct file *filp, void __user *arg)
crhold(cr); crhold(cr);
cookie = spl_fstrans_mark(); cookie = spl_fstrans_mark();
err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr, kcred->user_ns); err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr, zfs_init_idmap);
spl_fstrans_unmark(cookie); spl_fstrans_unmark(cookie);
crfree(cr); crfree(cr);
@ -1122,7 +1144,7 @@ zpl_ioctl_setxattr(struct file *filp, void __user *arg)
crhold(cr); crhold(cr);
cookie = spl_fstrans_mark(); cookie = spl_fstrans_mark();
err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr, kcred->user_ns); err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr, zfs_init_idmap);
spl_fstrans_unmark(cookie); spl_fstrans_unmark(cookie);
crfree(cr); crfree(cr);
@ -1157,7 +1179,7 @@ __zpl_ioctl_setdosflags(struct inode *ip, uint64_t ioctl_flags, xvattr_t *xva)
!capable(CAP_LINUX_IMMUTABLE)) !capable(CAP_LINUX_IMMUTABLE))
return (-EPERM); return (-EPERM);
if (!zpl_inode_owner_or_capable(kcred->user_ns, ip)) if (!zpl_inode_owner_or_capable(zfs_init_idmap, ip))
return (-EACCES); return (-EACCES);
xva_init(xva); xva_init(xva);
@ -1210,7 +1232,7 @@ zpl_ioctl_setdosflags(struct file *filp, void __user *arg)
crhold(cr); crhold(cr);
cookie = spl_fstrans_mark(); cookie = spl_fstrans_mark();
err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr, kcred->user_ns); err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr, zfs_init_idmap);
spl_fstrans_unmark(cookie); spl_fstrans_unmark(cookie);
crfree(cr); crfree(cr);

View File

@ -113,12 +113,12 @@ zpl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
void void
zpl_vap_init(vattr_t *vap, struct inode *dir, umode_t mode, cred_t *cr, zpl_vap_init(vattr_t *vap, struct inode *dir, umode_t mode, cred_t *cr,
zuserns_t *mnt_ns) zidmap_t *mnt_ns)
{ {
vap->va_mask = ATTR_MODE; vap->va_mask = ATTR_MODE;
vap->va_mode = mode; vap->va_mode = mode;
vap->va_uid = zfs_vfsuid_to_uid((struct user_namespace *)mnt_ns, vap->va_uid = zfs_vfsuid_to_uid(mnt_ns,
zfs_i_user_ns(dir), crgetuid(cr)); zfs_i_user_ns(dir), crgetuid(cr));
if (dir->i_mode & S_ISGID) { if (dir->i_mode & S_ISGID) {
@ -126,7 +126,7 @@ zpl_vap_init(vattr_t *vap, struct inode *dir, umode_t mode, cred_t *cr,
if (S_ISDIR(mode)) if (S_ISDIR(mode))
vap->va_mode |= S_ISGID; vap->va_mode |= S_ISGID;
} else { } else {
vap->va_gid = zfs_vfsgid_to_gid((struct user_namespace *)mnt_ns, vap->va_gid = zfs_vfsgid_to_gid(mnt_ns,
zfs_i_user_ns(dir), crgetgid(cr)); zfs_i_user_ns(dir), crgetgid(cr));
} }
} }
@ -135,6 +135,9 @@ static int
#ifdef HAVE_IOPS_CREATE_USERNS #ifdef HAVE_IOPS_CREATE_USERNS
zpl_create(struct user_namespace *user_ns, struct inode *dir, zpl_create(struct user_namespace *user_ns, struct inode *dir,
struct dentry *dentry, umode_t mode, bool flag) struct dentry *dentry, umode_t mode, bool flag)
#elif defined(HAVE_IOPS_CREATE_IDMAP)
zpl_create(struct mnt_idmap *user_ns, struct inode *dir,
struct dentry *dentry, umode_t mode, bool flag)
#else #else
zpl_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool flag) zpl_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool flag)
#endif #endif
@ -144,8 +147,8 @@ zpl_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool flag)
vattr_t *vap; vattr_t *vap;
int error; int error;
fstrans_cookie_t cookie; fstrans_cookie_t cookie;
#ifndef HAVE_IOPS_CREATE_USERNS #if !(defined(HAVE_IOPS_CREATE_USERNS) || defined(HAVE_IOPS_CREATE_IDMAP))
zuserns_t *user_ns = kcred->user_ns; zidmap_t *user_ns = kcred->user_ns;
#endif #endif
crhold(cr); crhold(cr);
@ -181,6 +184,9 @@ static int
#ifdef HAVE_IOPS_MKNOD_USERNS #ifdef HAVE_IOPS_MKNOD_USERNS
zpl_mknod(struct user_namespace *user_ns, struct inode *dir, zpl_mknod(struct user_namespace *user_ns, struct inode *dir,
struct dentry *dentry, umode_t mode, struct dentry *dentry, umode_t mode,
#elif defined(HAVE_IOPS_MKNOD_IDMAP)
zpl_mknod(struct mnt_idmap *user_ns, struct inode *dir,
struct dentry *dentry, umode_t mode,
#else #else
zpl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, zpl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
#endif #endif
@ -191,8 +197,8 @@ zpl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
vattr_t *vap; vattr_t *vap;
int error; int error;
fstrans_cookie_t cookie; fstrans_cookie_t cookie;
#ifndef HAVE_IOPS_MKNOD_USERNS #if !(defined(HAVE_IOPS_MKNOD_USERNS) || defined(HAVE_IOPS_MKNOD_IDMAP))
zuserns_t *user_ns = kcred->user_ns; zidmap_t *user_ns = kcred->user_ns;
#endif #endif
/* /*
@ -234,7 +240,10 @@ zpl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
#ifdef HAVE_TMPFILE #ifdef HAVE_TMPFILE
static int static int
#ifndef HAVE_TMPFILE_DENTRY #ifdef HAVE_TMPFILE_IDMAP
zpl_tmpfile(struct mnt_idmap *userns, struct inode *dir,
struct file *file, umode_t mode)
#elif !defined(HAVE_TMPFILE_DENTRY)
zpl_tmpfile(struct user_namespace *userns, struct inode *dir, zpl_tmpfile(struct user_namespace *userns, struct inode *dir,
struct file *file, umode_t mode) struct file *file, umode_t mode)
#else #else
@ -251,8 +260,8 @@ zpl_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
vattr_t *vap; vattr_t *vap;
int error; int error;
fstrans_cookie_t cookie; fstrans_cookie_t cookie;
#ifndef HAVE_TMPFILE_USERNS #if !(defined(HAVE_TMPFILE_USERNS) || defined(HAVE_TMPFILE_IDMAP))
zuserns_t *userns = kcred->user_ns; zidmap_t *userns = kcred->user_ns;
#endif #endif
crhold(cr); crhold(cr);
@ -330,6 +339,9 @@ static int
#ifdef HAVE_IOPS_MKDIR_USERNS #ifdef HAVE_IOPS_MKDIR_USERNS
zpl_mkdir(struct user_namespace *user_ns, struct inode *dir, zpl_mkdir(struct user_namespace *user_ns, struct inode *dir,
struct dentry *dentry, umode_t mode) struct dentry *dentry, umode_t mode)
#elif defined(HAVE_IOPS_MKDIR_IDMAP)
zpl_mkdir(struct mnt_idmap *user_ns, struct inode *dir,
struct dentry *dentry, umode_t mode)
#else #else
zpl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) zpl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
#endif #endif
@ -339,8 +351,8 @@ zpl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
znode_t *zp; znode_t *zp;
int error; int error;
fstrans_cookie_t cookie; fstrans_cookie_t cookie;
#ifndef HAVE_IOPS_MKDIR_USERNS #if !(defined(HAVE_IOPS_MKDIR_USERNS) || defined(HAVE_IOPS_MKDIR_IDMAP))
zuserns_t *user_ns = kcred->user_ns; zidmap_t *user_ns = kcred->user_ns;
#endif #endif
crhold(cr); crhold(cr);
@ -403,6 +415,10 @@ static int
zpl_getattr_impl(struct user_namespace *user_ns, zpl_getattr_impl(struct user_namespace *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask, const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags) unsigned int query_flags)
#elif defined(HAVE_IDMAP_IOPS_GETATTR)
zpl_getattr_impl(struct mnt_idmap *user_ns,
const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags)
#else #else
zpl_getattr_impl(const struct path *path, struct kstat *stat, u32 request_mask, zpl_getattr_impl(const struct path *path, struct kstat *stat, u32 request_mask,
unsigned int query_flags) unsigned int query_flags)
@ -419,7 +435,7 @@ zpl_getattr_impl(const struct path *path, struct kstat *stat, u32 request_mask,
* XXX query_flags currently ignored. * XXX query_flags currently ignored.
*/ */
#ifdef HAVE_USERNS_IOPS_GETATTR #if (defined(HAVE_USERNS_IOPS_GETATTR) || defined(HAVE_IDMAP_IOPS_GETATTR))
error = -zfs_getattr_fast(user_ns, ip, stat); error = -zfs_getattr_fast(user_ns, ip, stat);
#else #else
error = -zfs_getattr_fast(kcred->user_ns, ip, stat); error = -zfs_getattr_fast(kcred->user_ns, ip, stat);
@ -458,9 +474,12 @@ zpl_getattr_impl(const struct path *path, struct kstat *stat, u32 request_mask,
ZPL_GETATTR_WRAPPER(zpl_getattr); ZPL_GETATTR_WRAPPER(zpl_getattr);
static int static int
#ifdef HAVE_SETATTR_PREPARE_USERNS #ifdef HAVE_USERNS_IOPS_SETATTR
zpl_setattr(struct user_namespace *user_ns, struct dentry *dentry, zpl_setattr(struct user_namespace *user_ns, struct dentry *dentry,
struct iattr *ia) struct iattr *ia)
#elif defined(HAVE_IDMAP_IOPS_SETATTR)
zpl_setattr(struct mnt_idmap *user_ns, struct dentry *dentry,
struct iattr *ia)
#else #else
zpl_setattr(struct dentry *dentry, struct iattr *ia) zpl_setattr(struct dentry *dentry, struct iattr *ia)
#endif #endif
@ -473,8 +492,10 @@ zpl_setattr(struct dentry *dentry, struct iattr *ia)
#ifdef HAVE_SETATTR_PREPARE_USERNS #ifdef HAVE_SETATTR_PREPARE_USERNS
error = zpl_setattr_prepare(user_ns, dentry, ia); error = zpl_setattr_prepare(user_ns, dentry, ia);
#elif defined(HAVE_SETATTR_PREPARE_IDMAP)
error = zpl_setattr_prepare(user_ns, dentry, ia);
#else #else
error = zpl_setattr_prepare(kcred->user_ns, dentry, ia); error = zpl_setattr_prepare(zfs_init_idmap, dentry, ia);
#endif #endif
if (error) if (error)
return (error); return (error);
@ -506,10 +527,12 @@ zpl_setattr(struct dentry *dentry, struct iattr *ia)
ip->i_atime = zpl_inode_timestamp_truncate(ia->ia_atime, ip); ip->i_atime = zpl_inode_timestamp_truncate(ia->ia_atime, ip);
cookie = spl_fstrans_mark(); cookie = spl_fstrans_mark();
#ifdef HAVE_SETATTR_PREPARE_USERNS #ifdef HAVE_USERNS_IOPS_SETATTR
error = -zfs_setattr(ITOZ(ip), vap, 0, cr, user_ns);
#elif defined(HAVE_IDMAP_IOPS_SETATTR)
error = -zfs_setattr(ITOZ(ip), vap, 0, cr, user_ns); error = -zfs_setattr(ITOZ(ip), vap, 0, cr, user_ns);
#else #else
error = -zfs_setattr(ITOZ(ip), vap, 0, cr, kcred->user_ns); error = -zfs_setattr(ITOZ(ip), vap, 0, cr, zfs_init_idmap);
#endif #endif
if (!error && (ia->ia_valid & ATTR_MODE)) if (!error && (ia->ia_valid & ATTR_MODE))
error = zpl_chmod_acl(ip); error = zpl_chmod_acl(ip);
@ -527,6 +550,10 @@ static int
zpl_rename2(struct user_namespace *user_ns, struct inode *sdip, zpl_rename2(struct user_namespace *user_ns, struct inode *sdip,
struct dentry *sdentry, struct inode *tdip, struct dentry *tdentry, struct dentry *sdentry, struct inode *tdip, struct dentry *tdentry,
unsigned int rflags) unsigned int rflags)
#elif defined(HAVE_IOPS_RENAME_IDMAP)
zpl_rename2(struct mnt_idmap *user_ns, struct inode *sdip,
struct dentry *sdentry, struct inode *tdip, struct dentry *tdentry,
unsigned int rflags)
#else #else
zpl_rename2(struct inode *sdip, struct dentry *sdentry, zpl_rename2(struct inode *sdip, struct dentry *sdentry,
struct inode *tdip, struct dentry *tdentry, unsigned int rflags) struct inode *tdip, struct dentry *tdentry, unsigned int rflags)
@ -536,8 +563,8 @@ zpl_rename2(struct inode *sdip, struct dentry *sdentry,
vattr_t *wo_vap = NULL; vattr_t *wo_vap = NULL;
int error; int error;
fstrans_cookie_t cookie; fstrans_cookie_t cookie;
#ifndef HAVE_IOPS_RENAME_USERNS #if !(defined(HAVE_IOPS_RENAME_USERNS) || defined(HAVE_IOPS_RENAME_IDMAP))
zuserns_t *user_ns = kcred->user_ns; zidmap_t *user_ns = kcred->user_ns;
#endif #endif
crhold(cr); crhold(cr);
@ -561,7 +588,8 @@ zpl_rename2(struct inode *sdip, struct dentry *sdentry,
#if !defined(HAVE_IOPS_RENAME_USERNS) && \ #if !defined(HAVE_IOPS_RENAME_USERNS) && \
!defined(HAVE_RENAME_WANTS_FLAGS) && \ !defined(HAVE_RENAME_WANTS_FLAGS) && \
!defined(HAVE_RENAME2) !defined(HAVE_RENAME2) && \
!defined(HAVE_IOPS_RENAME_IDMAP)
static int static int
zpl_rename(struct inode *sdip, struct dentry *sdentry, zpl_rename(struct inode *sdip, struct dentry *sdentry,
struct inode *tdip, struct dentry *tdentry) struct inode *tdip, struct dentry *tdentry)
@ -574,6 +602,9 @@ static int
#ifdef HAVE_IOPS_SYMLINK_USERNS #ifdef HAVE_IOPS_SYMLINK_USERNS
zpl_symlink(struct user_namespace *user_ns, struct inode *dir, zpl_symlink(struct user_namespace *user_ns, struct inode *dir,
struct dentry *dentry, const char *name) struct dentry *dentry, const char *name)
#elif defined(HAVE_IOPS_SYMLINK_IDMAP)
zpl_symlink(struct mnt_idmap *user_ns, struct inode *dir,
struct dentry *dentry, const char *name)
#else #else
zpl_symlink(struct inode *dir, struct dentry *dentry, const char *name) zpl_symlink(struct inode *dir, struct dentry *dentry, const char *name)
#endif #endif
@ -583,8 +614,8 @@ zpl_symlink(struct inode *dir, struct dentry *dentry, const char *name)
znode_t *zp; znode_t *zp;
int error; int error;
fstrans_cookie_t cookie; fstrans_cookie_t cookie;
#ifndef HAVE_IOPS_SYMLINK_USERNS #if !(defined(HAVE_IOPS_SYMLINK_USERNS) || defined(HAVE_IOPS_SYMLINK_IDMAP))
zuserns_t *user_ns = kcred->user_ns; zidmap_t *user_ns = kcred->user_ns;
#endif #endif
crhold(cr); crhold(cr);
@ -802,6 +833,8 @@ const struct inode_operations zpl_dir_inode_operations = {
.rename2 = zpl_rename2, .rename2 = zpl_rename2,
#elif defined(HAVE_RENAME_WANTS_FLAGS) || defined(HAVE_IOPS_RENAME_USERNS) #elif defined(HAVE_RENAME_WANTS_FLAGS) || defined(HAVE_IOPS_RENAME_USERNS)
.rename = zpl_rename2, .rename = zpl_rename2,
#elif defined(HAVE_IOPS_RENAME_IDMAP)
.rename = zpl_rename2,
#else #else
.rename = zpl_rename, .rename = zpl_rename,
#endif #endif

View File

@ -499,7 +499,7 @@ zpl_xattr_set_dir(struct inode *ip, const char *name, const void *value,
vap->va_gid = crgetgid(cr); vap->va_gid = crgetgid(cr);
error = -zfs_create(dxzp, (char *)name, vap, 0, 0644, &xzp, error = -zfs_create(dxzp, (char *)name, vap, 0, 0644, &xzp,
cr, ATTR_NOACLCHECK, NULL, kcred->user_ns); cr, ATTR_NOACLCHECK, NULL, zfs_init_idmap);
if (error) if (error)
goto out; goto out;
} }
@ -738,7 +738,7 @@ __zpl_xattr_user_get(struct inode *ip, const char *name,
ZPL_XATTR_GET_WRAPPER(zpl_xattr_user_get); ZPL_XATTR_GET_WRAPPER(zpl_xattr_user_get);
static int static int
__zpl_xattr_user_set(struct user_namespace *user_ns, __zpl_xattr_user_set(zidmap_t *user_ns,
struct inode *ip, const char *name, struct inode *ip, const char *name,
const void *value, size_t size, int flags) const void *value, size_t size, int flags)
{ {
@ -848,7 +848,7 @@ __zpl_xattr_trusted_get(struct inode *ip, const char *name,
ZPL_XATTR_GET_WRAPPER(zpl_xattr_trusted_get); ZPL_XATTR_GET_WRAPPER(zpl_xattr_trusted_get);
static int static int
__zpl_xattr_trusted_set(struct user_namespace *user_ns, __zpl_xattr_trusted_set(zidmap_t *user_ns,
struct inode *ip, const char *name, struct inode *ip, const char *name,
const void *value, size_t size, int flags) const void *value, size_t size, int flags)
{ {
@ -918,7 +918,7 @@ __zpl_xattr_security_get(struct inode *ip, const char *name,
ZPL_XATTR_GET_WRAPPER(zpl_xattr_security_get); ZPL_XATTR_GET_WRAPPER(zpl_xattr_security_get);
static int static int
__zpl_xattr_security_set(struct user_namespace *user_ns, __zpl_xattr_security_set(zidmap_t *user_ns,
struct inode *ip, const char *name, struct inode *ip, const char *name,
const void *value, size_t size, int flags) const void *value, size_t size, int flags)
{ {
@ -1061,6 +1061,9 @@ int
#ifdef HAVE_SET_ACL_USERNS #ifdef HAVE_SET_ACL_USERNS
zpl_set_acl(struct user_namespace *userns, struct inode *ip, zpl_set_acl(struct user_namespace *userns, struct inode *ip,
struct posix_acl *acl, int type) struct posix_acl *acl, int type)
#elif defined(HAVE_SET_ACL_IDMAP_DENTRY)
zpl_set_acl(struct mnt_idmap *userns, struct dentry *dentry,
struct posix_acl *acl, int type)
#elif defined(HAVE_SET_ACL_USERNS_DENTRY_ARG2) #elif defined(HAVE_SET_ACL_USERNS_DENTRY_ARG2)
zpl_set_acl(struct user_namespace *userns, struct dentry *dentry, zpl_set_acl(struct user_namespace *userns, struct dentry *dentry,
struct posix_acl *acl, int type) struct posix_acl *acl, int type)
@ -1070,6 +1073,8 @@ zpl_set_acl(struct inode *ip, struct posix_acl *acl, int type)
{ {
#ifdef HAVE_SET_ACL_USERNS_DENTRY_ARG2 #ifdef HAVE_SET_ACL_USERNS_DENTRY_ARG2
return (zpl_set_acl_impl(d_inode(dentry), acl, type)); return (zpl_set_acl_impl(d_inode(dentry), acl, type));
#elif defined(HAVE_SET_ACL_IDMAP_DENTRY)
return (zpl_set_acl_impl(d_inode(dentry), acl, type));
#else #else
return (zpl_set_acl_impl(ip, acl, type)); return (zpl_set_acl_impl(ip, acl, type));
#endif /* HAVE_SET_ACL_USERNS_DENTRY_ARG2 */ #endif /* HAVE_SET_ACL_USERNS_DENTRY_ARG2 */
@ -1313,7 +1318,7 @@ __zpl_xattr_acl_get_default(struct inode *ip, const char *name,
ZPL_XATTR_GET_WRAPPER(zpl_xattr_acl_get_default); ZPL_XATTR_GET_WRAPPER(zpl_xattr_acl_get_default);
static int static int
__zpl_xattr_acl_set_access(struct user_namespace *mnt_ns, __zpl_xattr_acl_set_access(zidmap_t *mnt_ns,
struct inode *ip, const char *name, struct inode *ip, const char *name,
const void *value, size_t size, int flags) const void *value, size_t size, int flags)
{ {
@ -1328,12 +1333,12 @@ __zpl_xattr_acl_set_access(struct user_namespace *mnt_ns,
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX) if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (-EOPNOTSUPP); return (-EOPNOTSUPP);
#if defined(HAVE_XATTR_SET_USERNS) #if defined(HAVE_XATTR_SET_USERNS) || defined(HAVE_XATTR_SET_IDMAP)
if (!zpl_inode_owner_or_capable(mnt_ns, ip)) if (!zpl_inode_owner_or_capable(mnt_ns, ip))
return (-EPERM); return (-EPERM);
#else #else
(void) mnt_ns; (void) mnt_ns;
if (!zpl_inode_owner_or_capable(kcred->user_ns, ip)) if (!zpl_inode_owner_or_capable(zfs_init_idmap, ip))
return (-EPERM); return (-EPERM);
#endif #endif
@ -1359,7 +1364,7 @@ __zpl_xattr_acl_set_access(struct user_namespace *mnt_ns,
ZPL_XATTR_SET_WRAPPER(zpl_xattr_acl_set_access); ZPL_XATTR_SET_WRAPPER(zpl_xattr_acl_set_access);
static int static int
__zpl_xattr_acl_set_default(struct user_namespace *mnt_ns, __zpl_xattr_acl_set_default(zidmap_t *mnt_ns,
struct inode *ip, const char *name, struct inode *ip, const char *name,
const void *value, size_t size, int flags) const void *value, size_t size, int flags)
{ {
@ -1374,12 +1379,12 @@ __zpl_xattr_acl_set_default(struct user_namespace *mnt_ns,
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX) if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (-EOPNOTSUPP); return (-EOPNOTSUPP);
#if defined(HAVE_XATTR_SET_USERNS) #if defined(HAVE_XATTR_SET_USERNS) || defined(HAVE_XATTR_SET_IDMAP)
if (!zpl_inode_owner_or_capable(mnt_ns, ip)) if (!zpl_inode_owner_or_capable(mnt_ns, ip))
return (-EPERM); return (-EPERM);
#else #else
(void) mnt_ns; (void) mnt_ns;
if (!zpl_inode_owner_or_capable(kcred->user_ns, ip)) if (!zpl_inode_owner_or_capable(zfs_init_idmap, ip))
return (-EPERM); return (-EPERM);
#endif #endif

View File

@ -731,6 +731,12 @@ zpool_feature_init(void)
ZFEATURE_FLAG_READONLY_COMPAT, ZFEATURE_TYPE_BOOLEAN, NULL, ZFEATURE_FLAG_READONLY_COMPAT, ZFEATURE_TYPE_BOOLEAN, NULL,
sfeatures); sfeatures);
zfeature_register(SPA_FEATURE_AVZ_V2,
"com.klarasystems:vdev_zaps_v2", "vdev_zaps_v2",
"Support for root vdev ZAP.",
ZFEATURE_FLAG_MOS, ZFEATURE_TYPE_BOOLEAN, NULL,
sfeatures);
zfs_mod_list_supported_free(sfeatures); zfs_mod_list_supported_free(sfeatures);
} }

View File

@ -4465,7 +4465,7 @@ arc_evict(void)
*/ */
int64_t prune = 0; int64_t prune = 0;
int64_t dn = wmsum_value(&arc_sums.arcstat_dnode_size); int64_t dn = wmsum_value(&arc_sums.arcstat_dnode_size);
w = wt * (arc_meta >> 16) >> 16; w = wt * (int64_t)(arc_meta >> 16) >> 16;
if (zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_METADATA]) + if (zfs_refcount_count(&arc_mru->arcs_size[ARC_BUFC_METADATA]) +
zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_METADATA]) - zfs_refcount_count(&arc_mfu->arcs_size[ARC_BUFC_METADATA]) -
zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) - zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) -
@ -4481,7 +4481,7 @@ arc_evict(void)
arc_prune_async(prune); arc_prune_async(prune);
/* Evict MRU metadata. */ /* Evict MRU metadata. */
w = wt * (arc_meta * arc_pm >> 48) >> 16; w = wt * (int64_t)(arc_meta * arc_pm >> 48) >> 16;
e = MIN((int64_t)(asize - arc_c), (int64_t)(mrum - w)); e = MIN((int64_t)(asize - arc_c), (int64_t)(mrum - w));
bytes = arc_evict_impl(arc_mru, ARC_BUFC_METADATA, e); bytes = arc_evict_impl(arc_mru, ARC_BUFC_METADATA, e);
total_evicted += bytes; total_evicted += bytes;
@ -4489,7 +4489,7 @@ arc_evict(void)
asize -= bytes; asize -= bytes;
/* Evict MFU metadata. */ /* Evict MFU metadata. */
w = wt * (arc_meta >> 16) >> 16; w = wt * (int64_t)(arc_meta >> 16) >> 16;
e = MIN((int64_t)(asize - arc_c), (int64_t)(m - w)); e = MIN((int64_t)(asize - arc_c), (int64_t)(m - w));
bytes = arc_evict_impl(arc_mfu, ARC_BUFC_METADATA, e); bytes = arc_evict_impl(arc_mfu, ARC_BUFC_METADATA, e);
total_evicted += bytes; total_evicted += bytes;
@ -4498,7 +4498,7 @@ arc_evict(void)
/* Evict MRU data. */ /* Evict MRU data. */
wt -= m - total_evicted; wt -= m - total_evicted;
w = wt * (arc_pd >> 16) >> 16; w = wt * (int64_t)(arc_pd >> 16) >> 16;
e = MIN((int64_t)(asize - arc_c), (int64_t)(mrud - w)); e = MIN((int64_t)(asize - arc_c), (int64_t)(mrud - w));
bytes = arc_evict_impl(arc_mru, ARC_BUFC_DATA, e); bytes = arc_evict_impl(arc_mru, ARC_BUFC_DATA, e);
total_evicted += bytes; total_evicted += bytes;
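
The (int64_t) casts above keep the weight computations signed: the shifted fractions come from unsigned counters, and under C's usual arithmetic conversions a signed-times-unsigned multiply is carried out in uint64_t, so a negative weight would wrap into an enormous positive eviction target. A standalone illustration with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        /* Made-up stand-ins for the arc_evict() terms: a signed weight
         * that has gone negative and an unsigned fixed-point fraction. */
        int64_t wt = -1000;
        uint64_t arc_meta = 1ULL << 32;

        /* Usual arithmetic conversions make this a uint64_t multiply,
         * so the negative weight wraps to a huge positive value. */
        int64_t w_bad = wt * (arc_meta >> 16) >> 16;

        /* The fix: force the multiply to stay in signed arithmetic
         * (>> on a negative value is arithmetic on the compilers
         * OpenZFS targets). */
        int64_t w_good = wt * (int64_t)(arc_meta >> 16) >> 16;

        printf("unsigned multiply: %lld\n", (long long)w_bad);
        printf("signed multiply:   %lld\n", (long long)w_good);
        return (0);
}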

View File

@ -1620,8 +1620,7 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
* If this is not true it indicates tampering and we report an error. * If this is not true it indicates tampering and we report an error.
*/ */
if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bpp)) { if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bpp)) {
spa_log_error(db->db_objset->os_spa, &zb, spa_log_error(db->db_objset->os_spa, &zb, &bpp->blk_birth);
&db->db_blkptr->blk_birth);
zfs_panic_recover("unencrypted block in encrypted " zfs_panic_recover("unencrypted block in encrypted "
"object set %llu", dmu_objset_id(db->db_objset)); "object set %llu", dmu_objset_id(db->db_objset));
err = SET_ERROR(EIO); err = SET_ERROR(EIO);

View File

@ -445,7 +445,7 @@ mmp_write_uberblock(spa_t *spa)
uint64_t offset; uint64_t offset;
hrtime_t lock_acquire_time = gethrtime(); hrtime_t lock_acquire_time = gethrtime();
spa_config_enter(spa, SCL_STATE, mmp_tag, RW_READER); spa_config_enter_mmp(spa, SCL_STATE, mmp_tag, RW_READER);
lock_acquire_time = gethrtime() - lock_acquire_time; lock_acquire_time = gethrtime() - lock_acquire_time;
if (lock_acquire_time > (MSEC2NSEC(MMP_MIN_INTERVAL) / 10)) if (lock_acquire_time > (MSEC2NSEC(MMP_MIN_INTERVAL) / 10))
zfs_dbgmsg("MMP SCL_STATE acquisition pool '%s' took %llu ns " zfs_dbgmsg("MMP SCL_STATE acquisition pool '%s' took %llu ns "

View File

@ -3084,6 +3084,12 @@ vdev_count_verify_zaps(vdev_t *vd)
spa_t *spa = vd->vdev_spa; spa_t *spa = vd->vdev_spa;
uint64_t total = 0; uint64_t total = 0;
if (spa_feature_is_active(vd->vdev_spa, SPA_FEATURE_AVZ_V2) &&
vd->vdev_root_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_root_zap));
}
if (vd->vdev_top_zap != 0) { if (vd->vdev_top_zap != 0) {
total++; total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset, ASSERT0(zap_lookup_int(spa->spa_meta_objset,
@ -7098,7 +7104,7 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
* Detach a device from a mirror or replacing vdev. * Detach a device from a mirror or replacing vdev.
* *
* If 'replace_done' is specified, only detach if the parent * If 'replace_done' is specified, only detach if the parent
* is a replacing vdev. * is a replacing or a spare vdev.
*/ */
int int
spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done) spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
@ -8321,7 +8327,8 @@ spa_async_thread(void *arg)
* If any devices are done replacing, detach them. * If any devices are done replacing, detach them.
*/ */
if (tasks & SPA_ASYNC_RESILVER_DONE || if (tasks & SPA_ASYNC_RESILVER_DONE ||
tasks & SPA_ASYNC_REBUILD_DONE) { tasks & SPA_ASYNC_REBUILD_DONE ||
tasks & SPA_ASYNC_DETACH_SPARE) {
spa_vdev_resilver_done(spa); spa_vdev_resilver_done(spa);
} }
@ -8665,6 +8672,11 @@ spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
{ {
spa_t *spa = vd->vdev_spa; spa_t *spa = vd->vdev_spa;
if (vd->vdev_root_zap != 0 &&
spa_feature_is_active(spa, SPA_FEATURE_AVZ_V2)) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_root_zap, tx));
}
if (vd->vdev_top_zap != 0) { if (vd->vdev_top_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz, VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_top_zap, tx)); vd->vdev_top_zap, tx));
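
Both spa.c hunks above use the same gate: the root vdev's ZAP is only counted by vdev_count_verify_zaps() and only added to the all-vdev ZAP by spa_avz_build() when the vdev_zaps_v2 feature is active, so pools that never enabled the feature keep their original AVZ contents. A toy sketch of that gating, with placeholder names rather than the real SPA feature API:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Placeholder feature switch; the real check is spa_feature_is_active()
 * against SPA_FEATURE_AVZ_V2. */
static bool avz_v2_active;

struct vdev {
        uint64_t root_zap;      /* nonzero once a ZAP object exists */
        uint64_t top_zap;
};

/* Mirrors the shape of the vdev_count_verify_zaps() change: the root
 * vdev ZAP only counts when the feature that introduced it is active. */
static uint64_t
count_zaps(const struct vdev *vd)
{
        uint64_t total = 0;

        if (avz_v2_active && vd->root_zap != 0)
                total++;
        if (vd->top_zap != 0)
                total++;
        return (total);
}

int
main(void)
{
        struct vdev root = { .root_zap = 42, .top_zap = 0 };

        printf("feature inactive: %llu zaps\n",
            (unsigned long long)count_zaps(&root));
        avz_v2_active = true;
        printf("feature active:   %llu zaps\n",
            (unsigned long long)count_zaps(&root));
        return (0);
}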

View File

@ -354,12 +354,12 @@ check_filesystem(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
dsl_dataset_rele(ds, FTAG); dsl_dataset_rele(ds, FTAG);
return (error); return (error);
} }
}
if (snap_count == 0) { if (snap_count == 0) {
/* Filesystem without snapshots. */ /* Filesystem without snapshots. */
dsl_dataset_rele(ds, FTAG); dsl_dataset_rele(ds, FTAG);
return (0); return (0);
}
} }
uint64_t *snap_obj_array = kmem_zalloc(snap_count * sizeof (uint64_t), uint64_t *snap_obj_array = kmem_zalloc(snap_count * sizeof (uint64_t),
@ -470,9 +470,9 @@ static int check_clones(spa_t *spa, uint64_t zap_clone, uint64_t snap_count,
break; break;
} }
zap_cursor_fini(zc);
kmem_free(za, sizeof (*za)); kmem_free(za, sizeof (*za));
kmem_free(zc, sizeof (*zc)); kmem_free(zc, sizeof (*zc));
zap_cursor_fini(zc);
return (error); return (error);
} }
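
The check_clones() reordering is a use-after-free fix: zap_cursor_fini() still dereferences the cursor, so it must run before the kmem_free() that releases the cursor's memory. A tiny userspace analogue of the same ordering rule, with a hypothetical cursor type standing in for the ZAP cursor:

#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for a ZAP cursor: fini() dereferences it. */
struct cursor {
        char *scratch;
};

static void
cursor_init(struct cursor *zc)
{
        zc->scratch = malloc(64);
        if (zc->scratch != NULL)
                memset(zc->scratch, 0, 64);
}

static void
cursor_fini(struct cursor *zc)
{
        free(zc->scratch);      /* reads *zc, so zc must still be valid */
        zc->scratch = NULL;
}

int
main(void)
{
        struct cursor *zc = calloc(1, sizeof (*zc));

        if (zc == NULL)
                return (1);
        cursor_init(zc);

        /* Correct order, as in the fixed check_clones(): finalize the
         * cursor first, then release its backing allocation.  Calling
         * free(zc) before cursor_fini(zc) would be a use-after-free. */
        cursor_fini(zc);
        free(zc);
        return (0);
}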

Some files were not shown because too many files have changed in this diff.