MFV r248217:

Merge this change from the vendor branch only to reduce the diff against upstream.
ZFS DTrace probes are not supported on FreeBSD yet.

Illumos ZFS issues:
  3598 want to dtrace when errors are generated in zfs

MFC after:	3 weeks
Martin Matuska 2013-04-06 10:39:38 +00:00
commit f1b5c26470
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=249195
58 changed files with 1076 additions and 857 deletions

View File

@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
*/
@ -86,6 +87,9 @@ extern "C" {
#include <sys/sysevent/dev.h>
#include <machine/atomic.h>
#include <sys/debug.h>
#ifdef illumos
#include "zfs.h"
#endif
#define ZFS_EXPORTS_PATH "/etc/zfs/exports"
@ -133,28 +137,64 @@ extern int aok;
#ifdef DTRACE_PROBE
#undef DTRACE_PROBE
#define DTRACE_PROBE(a) ((void)0)
#endif /* DTRACE_PROBE */
#ifdef illumos
#define DTRACE_PROBE(a) \
ZFS_PROBE0(#a)
#endif
#ifdef DTRACE_PROBE1
#undef DTRACE_PROBE1
#define DTRACE_PROBE1(a, b, c) ((void)0)
#endif /* DTRACE_PROBE1 */
#ifdef illumos
#define DTRACE_PROBE1(a, b, c) \
ZFS_PROBE1(#a, (unsigned long)c)
#endif
#ifdef DTRACE_PROBE2
#undef DTRACE_PROBE2
#define DTRACE_PROBE2(a, b, c, d, e) ((void)0)
#endif /* DTRACE_PROBE2 */
#ifdef illumos
#define DTRACE_PROBE2(a, b, c, d, e) \
ZFS_PROBE2(#a, (unsigned long)c, (unsigned long)e)
#endif
#ifdef DTRACE_PROBE3
#undef DTRACE_PROBE3
#define DTRACE_PROBE3(a, b, c, d, e, f, g) ((void)0)
#endif /* DTRACE_PROBE3 */
#ifdef illumos
#define DTRACE_PROBE3(a, b, c, d, e, f, g) \
ZFS_PROBE3(#a, (unsigned long)c, (unsigned long)e, (unsigned long)g)
#endif
#ifdef DTRACE_PROBE4
#undef DTRACE_PROBE4
#define DTRACE_PROBE4(a, b, c, d, e, f, g, h, i) ((void)0)
#endif /* DTRACE_PROBE4 */
#ifdef illumos
#define DTRACE_PROBE4(a, b, c, d, e, f, g, h, i) \
ZFS_PROBE4(#a, (unsigned long)c, (unsigned long)e, (unsigned long)g, \
(unsigned long)i)
#endif
#ifdef illumos
/*
* We use the comma operator so that this macro can be used without much
* additional code. For example, "return (EINVAL);" becomes
* "return (SET_ERROR(EINVAL));". Note that the argument will be evaluated
* twice, so it should not have side effects (e.g. something like:
* "return (SET_ERROR(log_error(EINVAL, info)));" would log the error twice).
*/
#define SET_ERROR(err) (ZFS_SET_ERROR(err), err)
#else /* !illumos */
#define DTRACE_PROBE(a) ((void)0)
#define DTRACE_PROBE1(a, b, c) ((void)0)
#define DTRACE_PROBE2(a, b, c, d, e) ((void)0)
#define DTRACE_PROBE3(a, b, c, d, e, f, g) ((void)0)
#define DTRACE_PROBE4(a, b, c, d, e, f, g, h, i) ((void)0)
#define SET_ERROR(err) (err)
#endif /* !illumos */
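
To make the comma-operator comment above concrete, here is a minimal, self-contained C sketch. ZFS_SET_ERROR is stubbed out purely for illustration (the real macro comes from the dtrace-generated header), and the expansion shows why an argument with side effects would be evaluated twice:

	#include <errno.h>
	#include <stdio.h>

	/* Stand-in for the generated probe macro; illustration only. */
	#define	ZFS_SET_ERROR(e)	((void)(e))
	/* Same shape as the illumos definition merged above. */
	#define	SET_ERROR(err)		(ZFS_SET_ERROR(err), err)

	static int
	lookup(int found)
	{
		if (!found) {
			/*
			 * Expands to return ((ZFS_SET_ERROR(ENOENT), ENOENT));
			 * note that the argument appears twice in the expansion.
			 */
			return (SET_ERROR(ENOENT));
		}
		return (0);
	}

	int
	main(void)
	{
		printf("%d\n", lookup(0));	/* prints the numeric value of ENOENT */
		return (0);
	}
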
/*
* Threads

View File

@ -0,0 +1,36 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
provider zfs {
probe probe0(char *probename);
probe probe1(char *probename, unsigned long arg1);
probe probe2(char *probename, unsigned long arg1, unsigned long arg2);
probe probe3(char *probename, unsigned long arg1, unsigned long arg2,
unsigned long arg3);
probe probe4(char *probename, unsigned long arg1, unsigned long arg2,
unsigned long arg3, unsigned long arg4);
probe set__error(int err);
};
#pragma D attributes Evolving/Evolving/ISA provider zfs provider
#pragma D attributes Private/Private/Unknown provider zfs module
#pragma D attributes Private/Private/Unknown provider zfs function
#pragma D attributes Evolving/Evolving/ISA provider zfs name
#pragma D attributes Evolving/Evolving/ISA provider zfs args
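
On illumos, running dtrace -h over this provider file yields the generated header (included in the zfs_context.h hunk above as "zfs.h") whose ZFS_PROBE0 through ZFS_PROBE4 and ZFS_SET_ERROR macros back the DTRACE_PROBE* and SET_ERROR wrappers. In D, the double underscore in set__error becomes a dash, so a script attached to a libzpool consumer could enable zfs$target:::set-error and aggregate on arg0. A hedged sketch of what a probe site then expands into on that build follows; the function and variable names are hypothetical.

	#include <sys/zfs_context.h>	/* supplies DTRACE_PROBE*() and SET_ERROR() */
	#include <errno.h>

	/* Hypothetical call site, illustrating the macro plumbing only. */
	static int
	example_open(int rc)
	{
		if (rc != 0) {
			/*
			 * On the illumos build this becomes
			 * ZFS_PROBE1("example__open__fail", (unsigned long)rc),
			 * i.e. the zfs provider's probe1 with the probe name
			 * string and one argument; on FreeBSD it is a no-op.
			 */
			DTRACE_PROBE1(example__open__fail, int, rc);
			/* Fires set-error with arg0 == EIO, then returns EIO. */
			return (SET_ERROR(EIO));
		}
		return (0);
	}
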

View File

@ -41,6 +41,8 @@
#define DTRACE_PROBE1(name, type1, arg1)
#define DTRACE_PROBE2(name, type1, arg1, type2, arg2)
#define DTRACE_PROBE3(name, type1, arg1, type2, arg2, type3, arg3)
#define DTRACE_PROBE4(name, type1, arg1, type2, arg2, type3, arg3, type4, arg4)
#define SET_ERROR(err) (err)
#endif /* _OPENSOLARIS_SYS_SDT_H_ */

View File

@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -3738,14 +3738,14 @@ arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
*/
if (curproc == pageproc) {
if (page_load > available_memory / 4)
return (ERESTART);
return (SET_ERROR(ERESTART));
/* Note: reserve is inflated, so we deflate */
page_load += reserve / 8;
return (0);
} else if (page_load > 0 && arc_reclaim_needed()) {
/* memory is low, delay before restarting */
ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
return (EAGAIN);
return (SET_ERROR(EAGAIN));
}
page_load = 0;
@ -3760,7 +3760,7 @@ arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
if (inflight_data > available_memory / 4) {
ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
return (ERESTART);
return (SET_ERROR(ERESTART));
}
#endif
return (0);
@ -3785,13 +3785,13 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
*/
if (spa_get_random(10000) == 0) {
dprintf("forcing random failure\n");
return (ERESTART);
return (SET_ERROR(ERESTART));
}
#endif
if (reserve > arc_c/4 && !arc_no_grow)
arc_c = MIN(arc_c_max, reserve * 4);
if (reserve > arc_c)
return (ENOMEM);
return (SET_ERROR(ENOMEM));
/*
* Don't count loaned bufs as in flight dirty data to prevent long
@ -3824,7 +3824,7 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)
arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
reserve>>10, arc_c>>10);
return (ERESTART);
return (SET_ERROR(ERESTART));
}
atomic_add_64(&arc_tempreserve, reserve);
return (0);
@ -4518,7 +4518,7 @@ l2arc_read_done(zio_t *zio)
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_io_error);
} else {
zio->io_error = EIO;
zio->io_error = SET_ERROR(EIO);
}
if (!equal)
ARCSTAT_BUMP(arcstat_l2_cksum_bad);

View File

@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -598,7 +598,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
ASSERT(!refcount_is_zero(&db->db_holds));
if (db->db_state == DB_NOFILL)
return (EIO);
return (SET_ERROR(EIO));
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
@ -655,7 +655,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
cv_wait(&db->db_changed, &db->db_mtx);
}
if (db->db_state == DB_UNCACHED)
err = EIO;
err = SET_ERROR(EIO);
}
mutex_exit(&db->db_mtx);
}
@ -1593,7 +1593,7 @@ dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
if (level >= nlevels ||
(blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
/* the buffer has no parent yet */
return (ENOENT);
return (SET_ERROR(ENOENT));
} else if (level < nlevels-1) {
/* this block is referenced from an indirect block */
int err = dbuf_hold_impl(dn, level+1,
@ -1844,7 +1844,7 @@ dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid, int fail_sparse,
err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
if (fail_sparse) {
if (err == 0 && bp && BP_IS_HOLE(bp))
err = ENOENT;
err = SET_ERROR(ENOENT);
if (err) {
if (parent)
dbuf_rele(parent, NULL);
@ -1941,7 +1941,7 @@ dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
dnode_t *dn;
if (db->db_blkid != DMU_SPILL_BLKID)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
if (blksz == 0)
blksz = SPA_MINBLOCKSIZE;
if (blksz > SPA_MAXBLOCKSIZE)

View File

@ -21,7 +21,7 @@
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -174,7 +174,7 @@ ddt_object_lookup(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
ddt_entry_t *dde)
{
if (!ddt_object_exists(ddt, type, class))
return (ENOENT);
return (SET_ERROR(ENOENT));
return (ddt_ops[type]->ddt_op_lookup(ddt->ddt_os,
ddt->ddt_object[type][class], dde));
@ -235,7 +235,7 @@ ddt_object_info(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
dmu_object_info_t *doi)
{
if (!ddt_object_exists(ddt, type, class))
return (ENOENT);
return (SET_ERROR(ENOENT));
return (dmu_object_info(ddt->ddt_os, ddt->ddt_object[type][class],
doi));
@ -1157,5 +1157,5 @@ ddt_walk(spa_t *spa, ddt_bookmark_t *ddb, ddt_entry_t *dde)
ddb->ddb_type = 0;
} while (++ddb->ddb_class < DDT_CLASSES);
return (ENOENT);
return (SET_ERROR(ENOENT));
}

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/dmu.h>
@ -146,7 +146,7 @@ dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
db = dbuf_hold(dn, blkid, tag);
rw_exit(&dn->dn_struct_rwlock);
if (db == NULL) {
err = EIO;
err = SET_ERROR(EIO);
} else {
err = dbuf_read(db, NULL, db_flags);
if (err) {
@ -177,9 +177,9 @@ dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
dn = DB_DNODE(db);
if (dn->dn_bonus != db) {
error = EINVAL;
error = SET_ERROR(EINVAL);
} else if (newsize < 0 || newsize > db_fake->db_size) {
error = EINVAL;
error = SET_ERROR(EINVAL);
} else {
dnode_setbonuslen(dn, newsize, tx);
error = 0;
@ -200,9 +200,9 @@ dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
dn = DB_DNODE(db);
if (!DMU_OT_IS_VALID(type)) {
error = EINVAL;
error = SET_ERROR(EINVAL);
} else if (dn->dn_bonus != db) {
error = EINVAL;
error = SET_ERROR(EINVAL);
} else {
dnode_setbonus_type(dn, type, tx);
error = 0;
@ -329,12 +329,12 @@ dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
dn = DB_DNODE(db);
if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
err = EINVAL;
err = SET_ERROR(EINVAL);
} else {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (!dn->dn_have_spill) {
err = ENOENT;
err = SET_ERROR(ENOENT);
} else {
err = dmu_spill_hold_by_dnode(dn,
DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
@ -400,7 +400,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
(longlong_t)dn->dn_object, dn->dn_datablksz,
(longlong_t)offset, (longlong_t)length);
rw_exit(&dn->dn_struct_rwlock);
return (EIO);
return (SET_ERROR(EIO));
}
nblks = 1;
}
@ -417,7 +417,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
rw_exit(&dn->dn_struct_rwlock);
dmu_buf_rele_array(dbp, nblks, tag);
zio_nowait(zio);
return (EIO);
return (SET_ERROR(EIO));
}
/* initiate async i/o */
if (read)
@ -449,7 +449,7 @@ dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
if (db->db_state == DB_UNCACHED)
err = EIO;
err = SET_ERROR(EIO);
mutex_exit(&db->db_mtx);
if (err) {
dmu_buf_rele_array(dbp, nblks, tag);
@ -1363,7 +1363,8 @@ dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
dmu_tx_abort(tx);
return (EIO); /* Make zl_get_data do txg_waited_synced() */
/* Make zl_get_data do txg_waited_synced() */
return (SET_ERROR(EIO));
}
dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
@ -1448,7 +1449,7 @@ dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
* This txg has already synced. There's nothing to do.
*/
mutex_exit(&db->db_mtx);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
if (txg <= spa_syncing_txg(os->os_spa)) {
@ -1470,7 +1471,7 @@ dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
* There's no need to log writes to freed blocks, so we're done.
*/
mutex_exit(&db->db_mtx);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg);
@ -1499,7 +1500,7 @@ dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
* have been dirtied since, or we would have cleared the state.
*/
mutex_exit(&db->db_mtx);
return (EALREADY);
return (SET_ERROR(EALREADY));
}
ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/dmu.h>
@ -136,7 +136,7 @@ diff_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
int err = 0;
if (issig(JUSTLOOKING) && issig(FORREAL))
return (EINTR);
return (SET_ERROR(EINTR));
if (zb->zb_object != DMU_META_DNODE_OBJECT)
return (0);
@ -159,7 +159,7 @@ diff_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
&aflags, zb) != 0)
return (EIO);
return (SET_ERROR(EIO));
blk = abuf->b_data;
for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
@ -195,7 +195,7 @@ dmu_diff(const char *tosnap_name, const char *fromsnap_name,
if (strchr(tosnap_name, '@') == NULL ||
strchr(fromsnap_name, '@') == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
error = dsl_pool_hold(tosnap_name, FTAG, &dp);
if (error != 0)
@ -218,7 +218,7 @@ dmu_diff(const char *tosnap_name, const char *fromsnap_name,
dsl_dataset_rele(fromsnap, FTAG);
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
fromtxg = fromsnap->ds_phys->ds_creation_txg;

View File

@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/dmu.h>
@ -90,7 +91,7 @@ dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
int err;
if (object == DMU_META_DNODE_OBJECT && !dmu_tx_private_ok(tx))
return (EBADF);
return (SET_ERROR(EBADF));
err = dnode_hold_impl(os, object, DNODE_MUST_BE_FREE, FTAG, &dn);
if (err)
@ -112,7 +113,7 @@ dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
int err;
if (object == DMU_META_DNODE_OBJECT)
return (EBADF);
return (SET_ERROR(EBADF));
err = dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED,
FTAG, &dn);

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
@ -285,7 +285,7 @@ dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
kmem_free(os, sizeof (objset_t));
/* convert checksum errors into IO errors */
if (err == ECKSUM)
err = EIO;
err = SET_ERROR(EIO);
return (err);
}
@ -498,10 +498,10 @@ dmu_objset_own(const char *name, dmu_objset_type_t type,
dsl_dataset_disown(ds, tag);
} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
dsl_dataset_disown(ds, tag);
return (EINVAL);
return (SET_ERROR(EINVAL));
} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
dsl_dataset_disown(ds, tag);
return (EROFS);
return (SET_ERROR(EROFS));
}
return (err);
}
@ -714,14 +714,14 @@ dmu_objset_create_check(void *arg, dmu_tx_t *tx)
int error;
if (strchr(doca->doca_name, '@') != NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
if (error != 0)
return (error);
if (tail == NULL) {
dsl_dir_rele(pdd, FTAG);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
dsl_dir_rele(pdd, FTAG);
@ -795,19 +795,19 @@ dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
dsl_pool_t *dp = dmu_tx_pool(tx);
if (strchr(doca->doca_clone, '@') != NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
if (error != 0)
return (error);
if (tail == NULL) {
dsl_dir_rele(pdd, FTAG);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
/* You can't clone across pools. */
if (pdd->dd_pool != dp) {
dsl_dir_rele(pdd, FTAG);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
dsl_dir_rele(pdd, FTAG);
@ -818,13 +818,13 @@ dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
/* You can't clone across pools. */
if (origin->ds_dir->dd_pool != dp) {
dsl_dataset_rele(origin, FTAG);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
/* You can only clone snapshots, not the head datasets. */
if (!dsl_dataset_is_snapshot(origin)) {
dsl_dataset_rele(origin, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
dsl_dataset_rele(origin, FTAG);
@ -1302,9 +1302,9 @@ dmu_objset_userspace_upgrade(objset_t *os)
if (dmu_objset_userspace_present(os))
return (0);
if (!dmu_objset_userused_enabled(os))
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
if (dmu_objset_is_snapshot(os))
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* We simply need to mark every object dirty, so that it will be
@ -1320,7 +1320,7 @@ dmu_objset_userspace_upgrade(objset_t *os)
int objerr;
if (issig(JUSTLOOKING) && issig(FORREAL))
return (EINTR);
return (SET_ERROR(EINTR));
objerr = dmu_bonus_hold(os, obj, FTAG, &db);
if (objerr != 0)
@ -1396,7 +1396,7 @@ dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
uint64_t ignored;
if (ds->ds_phys->ds_snapnames_zapobj == 0)
return (ENOENT);
return (SET_ERROR(ENOENT));
return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
ds->ds_phys->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_FIRST,
@ -1414,7 +1414,7 @@ dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));
if (ds->ds_phys->ds_snapnames_zapobj == 0)
return (ENOENT);
return (SET_ERROR(ENOENT));
zap_cursor_init_serialized(&cursor,
ds->ds_dir->dd_pool->dp_meta_objset,
@ -1422,12 +1422,12 @@ dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
if (zap_cursor_retrieve(&cursor, &attr) != 0) {
zap_cursor_fini(&cursor);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
if (strlen(attr.za_name) + 1 > namelen) {
zap_cursor_fini(&cursor);
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
}
(void) strcpy(name, attr.za_name);
@ -1453,7 +1453,7 @@ dmu_dir_list_next(objset_t *os, int namelen, char *name,
/* there is no next dir on a snapshot! */
if (os->os_dsl_dataset->ds_object !=
dd->dd_phys->dd_head_dataset_obj)
return (ENOENT);
return (SET_ERROR(ENOENT));
zap_cursor_init_serialized(&cursor,
dd->dd_pool->dp_meta_objset,
@ -1461,12 +1461,12 @@ dmu_dir_list_next(objset_t *os, int namelen, char *name,
if (zap_cursor_retrieve(&cursor, &attr) != 0) {
zap_cursor_fini(&cursor);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
if (strlen(attr.za_name) + 1 > namelen) {
zap_cursor_fini(&cursor);
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
}
(void) strcpy(name, attr.za_name);
@ -1733,9 +1733,9 @@ dmu_fsname(const char *snapname, char *buf)
{
char *atp = strchr(snapname, '@');
if (atp == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (atp - snapname >= MAXNAMELEN)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
(void) strlcpy(buf, snapname, atp - snapname + 1);
return (0);
}

View File

@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2012, Martin Matuska <mm@FreeBSD.org>. All rights reserved.
*/
@ -110,7 +110,7 @@ dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
dsp->dsa_pending_op != PENDING_FREE) {
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
dsp->dsa_pending_op = PENDING_NONE;
}
@ -134,7 +134,7 @@ dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
/* not a continuation. Push out pending record */
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
dsp->dsa_pending_op = PENDING_NONE;
}
}
@ -148,7 +148,7 @@ dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
if (length == -1ULL) {
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
} else {
dsp->dsa_pending_op = PENDING_FREE;
}
@ -172,7 +172,7 @@ dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
if (dsp->dsa_pending_op != PENDING_NONE) {
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
dsp->dsa_pending_op = PENDING_NONE;
}
/* write a DATA record */
@ -192,9 +192,9 @@ dump_data(dmu_sendarg_t *dsp, dmu_object_type_t type,
drrw->drr_key.ddk_cksum = bp->blk_cksum;
if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
if (dump_bytes(dsp, data, blksz) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
return (0);
}
@ -206,7 +206,7 @@ dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
if (dsp->dsa_pending_op != PENDING_NONE) {
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
dsp->dsa_pending_op = PENDING_NONE;
}
@ -218,9 +218,9 @@ dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
drrs->drr_toguid = dsp->dsa_toguid;
if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)))
return (EINTR);
return (SET_ERROR(EINTR));
if (dump_bytes(dsp, data, blksz))
return (EINTR);
return (SET_ERROR(EINTR));
return (0);
}
@ -240,7 +240,7 @@ dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
dsp->dsa_pending_op = PENDING_NONE;
}
if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
@ -255,7 +255,7 @@ dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
/* can't be aggregated. Push out pending record */
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
dsp->dsa_pending_op = PENDING_NONE;
}
}
@ -283,7 +283,7 @@ dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
if (dsp->dsa_pending_op != PENDING_NONE) {
if (dump_bytes(dsp, dsp->dsa_drr,
sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
dsp->dsa_pending_op = PENDING_NONE;
}
@ -300,17 +300,17 @@ dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
drro->drr_toguid = dsp->dsa_toguid;
if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
if (dump_bytes(dsp, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
return (EINTR);
return (SET_ERROR(EINTR));
/* free anything past the end of the file */
if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
(dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
return (EINTR);
return (SET_ERROR(EINTR));
if (dsp->dsa_err != 0)
return (EINTR);
return (SET_ERROR(EINTR));
return (0);
}
@ -328,7 +328,7 @@ backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
int err = 0;
if (issig(JUSTLOOKING) && issig(FORREAL))
return (EINTR);
return (SET_ERROR(EINTR));
if (zb->zb_object != DMU_META_DNODE_OBJECT &&
DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
@ -352,7 +352,7 @@ backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
&aflags, zb) != 0)
return (EIO);
return (SET_ERROR(EIO));
blk = abuf->b_data;
for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
@ -371,7 +371,7 @@ backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
&aflags, zb) != 0)
return (EIO);
return (SET_ERROR(EIO));
err = dump_spill(dsp, zb->zb_object, blksz, abuf->b_data);
(void) arc_buf_remove_ref(abuf, &abuf);
@ -393,7 +393,7 @@ backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
ptr++)
*ptr = 0x2f5baddb10c;
} else {
return (EIO);
return (SET_ERROR(EIO));
}
}
@ -427,7 +427,7 @@ dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
dsl_dataset_rele(fromds, tag);
dsl_dataset_rele(ds, tag);
dsl_pool_rele(dp, tag);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
err = dmu_objset_from_ds(ds, &os);
@ -454,7 +454,7 @@ dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
dsl_dataset_rele(fromds, tag);
dsl_dataset_rele(ds, tag);
dsl_pool_rele(dp, tag);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (version >= ZPL_VERSION_SA) {
DMU_SET_FEATUREFLAGS(
@ -513,7 +513,7 @@ dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *ds,
if (dsp->dsa_pending_op != PENDING_NONE)
if (dump_bytes(dsp, drr, sizeof (dmu_replay_record_t)) != 0)
err = EINTR;
err = SET_ERROR(EINTR);
if (err != 0) {
if (err == EINTR && dsp->dsa_err != 0)
@ -594,9 +594,9 @@ dmu_send(const char *tosnap, const char *fromsnap,
int err;
if (strchr(tosnap, '@') == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (fromsnap != NULL && strchr(fromsnap, '@') == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
err = dsl_pool_hold(tosnap, FTAG, &dp);
if (err != 0)
@ -630,14 +630,14 @@ dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
/* tosnap must be a snapshot */
if (!dsl_dataset_is_snapshot(ds))
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* fromsnap must be an earlier snapshot from the same fs as tosnap,
* or the origin's fs.
*/
if (fromds != NULL && !dsl_dataset_is_before(ds, fromds))
return (EXDEV);
return (SET_ERROR(EXDEV));
/* Get uncompressed size estimate of changed data. */
if (fromds == NULL) {
@ -696,7 +696,7 @@ recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
/* must not have any changes since most recent snapshot */
if (!drba->drba_cookie->drc_force &&
dsl_dataset_modified_since_lastsnap(ds))
return (ETXTBSY);
return (SET_ERROR(ETXTBSY));
/* temporary clone name must not exist */
error = zap_lookup(dp->dp_meta_objset,
@ -715,7 +715,7 @@ recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
if (fromguid != 0) {
/* if incremental, most recent snapshot must match fromguid */
if (ds->ds_prev == NULL)
return (ENODEV);
return (SET_ERROR(ENODEV));
/*
* most recent snapshot must match fromguid, or there are no
@ -729,10 +729,10 @@ recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
error = dsl_dataset_hold_obj(dp, obj, FTAG,
&snap);
if (error != 0)
return (ENODEV);
return (SET_ERROR(ENODEV));
if (snap->ds_phys->ds_creation_txg < birth) {
dsl_dataset_rele(snap, FTAG);
return (ENODEV);
return (SET_ERROR(ENODEV));
}
if (snap->ds_phys->ds_guid == fromguid) {
dsl_dataset_rele(snap, FTAG);
@ -742,12 +742,12 @@ recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
dsl_dataset_rele(snap, FTAG);
}
if (obj == 0)
return (ENODEV);
return (SET_ERROR(ENODEV));
}
} else {
/* if full, most recent snapshot must be $ORIGIN */
if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
return (ENODEV);
return (SET_ERROR(ENODEV));
}
return (0);
@ -773,13 +773,13 @@ dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
DMU_COMPOUNDSTREAM ||
drrb->drr_type >= DMU_OST_NUMTYPES ||
((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
return (EINVAL);
return (SET_ERROR(EINVAL));
/* Verify pool version supports SA if SA_SPILL feature set */
if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_SA_SPILL) &&
spa_version(dp->dp_spa) < SPA_VERSION_SA) {
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
@ -789,7 +789,7 @@ dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
/* Can't recv a clone into an existing fs */
if (flags & DRR_FLAG_CLONE) {
dsl_dataset_rele(ds, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
error = recv_begin_check_existing_impl(drba, ds, fromguid);
@ -803,7 +803,7 @@ dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
* target fs, so fail the recv.
*/
if (fromguid != 0 && !(flags & DRR_FLAG_CLONE))
return (ENOENT);
return (SET_ERROR(ENOENT));
/* Open the parent of tofs */
ASSERT3U(strlen(tofs), <, MAXNAMELEN);
@ -823,12 +823,12 @@ dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
if (!dsl_dataset_is_snapshot(origin)) {
dsl_dataset_rele(origin, FTAG);
dsl_dataset_rele(ds, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (origin->ds_phys->ds_guid != fromguid) {
dsl_dataset_rele(origin, FTAG);
dsl_dataset_rele(ds, FTAG);
return (ENODEV);
return (SET_ERROR(ENODEV));
}
dsl_dataset_rele(origin, FTAG);
}
@ -919,7 +919,7 @@ dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
drc->drc_byteswap = B_TRUE;
else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
return (EINVAL);
return (SET_ERROR(EINVAL));
drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
drr->drr_type = DRR_BEGIN;
@ -1038,7 +1038,7 @@ restore_read(struct restorearg *ra, int len)
len - done, ra->voff, &resid);
if (resid == len - done)
ra->err = EINVAL;
ra->err = SET_ERROR(EINVAL);
ra->voff += len - done - resid;
done = len - resid;
if (ra->err != 0)
@ -1150,13 +1150,13 @@ restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
drro->drr_blksz < SPA_MINBLOCKSIZE ||
drro->drr_blksz > SPA_MAXBLOCKSIZE ||
drro->drr_bonuslen > DN_MAX_BONUSLEN) {
return (EINVAL);
return (SET_ERROR(EINVAL));
}
err = dmu_object_info(os, drro->drr_object, NULL);
if (err != 0 && err != ENOENT)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (drro->drr_bonuslen) {
data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
@ -1184,7 +1184,7 @@ restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
drro->drr_bonustype, drro->drr_bonuslen);
}
if (err != 0) {
return (EINVAL);
return (SET_ERROR(EINVAL));
}
tx = dmu_tx_create(os);
@ -1227,7 +1227,7 @@ restore_freeobjects(struct restorearg *ra, objset_t *os,
uint64_t obj;
if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
return (EINVAL);
return (SET_ERROR(EINVAL));
for (obj = drrfo->drr_firstobj;
obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
@ -1254,14 +1254,14 @@ restore_write(struct restorearg *ra, objset_t *os,
if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
!DMU_OT_IS_VALID(drrw->drr_type))
return (EINVAL);
return (SET_ERROR(EINVAL));
data = restore_read(ra, drrw->drr_length);
if (data == NULL)
return (ra->err);
if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
tx = dmu_tx_create(os);
@ -1303,7 +1303,7 @@ restore_write_byref(struct restorearg *ra, objset_t *os,
dmu_buf_t *dbp;
if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* If the GUID of the referenced dataset is different from the
@ -1313,10 +1313,10 @@ restore_write_byref(struct restorearg *ra, objset_t *os,
gmesrch.guid = drrwbr->drr_refguid;
if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
&where)) == NULL) {
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
return (EINVAL);
return (SET_ERROR(EINVAL));
} else {
ref_os = os;
}
@ -1351,14 +1351,14 @@ restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
if (drrs->drr_length < SPA_MINBLOCKSIZE ||
drrs->drr_length > SPA_MAXBLOCKSIZE)
return (EINVAL);
return (SET_ERROR(EINVAL));
data = restore_read(ra, drrs->drr_length);
if (data == NULL)
return (ra->err);
if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
@ -1400,10 +1400,10 @@ restore_free(struct restorearg *ra, objset_t *os,
if (drrf->drr_length != -1ULL &&
drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
err = dmu_free_long_range(os, drrf->drr_object,
drrf->drr_offset, drrf->drr_length);
@ -1460,7 +1460,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
minor_t minor;
if (cleanup_fd == -1) {
ra.err = EBADF;
ra.err = SET_ERROR(EBADF);
goto out;
}
ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
@ -1497,7 +1497,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
while (ra.err == 0 &&
NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
if (issig(JUSTLOOKING) && issig(FORREAL)) {
ra.err = EINTR;
ra.err = SET_ERROR(EINTR);
goto out;
}
@ -1551,7 +1551,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
* everything before the DRR_END record.
*/
if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
ra.err = ECKSUM;
ra.err = SET_ERROR(ECKSUM);
goto out;
}
case DRR_SPILL:
@ -1561,7 +1561,7 @@ dmu_recv_stream(dmu_recv_cookie_t *drc, struct file *fp, offset_t *voffp,
break;
}
default:
ra.err = EINVAL;
ra.err = SET_ERROR(EINVAL);
goto out;
}
pcksum = ra.cksum;

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -434,7 +434,7 @@ traverse_prefetcher(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
ASSERT(pfd->pd_blks_fetched >= 0);
if (pfd->pd_cancel)
return (EINTR);
return (SET_ERROR(EINTR));
if (bp == NULL || !((pfd->pd_flags & TRAVERSE_PREFETCH_DATA) ||
BP_GET_TYPE(bp) == DMU_OT_DNODE || BP_GET_LEVEL(bp) > 0) ||

View File

@ -160,7 +160,7 @@ dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
db = dbuf_hold_level(dn, level, blkid, FTAG);
rw_exit(&dn->dn_struct_rwlock);
if (db == NULL)
return (EIO);
return (SET_ERROR(EIO));
err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
dbuf_rele(db, FTAG);
return (err);
@ -370,7 +370,7 @@ dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
out:
if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
2 * DMU_MAX_ACCESS)
err = EFBIG;
err = SET_ERROR(EFBIG);
if (err)
txh->txh_tx->tx_err = err;
@ -922,9 +922,9 @@ dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
*/
if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
txg_how != TXG_WAIT)
return (EIO);
return (SET_ERROR(EIO));
return (ERESTART);
return (SET_ERROR(ERESTART));
}
tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
@ -945,7 +945,7 @@ dmu_tx_try_assign(dmu_tx_t *tx, txg_how_t txg_how)
if (dn->dn_assigned_txg == tx->tx_txg - 1) {
mutex_exit(&dn->dn_mtx);
tx->tx_needassign_txh = txh;
return (ERESTART);
return (SET_ERROR(ERESTART));
}
if (dn->dn_assigned_txg == 0)
dn->dn_assigned_txg = tx->tx_txg;

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -1036,12 +1036,12 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag,
dn = (object == DMU_USERUSED_OBJECT) ?
DMU_USERUSED_DNODE(os) : DMU_GROUPUSED_DNODE(os);
if (dn == NULL)
return (ENOENT);
return (SET_ERROR(ENOENT));
type = dn->dn_type;
if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
return (ENOENT);
return (SET_ERROR(ENOENT));
if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
return (EEXIST);
return (SET_ERROR(EEXIST));
DNODE_VERIFY(dn);
(void) refcount_add(&dn->dn_holds, tag);
*dnp = dn;
@ -1049,7 +1049,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag,
}
if (object == 0 || object >= DN_MAX_OBJECT)
return (EINVAL);
return (SET_ERROR(EINVAL));
mdn = DMU_META_DNODE(os);
ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);
@ -1067,7 +1067,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag,
if (drop_struct_lock)
rw_exit(&mdn->dn_struct_rwlock);
if (db == NULL)
return (EIO);
return (SET_ERROR(EIO));
err = dbuf_read(db, NULL, DB_RF_CANFAIL);
if (err) {
dbuf_rele(db, FTAG);
@ -1375,7 +1375,7 @@ dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
fail:
rw_exit(&dn->dn_struct_rwlock);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
/* read-holding callers must not rely on the lock being continuously held */
@ -1861,7 +1861,7 @@ dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
* at the pointer to this block in its parent, and its
* going to be unallocated, so we will skip over it.
*/
return (ESRCH);
return (SET_ERROR(ESRCH));
}
error = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_HAVESTRUCT);
if (error) {
@ -1877,7 +1877,7 @@ dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
* This can only happen when we are searching up the tree
* and these conditions mean that we need to keep climbing.
*/
error = ESRCH;
error = SET_ERROR(ESRCH);
} else if (lvl == 0) {
dnode_phys_t *dnp = data;
span = DNODE_SHIFT;
@ -1890,7 +1890,7 @@ dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
*offset += (1ULL << span) * inc;
}
if (i < 0 || i == blkfill)
error = ESRCH;
error = SET_ERROR(ESRCH);
} else {
blkptr_t *bp = data;
uint64_t start = *offset;
@ -1922,7 +1922,7 @@ dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
*offset = start;
}
if (i < 0 || i >= epb)
error = ESRCH;
error = SET_ERROR(ESRCH);
}
if (db)
@ -1966,7 +1966,7 @@ dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_phys->dn_nlevels == 0) {
error = ESRCH;
error = SET_ERROR(ESRCH);
goto out;
}
@ -1975,7 +1975,7 @@ dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
if (flags & DNODE_FIND_HOLE)
*offset = dn->dn_datablksz;
} else {
error = ESRCH;
error = SET_ERROR(ESRCH);
}
goto out;
}
@ -1996,7 +1996,7 @@ dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
initial_offset < *offset : initial_offset > *offset))
error = ESRCH;
error = SET_ERROR(ESRCH);
out:
if (!(flags & DNODE_FIND_HAVELOCK))
rw_exit(&dn->dn_struct_rwlock);

View File

@ -362,7 +362,7 @@ dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
/* Make sure dsobj has the correct object type. */
dmu_object_info_from_db(dbuf, &doi);
if (doi.doi_type != DMU_OT_DSL_DATASET)
return (EINVAL);
return (SET_ERROR(EINVAL));
ds = dmu_buf_get_user(dbuf);
if (ds == NULL) {
@ -479,7 +479,7 @@ dsl_dataset_hold(dsl_pool_t *dp, const char *name,
if (obj != 0)
err = dsl_dataset_hold_obj(dp, obj, tag, dsp);
else
err = ENOENT;
err = SET_ERROR(ENOENT);
/* we may be looking for a snapshot */
if (err == 0 && snapname != NULL) {
@ -488,7 +488,7 @@ dsl_dataset_hold(dsl_pool_t *dp, const char *name,
if (*snapname++ != '@') {
dsl_dataset_rele(*dsp, tag);
dsl_dir_rele(dd, FTAG);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
dprintf("looking for snapshot '%s'\n", snapname);
@ -521,7 +521,7 @@ dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj,
if (!dsl_dataset_tryown(*dsp, tag)) {
dsl_dataset_rele(*dsp, tag);
*dsp = NULL;
return (EBUSY);
return (SET_ERROR(EBUSY));
}
return (0);
}
@ -535,7 +535,7 @@ dsl_dataset_own(dsl_pool_t *dp, const char *name,
return (err);
if (!dsl_dataset_tryown(*dsp, tag)) {
dsl_dataset_rele(*dsp, tag);
return (EBUSY);
return (SET_ERROR(EBUSY));
}
return (0);
}
@ -972,7 +972,7 @@ dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
asize = MIN(ds->ds_phys->ds_unique_bytes, ds->ds_reserved);
if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
return (ENOSPC);
return (SET_ERROR(ENOSPC));
/*
* Propagate any reserved space for this snapshot to other
@ -1007,14 +1007,14 @@ dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname,
* is already one, try again.
*/
if (ds->ds_phys->ds_prev_snap_txg >= tx->tx_txg)
return (EAGAIN);
return (SET_ERROR(EAGAIN));
/*
* Check for conflicting snapshot name.
*/
error = dsl_dataset_snap_lookup(ds, snapname, &value);
if (error == 0)
return (EEXIST);
return (SET_ERROR(EEXIST));
if (error != ENOENT)
return (error);
@ -1042,11 +1042,11 @@ dsl_dataset_snapshot_check(void *arg, dmu_tx_t *tx)
name = nvpair_name(pair);
if (strlen(name) >= MAXNAMELEN)
error = ENAMETOOLONG;
error = SET_ERROR(ENAMETOOLONG);
if (error == 0) {
atp = strchr(name, '@');
if (atp == NULL)
error = EINVAL;
error = SET_ERROR(EINVAL);
if (error == 0)
(void) strlcpy(dsname, name, atp - name + 1);
}
@ -1255,7 +1255,7 @@ dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors)
atp = strchr(snapname, '@');
if (atp == NULL) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
(void) strlcpy(fsname, snapname, atp - snapname + 1);
@ -1328,7 +1328,7 @@ dsl_dataset_snapshot_tmp_check(void *arg, dmu_tx_t *tx)
if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS) {
dsl_dataset_rele(ds, FTAG);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
error = dsl_dataset_user_hold_check_one(NULL, ddsta->ddsta_htag,
B_TRUE, tx);
@ -1630,14 +1630,14 @@ dsl_dataset_rename_snapshot_check_impl(dsl_pool_t *dp,
/* new name should not exist */
error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_newsnapname, &val);
if (error == 0)
error = EEXIST;
error = SET_ERROR(EEXIST);
else if (error == ENOENT)
error = 0;
/* dataset name + 1 for the "@" + the new snapshot name must fit */
if (dsl_dir_namelen(hds->ds_dir) + 1 +
strlen(ddrsa->ddrsa_newsnapname) >= MAXNAMELEN)
error = ENAMETOOLONG;
error = SET_ERROR(ENAMETOOLONG);
return (error);
}
@ -1769,18 +1769,18 @@ dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
/* must not be a snapshot */
if (dsl_dataset_is_snapshot(ds)) {
dsl_dataset_rele(ds, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/* must have a most recent snapshot */
if (ds->ds_phys->ds_prev_snap_txg < TXG_INITIAL) {
dsl_dataset_rele(ds, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (dsl_dataset_long_held(ds)) {
dsl_dataset_rele(ds, FTAG);
return (EBUSY);
return (SET_ERROR(EBUSY));
}
/*
@ -1790,7 +1790,7 @@ dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
if (ds->ds_quota != 0 &&
ds->ds_prev->ds_phys->ds_referenced_bytes > ds->ds_quota) {
dsl_dataset_rele(ds, FTAG);
return (EDQUOT);
return (SET_ERROR(EDQUOT));
}
/*
@ -1807,7 +1807,7 @@ dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
unused_refres_delta >
dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) {
dsl_dataset_rele(ds, FTAG);
return (ENOSPC);
return (SET_ERROR(ENOSPC));
}
dsl_dataset_rele(ds, FTAG);
@ -1883,7 +1883,7 @@ dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
if (hds->ds_phys->ds_flags & DS_FLAG_NOPROMOTE) {
promote_rele(ddpa, FTAG);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
/*
@ -1933,7 +1933,7 @@ dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
* the objset.
*/
if (dsl_dataset_long_held(ds)) {
err = EBUSY;
err = SET_ERROR(EBUSY);
goto out;
}
@ -1942,7 +1942,7 @@ dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
if (err == 0) {
(void) strcpy(ddpa->err_ds, snap->ds->ds_snapname);
err = EEXIST;
err = SET_ERROR(EEXIST);
goto out;
}
if (err != ENOENT)
@ -2279,7 +2279,7 @@ promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp, void *tag)
if (dsl_dataset_is_snapshot(ddpa->ddpa_clone) ||
!dsl_dir_is_clone(dd)) {
dsl_dataset_rele(ddpa->ddpa_clone, tag);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
error = snaplist_make(dp, 0, dd->dd_phys->dd_origin_obj,
@ -2367,30 +2367,30 @@ dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
/* they should both be heads */
if (dsl_dataset_is_snapshot(clone) ||
dsl_dataset_is_snapshot(origin_head))
return (EINVAL);
return (SET_ERROR(EINVAL));
/* the branch point should be just before them */
if (clone->ds_prev != origin_head->ds_prev)
return (EINVAL);
return (SET_ERROR(EINVAL));
/* clone should be the clone (unless they are unrelated) */
if (clone->ds_prev != NULL &&
clone->ds_prev != clone->ds_dir->dd_pool->dp_origin_snap &&
origin_head->ds_object !=
clone->ds_prev->ds_phys->ds_next_snap_obj)
return (EINVAL);
return (SET_ERROR(EINVAL));
/* the clone should be a child of the origin */
if (clone->ds_dir->dd_parent != origin_head->ds_dir)
return (EINVAL);
return (SET_ERROR(EINVAL));
/* origin_head shouldn't be modified unless 'force' */
if (!force && dsl_dataset_modified_since_lastsnap(origin_head))
return (ETXTBSY);
return (SET_ERROR(ETXTBSY));
/* origin_head should have no long holds (e.g. is not mounted) */
if (dsl_dataset_long_held(origin_head))
return (EBUSY);
return (SET_ERROR(EBUSY));
/* check amount of any unconsumed refreservation */
unused_refres_delta =
@ -2402,12 +2402,12 @@ dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
if (unused_refres_delta > 0 &&
unused_refres_delta >
dsl_dir_space_available(origin_head->ds_dir, NULL, 0, TRUE))
return (ENOSPC);
return (SET_ERROR(ENOSPC));
/* clone can't be over the head's refquota */
if (origin_head->ds_quota != 0 &&
clone->ds_phys->ds_referenced_bytes > origin_head->ds_quota)
return (EDQUOT);
return (SET_ERROR(EDQUOT));
return (0);
}
@ -2602,9 +2602,9 @@ dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
if (ds->ds_phys->ds_referenced_bytes + inflight >= ds->ds_quota) {
if (inflight > 0 ||
ds->ds_phys->ds_referenced_bytes < ds->ds_quota)
error = ERESTART;
error = SET_ERROR(ERESTART);
else
error = EDQUOT;
error = SET_ERROR(EDQUOT);
}
mutex_exit(&ds->ds_lock);
@ -2629,7 +2629,7 @@ dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
uint64_t newval;
if (spa_version(dp->dp_spa) < SPA_VERSION_REFQUOTA)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
@ -2637,7 +2637,7 @@ dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
if (dsl_dataset_is_snapshot(ds)) {
dsl_dataset_rele(ds, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
error = dsl_prop_predict(ds->ds_dir,
@ -2656,7 +2656,7 @@ dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
if (newval < ds->ds_phys->ds_referenced_bytes ||
newval < ds->ds_reserved) {
dsl_dataset_rele(ds, FTAG);
return (ENOSPC);
return (SET_ERROR(ENOSPC));
}
dsl_dataset_rele(ds, FTAG);
@ -2712,7 +2712,7 @@ dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
uint64_t newval, unique;
if (spa_version(dp->dp_spa) < SPA_VERSION_REFRESERVATION)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
@ -2720,7 +2720,7 @@ dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
if (dsl_dataset_is_snapshot(ds)) {
dsl_dataset_rele(ds, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
error = dsl_prop_predict(ds->ds_dir,
@ -2754,7 +2754,7 @@ dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
dsl_dir_space_available(ds->ds_dir, NULL, 0, B_TRUE) ||
(ds->ds_quota > 0 && newval > ds->ds_quota)) {
dsl_dataset_rele(ds, FTAG);
return (ENOSPC);
return (SET_ERROR(ENOSPC));
}
}
@ -2899,7 +2899,7 @@ dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
if (snap != new)
dsl_dataset_rele(snap, FTAG);
if (snapobj == 0) {
err = EINVAL;
err = SET_ERROR(EINVAL);
break;
}
@ -2941,7 +2941,7 @@ dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
if (firstsnap->ds_dir != lastsnap->ds_dir ||
firstsnap->ds_phys->ds_creation_txg >
lastsnap->ds_phys->ds_creation_txg)
return (EINVAL);
return (SET_ERROR(EINVAL));
*usedp = *compp = *uncompp = 0;

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -107,7 +107,7 @@ dsl_deleg_can_allow(char *ddname, nvlist_t *nvp, cred_t *cr)
const char *perm = nvpair_name(permpair);
if (strcmp(perm, ZFS_DELEG_PERM_ALLOW) == 0)
return (EPERM);
return (SET_ERROR(EPERM));
if ((error = dsl_deleg_access(ddname, perm, cr)) != 0)
return (error);
@ -139,10 +139,10 @@ dsl_deleg_can_unallow(char *ddname, nvlist_t *nvp, cred_t *cr)
if (type != ZFS_DELEG_USER &&
type != ZFS_DELEG_USER_SETS)
return (EPERM);
return (SET_ERROR(EPERM));
if (strcmp(idstr, &nvpair_name(whopair)[3]) != 0)
return (EPERM);
return (SET_ERROR(EPERM));
}
return (0);
}
@ -261,7 +261,7 @@ dsl_deleg_check(void *arg, dmu_tx_t *tx)
if (spa_version(dmu_tx_pool(tx)->dp_spa) <
SPA_VERSION_DELEGATED_PERMS) {
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
error = dsl_dir_hold(dmu_tx_pool(tx), dda->dda_name, FTAG, &dd, NULL);
@ -417,7 +417,7 @@ dsl_check_access(objset_t *mos, uint64_t zapobj,
if (error == 0) {
error = zap_lookup(mos, jumpobj, perm, 8, 1, &zero);
if (error == ENOENT)
error = EPERM;
error = SET_ERROR(EPERM);
}
return (error);
}
@ -462,7 +462,7 @@ dsl_check_user_access(objset_t *mos, uint64_t zapobj, const char *perm,
return (0);
}
return (EPERM);
return (SET_ERROR(EPERM));
}
/*
@ -555,11 +555,11 @@ dsl_deleg_access_impl(dsl_dataset_t *ds, const char *perm, cred_t *cr)
mos = dp->dp_meta_objset;
if (dsl_delegation_on(mos) == B_FALSE)
return (ECANCELED);
return (SET_ERROR(ECANCELED));
if (spa_version(dmu_objset_spa(dp->dp_meta_objset)) <
SPA_VERSION_DELEGATED_PERMS)
return (EPERM);
return (SET_ERROR(EPERM));
if (dsl_dataset_is_snapshot(ds)) {
/*
@ -633,7 +633,7 @@ dsl_deleg_access_impl(dsl_dataset_t *ds, const char *perm, cred_t *cr)
if (error == 0)
goto success;
}
error = EPERM;
error = SET_ERROR(EPERM);
success:
cookie = NULL;

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -52,10 +52,10 @@ static int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
if (!dsl_dataset_is_snapshot(ds))
return (EINVAL);
return (SET_ERROR(EINVAL));
if (dsl_dataset_long_held(ds))
return (EBUSY);
return (SET_ERROR(EBUSY));
/*
* Only allow deferred destroy on pools that support it.
@ -64,7 +64,7 @@ dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
if (defer) {
if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
SPA_VERSION_USERREFS)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
return (0);
}
@ -73,13 +73,13 @@ dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
* we can't destroy it yet.
*/
if (ds->ds_userrefs > 0)
return (EBUSY);
return (SET_ERROR(EBUSY));
/*
* Can't delete a branch point.
*/
if (ds->ds_phys->ds_num_children > 1)
return (EEXIST);
return (SET_ERROR(EEXIST));
return (0);
}
@ -587,10 +587,10 @@ dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
objset_t *mos;
if (dsl_dataset_is_snapshot(ds))
return (EINVAL);
return (SET_ERROR(EINVAL));
if (refcount_count(&ds->ds_longholds) != expected_holds)
return (EBUSY);
return (SET_ERROR(EBUSY));
mos = ds->ds_dir->dd_pool->dp_meta_objset;
@ -601,7 +601,7 @@ dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
*/
if (ds->ds_prev != NULL &&
ds->ds_prev->ds_phys->ds_next_snap_obj == ds->ds_object)
return (EBUSY);
return (SET_ERROR(EBUSY));
/*
* Can't delete if there are children of this fs.
@ -611,14 +611,14 @@ dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
if (error != 0)
return (error);
if (count != 0)
return (EEXIST);
return (SET_ERROR(EEXIST));
if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
ds->ds_prev->ds_phys->ds_num_children == 2 &&
ds->ds_prev->ds_userrefs == 0) {
/* We need to remove the origin snapshot as well. */
if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
return (EBUSY);
return (SET_ERROR(EBUSY));
}
return (0);
}

View File

@ -22,7 +22,7 @@
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
* All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/dmu.h>
@ -255,12 +255,12 @@ getcomponent(const char *path, char *component, const char **nextp)
char *p;
if ((path == NULL) || (path[0] == '\0'))
return (ENOENT);
return (SET_ERROR(ENOENT));
/* This would be a good place to reserve some namespace... */
p = strpbrk(path, "/@");
if (p && (p[1] == '/' || p[1] == '@')) {
/* two separators in a row */
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (p == NULL || p == path) {
/*
@ -270,14 +270,14 @@ getcomponent(const char *path, char *component, const char **nextp)
*/
if (p != NULL &&
(p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
return (EINVAL);
return (SET_ERROR(EINVAL));
if (strlen(path) >= MAXNAMELEN)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
(void) strcpy(component, path);
p = NULL;
} else if (p[0] == '/') {
if (p - path >= MAXNAMELEN)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
(void) strncpy(component, path, p - path);
component[p - path] = '\0';
p++;
@ -287,9 +287,9 @@ getcomponent(const char *path, char *component, const char **nextp)
* any more slashes.
*/
if (strchr(path, '/'))
return (EINVAL);
return (SET_ERROR(EINVAL));
if (p - path >= MAXNAMELEN)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
(void) strncpy(component, path, p - path);
component[p - path] = '\0';
} else {
@ -323,7 +323,7 @@ dsl_dir_hold(dsl_pool_t *dp, const char *name, void *tag,
/* Make sure the name is in the specified pool. */
spaname = spa_name(dp->dp_spa);
if (strcmp(buf, spaname) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
ASSERT(dsl_pool_config_held(dp));
@ -374,7 +374,7 @@ dsl_dir_hold(dsl_pool_t *dp, const char *name, void *tag,
/* bad path name */
dsl_dir_rele(dd, tag);
dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
err = ENOENT;
err = SET_ERROR(ENOENT);
}
if (tailp != NULL)
*tailp = next;
@ -683,7 +683,7 @@ dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
used_on_disk>>10, est_inflight>>10,
quota>>10, asize>>10, retval);
mutex_exit(&dd->dd_lock);
return (retval);
return (SET_ERROR(retval));
}
/* We need to up our estimated delta before dropping dd_lock */
@ -745,7 +745,7 @@ dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
} else {
if (err == EAGAIN) {
txg_delay(dd->dd_pool, tx->tx_txg, 1);
err = ERESTART;
err = SET_ERROR(ERESTART);
}
dsl_pool_memory_pressure(dd->dd_pool);
}
@ -955,7 +955,7 @@ dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)
if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
(newval < ds->ds_dir->dd_phys->dd_reserved ||
newval < ds->ds_dir->dd_phys->dd_used_bytes + towrite)) {
error = ENOSPC;
error = SET_ERROR(ENOSPC);
}
mutex_exit(&ds->ds_dir->dd_lock);
dsl_dataset_rele(ds, FTAG);
@ -1049,7 +1049,7 @@ dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx)
if (delta > avail ||
(dd->dd_phys->dd_quota > 0 &&
newval > dd->dd_phys->dd_quota))
error = ENOSPC;
error = SET_ERROR(ENOSPC);
}
dsl_dataset_rele(ds, FTAG);
@ -1156,7 +1156,7 @@ dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
dsl_dataset_name(ds, namebuf);
if (strlen(namebuf) + *deltap >= MAXNAMELEN)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
@ -1187,14 +1187,14 @@ dsl_dir_rename_check(void *arg, dmu_tx_t *tx)
if (dd->dd_pool != newparent->dd_pool) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (ENXIO);
return (SET_ERROR(ENXIO));
}
/* new name should not already exist */
if (mynewname == NULL) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
/* if the name length is growing, validate child name lengths */
@ -1217,7 +1217,7 @@ dsl_dir_rename_check(void *arg, dmu_tx_t *tx)
if (closest_common_ancestor(dd, newparent) == dd) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
error = dsl_dir_transfer_possible(dd->dd_parent,
@ -1328,7 +1328,7 @@ dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd, uint64_t space)
adelta = would_change(sdd, -space, ancestor);
avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
if (avail < space)
return (ENOSPC);
return (SET_ERROR(ENOSPC));
return (0);
}

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/dsl_pool.h>
@ -630,7 +630,7 @@ dsl_pool_tempreserve_space(dsl_pool_t *dp, uint64_t space, dmu_tx_t *tx)
+ dp->dp_tempreserved[tx->tx_txg & TXG_MASK] / 2;
if (reserved && reserved > write_limit)
return (ERESTART);
return (SET_ERROR(ERESTART));
}
atomic_add_64(&dp->dp_tempreserved[tx->tx_txg & TXG_MASK], space);
@ -912,7 +912,7 @@ dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
dsl_pool_user_hold_create_obj(dp, tx);
zapobj = dp->dp_tmp_userrefs_obj;
} else {
return (ENOENT);
return (SET_ERROR(ENOENT));
}
}


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -52,16 +52,16 @@ dodefault(const char *propname, int intsz, int numints, void *buf)
*/
if ((prop = zfs_name_to_prop(propname)) == ZPROP_INVAL ||
(zfs_prop_readonly(prop) && !zfs_prop_setonce(prop)))
return (ENOENT);
return (SET_ERROR(ENOENT));
if (zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
if (intsz != 1)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
(void) strncpy(buf, zfs_prop_default_string(prop),
numints);
} else {
if (intsz != 8 || numints < 1)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
*(uint64_t *)buf = zfs_prop_default_numeric(prop);
}
@ -144,7 +144,7 @@ dsl_prop_get_dd(dsl_dir_t *dd, const char *propname,
* at the end of the loop (instead of at the beginning) ensures
* that err has a valid post-loop value.
*/
err = ENOENT;
err = SET_ERROR(ENOENT);
}
if (err == ENOENT)
@ -400,7 +400,7 @@ dsl_prop_unregister(dsl_dataset_t *ds, const char *propname,
if (cbr == NULL) {
mutex_exit(&dd->dd_lock);
return (ENOMSG);
return (SET_ERROR(ENOMSG));
}
list_remove(&dd->dd_prop_cbs, cbr);
@ -749,7 +749,7 @@ dsl_props_set_check(void *arg, dmu_tx_t *tx)
while ((elem = nvlist_next_nvpair(dpsa->dpsa_props, elem)) != NULL) {
if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
dsl_dataset_rele(ds, FTAG);
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
}
if (nvpair_type(elem) == DATA_TYPE_STRING) {
char *valstr = fnvpair_value_string(elem);
@ -764,7 +764,7 @@ dsl_props_set_check(void *arg, dmu_tx_t *tx)
if (dsl_dataset_is_snapshot(ds) && version < SPA_VERSION_SNAP_PROPS) {
dsl_dataset_rele(ds, FTAG);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
dsl_dataset_rele(ds, FTAG);
return (0);


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/dsl_scan.h>
@ -189,7 +189,7 @@ dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
if (scn->scn_phys.scn_state == DSS_SCANNING)
return (EBUSY);
return (SET_ERROR(EBUSY));
return (0);
}
@ -349,7 +349,7 @@ dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
if (scn->scn_phys.scn_state != DSS_SCANNING)
return (ENOENT);
return (SET_ERROR(ENOENT));
return (0);
}
@ -1352,7 +1352,7 @@ dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
if (!scn->scn_is_bptree ||
(BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
if (dsl_scan_free_should_pause(scn))
return (ERESTART);
return (SET_ERROR(ERESTART));
}
zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/dmu.h>
@ -155,7 +155,7 @@ dsl_sync_task_sync(dsl_sync_task_t *dst, dmu_tx_t *tx)
used = dp->dp_root_dir->dd_phys->dd_used_bytes;
/* MOS space is triple-dittoed, so we multiply by 3. */
if (dst->dst_space > 0 && used + dst->dst_space * 3 > quota) {
dst->dst_error = ENOSPC;
dst->dst_error = SET_ERROR(ENOSPC);
if (dst->dst_nowaiter)
kmem_free(dst, sizeof (*dst));
return;


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -67,7 +67,7 @@ dsl_dataset_user_hold_check_one(dsl_dataset_t *ds, const char *htag,
error = zap_lookup(mos, ds->ds_phys->ds_userrefs_obj,
htag, 8, 1, &value);
if (error == 0)
error = EEXIST;
error = SET_ERROR(EEXIST);
else if (error == ENOENT)
error = 0;
}
@ -86,7 +86,7 @@ dsl_dataset_user_hold_check(void *arg, dmu_tx_t *tx)
int rv = 0;
if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
for (pair = nvlist_next_nvpair(dduha->dduha_holds, NULL); pair != NULL;
pair = nvlist_next_nvpair(dduha->dduha_holds, pair)) {
@ -96,7 +96,7 @@ dsl_dataset_user_hold_check(void *arg, dmu_tx_t *tx)
/* must be a snapshot */
if (strchr(nvpair_name(pair), '@') == NULL)
error = EINVAL;
error = SET_ERROR(EINVAL);
if (error == 0)
error = nvpair_value_string(pair, &htag);
@ -218,11 +218,11 @@ dsl_dataset_user_release_check_one(dsl_dataset_t *ds,
*todelete = B_FALSE;
if (!dsl_dataset_is_snapshot(ds))
return (EINVAL);
return (SET_ERROR(EINVAL));
zapobj = ds->ds_phys->ds_userrefs_obj;
if (zapobj == 0)
return (ESRCH);
return (SET_ERROR(ESRCH));
for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
pair = nvlist_next_nvpair(holds, pair)) {
@ -230,7 +230,7 @@ dsl_dataset_user_release_check_one(dsl_dataset_t *ds,
uint64_t tmp;
error = zap_lookup(mos, zapobj, nvpair_name(pair), 8, 1, &tmp);
if (error == ENOENT)
error = ESRCH;
error = SET_ERROR(ESRCH);
if (error != 0)
return (error);
numholds++;
@ -241,7 +241,7 @@ dsl_dataset_user_release_check_one(dsl_dataset_t *ds,
/* we need to destroy the snapshot as well */
if (dsl_dataset_long_held(ds))
return (EBUSY);
return (SET_ERROR(EBUSY));
*todelete = B_TRUE;
}
return (0);
@ -267,7 +267,7 @@ dsl_dataset_user_release_check(void *arg, dmu_tx_t *tx)
error = nvpair_value_nvlist(pair, &holds);
if (error != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
error = dsl_dataset_hold(dp, name, FTAG, &ds);
if (error == 0) {


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
*/
@ -1517,7 +1517,7 @@ metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
* For testing, make some blocks above a certain size be gang blocks.
*/
if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0)
return (ENOSPC);
return (SET_ERROR(ENOSPC));
/*
* Start at the rotor and loop through all mgs until we find something.
@ -1682,7 +1682,7 @@ metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
bzero(&dva[d], sizeof (dva_t));
return (ENOSPC);
return (SET_ERROR(ENOSPC));
}
/*
@ -1751,7 +1751,7 @@ metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
(offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
return (ENXIO);
return (SET_ERROR(ENXIO));
msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
@ -1764,7 +1764,7 @@ metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
if (error == 0 && !space_map_contains(msp->ms_map, offset, size))
error = ENOENT;
error = SET_ERROR(ENOENT);
if (error || txg == 0) { /* txg == 0 indicates dry run */
mutex_exit(&msp->ms_lock);
@ -1799,7 +1799,7 @@ metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
if (mc->mc_rotor == NULL) { /* no vdevs in this class */
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (ENOSPC);
return (SET_ERROR(ENOSPC));
}
ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));


@ -22,7 +22,7 @@
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Portions Copyright 2011 iXsystems, Inc
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -373,7 +373,7 @@ sa_attr_op(sa_handle_t *hdl, sa_bulk_attr_t *bulk, int count,
switch (data_op) {
case SA_LOOKUP:
if (bulk[i].sa_addr == NULL)
return (ENOENT);
return (SET_ERROR(ENOENT));
if (bulk[i].sa_data) {
SA_COPY_DATA(bulk[i].sa_data_func,
bulk[i].sa_addr, bulk[i].sa_data,
@ -503,7 +503,7 @@ sa_resize_spill(sa_handle_t *hdl, uint32_t size, dmu_tx_t *tx)
blocksize = SPA_MINBLOCKSIZE;
} else if (size > SPA_MAXBLOCKSIZE) {
ASSERT(0);
return (EFBIG);
return (SET_ERROR(EFBIG));
} else {
blocksize = P2ROUNDUP_TYPED(size, SPA_MINBLOCKSIZE, uint32_t);
}
@ -677,7 +677,7 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
SA_BONUS, &i, &used, &spilling);
if (used > SPA_MAXBLOCKSIZE)
return (EFBIG);
return (SET_ERROR(EFBIG));
VERIFY(0 == dmu_set_bonus(hdl->sa_bonus, spilling ?
MIN(DN_MAX_BONUSLEN - sizeof (blkptr_t), used + hdrsize) :
@ -701,7 +701,7 @@ sa_build_layouts(sa_handle_t *hdl, sa_bulk_attr_t *attr_desc, int attr_count,
&spill_used, &dummy);
if (spill_used > SPA_MAXBLOCKSIZE)
return (EFBIG);
return (SET_ERROR(EFBIG));
buf_space = hdl->sa_spill->db_size - spillhdrsize;
if (BUF_SPACE_NEEDED(spill_used, spillhdrsize) >
@ -861,7 +861,7 @@ sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count)
*/
if (error || (error == 0 && sa_attr_count == 0)) {
if (error == 0)
error = EINVAL;
error = SET_ERROR(EINVAL);
goto bail;
}
sa_reg_count = sa_attr_count;
@ -892,7 +892,7 @@ sa_attr_table_setup(objset_t *os, sa_attr_reg_t *reg_attrs, int count)
error = zap_lookup(os, sa->sa_reg_attr_obj,
reg_attrs[i].sa_name, 8, 1, &attr_value);
else
error = ENOENT;
error = SET_ERROR(ENOENT);
switch (error) {
case ENOENT:
sa->sa_user_table[i] = (sa_attr_type_t)sa_attr_count;
@ -1051,7 +1051,7 @@ sa_setup(objset_t *os, uint64_t sa_obj, sa_attr_reg_t *reg_attrs, int count,
*/
if (error || (error == 0 && layout_count == 0)) {
if (error == 0)
error = EINVAL;
error = SET_ERROR(EINVAL);
goto fail;
}


@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2013 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
*/
@ -403,7 +403,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
switch (prop) {
case ZPROP_INVAL:
if (!zpool_prop_feature(propname)) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
@ -411,23 +411,23 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
* Sanitize the input.
*/
if (nvpair_type(elem) != DATA_TYPE_UINT64) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
if (nvpair_value_uint64(elem, &intval) != 0) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
if (intval != 0) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
fname = strchr(propname, '@') + 1;
if (zfeature_lookup_name(fname, NULL) != 0) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
@ -440,7 +440,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
(intval < spa_version(spa) ||
intval > SPA_VERSION_BEFORE_FEATURES ||
has_feature))
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_DELEGATION:
@ -449,7 +449,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
case ZPOOL_PROP_AUTOEXPAND:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_BOOTFS:
@ -459,7 +459,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
* the bootfs property cannot be set.
*/
if (spa_version(spa) < SPA_VERSION_BOOTFS) {
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
break;
}
@ -467,7 +467,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
* Make sure the vdev config is bootable
*/
if (!vdev_is_bootable(spa->spa_root_vdev)) {
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
break;
}
@ -491,13 +491,13 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
/* Must be ZPL and not gzip compressed. */
if (dmu_objset_type(os) != DMU_OST_ZFS) {
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
} else if ((error =
dsl_prop_get_int_ds(dmu_objset_ds(os),
zfs_prop_to_name(ZFS_PROP_COMPRESSION),
&compress)) == 0 &&
!BOOTFS_COMPRESS_VALID(compress)) {
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
} else {
objnum = dmu_objset_id(os);
}
@ -509,7 +509,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
error = nvpair_value_uint64(elem, &intval);
if (!error && (intval < ZIO_FAILURE_MODE_WAIT ||
intval > ZIO_FAILURE_MODE_PANIC))
error = EINVAL;
error = SET_ERROR(EINVAL);
/*
* This is a special case which only occurs when
@ -523,7 +523,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
*/
if (!error && spa_suspended(spa)) {
spa->spa_failmode = intval;
error = EIO;
error = SET_ERROR(EIO);
}
break;
@ -538,7 +538,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
break;
if (strval[0] != '/') {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
@ -547,7 +547,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0)
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_COMMENT:
@ -561,7 +561,7 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
* there is an easy-to-use kernel isprint().
*/
if (*check >= 0x7f) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
check++;
@ -572,12 +572,12 @@ spa_prop_validate(spa_t *spa, nvlist_t *props)
case ZPOOL_PROP_DEDUPDITTO:
if (spa_version(spa) < SPA_VERSION_DEDUP)
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
else
error = nvpair_value_uint64(elem, &intval);
if (error == 0 &&
intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
@ -709,7 +709,7 @@ spa_change_guid_check(void *arg, dmu_tx_t *tx)
spa_config_exit(spa, SCL_STATE, FTAG);
if (vdev_state != VDEV_STATE_HEALTHY)
return (ENXIO);
return (SET_ERROR(ENXIO));
ASSERT3U(spa_guid(spa), !=, *newguid);
@ -1128,7 +1128,7 @@ spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
if (error) {
vdev_free(*vdp);
*vdp = NULL;
return (EINVAL);
return (SET_ERROR(EINVAL));
}
for (int c = 0; c < children; c++) {
@ -1844,7 +1844,7 @@ spa_load_verify(spa_t *spa)
if (error) {
if (error != ENXIO && error != EIO)
error = EIO;
error = SET_ERROR(EIO);
return (error);
}
@ -1972,7 +1972,7 @@ spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
nvlist_t *nvl;
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
return (EINVAL);
return (SET_ERROR(EINVAL));
ASSERT(spa->spa_comment == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
@ -1991,7 +1991,7 @@ spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0)) {
error = EEXIST;
error = SET_ERROR(EEXIST);
} else {
spa->spa_config_guid = pool_guid;
@ -2057,7 +2057,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
spa->spa_load_state = state;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
return (EINVAL);
return (SET_ERROR(EINVAL));
parse = (type == SPA_IMPORT_EXISTING ?
VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
@ -2117,7 +2117,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
return (error);
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
return (ENXIO);
return (SET_ERROR(ENXIO));
}
/*
@ -2352,7 +2352,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
"See: http://illumos.org/msg/ZFS-8000-EY",
spa_name(spa), hostname,
(unsigned long)hostid);
return (EBADF);
return (SET_ERROR(EBADF));
}
}
if (nvlist_lookup_nvlist(spa->spa_config,
@ -2541,7 +2541,7 @@ spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
* more toplevel vdevs are faulted.
*/
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
return (ENXIO);
return (SET_ERROR(ENXIO));
if (spa_check_logs(spa)) {
*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
@ -2804,7 +2804,7 @@ spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
if ((spa = spa_lookup(pool)) == NULL) {
if (locked)
mutex_exit(&spa_namespace_lock);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
@ -2839,7 +2839,7 @@ spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
spa_remove(spa);
if (locked)
mutex_exit(&spa_namespace_lock);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
if (error) {
@ -3176,14 +3176,14 @@ spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
return (0);
if (ndev == 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* Make sure the pool is formatted with a version that supports this
* device type.
*/
if (spa_version(spa) < version)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
/*
* Set the pending device list so we correctly handle device in-use
@ -3199,7 +3199,7 @@ spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
if (!vd->vdev_ops->vdev_op_leaf) {
vdev_free(vd);
error = EINVAL;
error = SET_ERROR(EINVAL);
goto out;
}
@ -3210,7 +3210,7 @@ spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
#ifdef _KERNEL
if ((strcmp(config, ZPOOL_CONFIG_L2CACHE) == 0) &&
strcmp(vd->vdev_ops->vdev_op_type, VDEV_TYPE_DISK) != 0) {
error = ENOTBLK;
error = SET_ERROR(ENOTBLK);
vdev_free(vd);
goto out;
}
@ -3349,7 +3349,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
mutex_enter(&spa_namespace_lock);
if (spa_lookup(pool) != NULL) {
mutex_exit(&spa_namespace_lock);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
/*
@ -3402,7 +3402,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
ASSERT(error != 0 || spa->spa_root_vdev == rvd);
if (error == 0 && !zfs_allocatable_devs(nvroot))
error = EINVAL;
error = SET_ERROR(EINVAL);
if (error == 0 &&
(error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
@ -3675,7 +3675,7 @@ spa_import_rootpool(char *devpath, char *devid)
if (config == NULL) {
cmn_err(CE_NOTE, "Cannot read the pool label from '%s'",
devpath);
return (EIO);
return (SET_ERROR(EIO));
}
VERIFY(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
@ -3718,7 +3718,7 @@ spa_import_rootpool(char *devpath, char *devid)
if ((bvd = vdev_lookup_by_guid(rvd, guid)) == NULL) {
cmn_err(CE_NOTE, "Can not find the boot vdev for guid %llu",
(u_longlong_t)guid);
error = ENOENT;
error = SET_ERROR(ENOENT);
goto out;
}
@ -3730,7 +3730,7 @@ spa_import_rootpool(char *devpath, char *devid)
if (avd != bvd) {
cmn_err(CE_NOTE, "The boot device is 'degraded'. Please "
"try booting from '%s'", avd->vdev_path);
error = EINVAL;
error = SET_ERROR(EINVAL);
goto out;
}
@ -3744,7 +3744,7 @@ spa_import_rootpool(char *devpath, char *devid)
"try booting from '%s'",
bvd->vdev_parent->
vdev_child[bvd->vdev_parent->vdev_children - 1]->vdev_path);
error = EINVAL;
error = SET_ERROR(EINVAL);
goto out;
}
@ -3979,7 +3979,7 @@ spa_import(const char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
mutex_enter(&spa_namespace_lock);
if (spa_lookup(pool) != NULL) {
mutex_exit(&spa_namespace_lock);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
/*
@ -4257,12 +4257,12 @@ spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
*oldconfig = NULL;
if (!(spa_mode_global & FWRITE))
return (EROFS);
return (SET_ERROR(EROFS));
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(pool)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
/*
@ -4296,7 +4296,7 @@ spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
new_state != POOL_STATE_UNINITIALIZED)) {
spa_async_resume(spa);
mutex_exit(&spa_namespace_lock);
return (EBUSY);
return (SET_ERROR(EBUSY));
}
/*
@ -4309,7 +4309,7 @@ spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
spa_has_active_shared_spare(spa)) {
spa_async_resume(spa);
mutex_exit(&spa_namespace_lock);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
/*
@ -5017,7 +5017,7 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
continue;
} else {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
}
@ -5025,14 +5025,14 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
/* which disk is going to be split? */
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
&glist[c]) != 0) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
/* look it up in the spa */
vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
if (vml[c] == NULL) {
error = ENODEV;
error = SET_ERROR(ENODEV);
break;
}
@ -5046,12 +5046,12 @@ spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
vml[c]->vdev_children != 0 ||
vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
error = EINVAL;
error = SET_ERROR(EINVAL);
break;
}
if (vdev_dtl_required(vml[c])) {
error = EBUSY;
error = SET_ERROR(EBUSY);
break;
}
@ -5286,7 +5286,7 @@ spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
if (vd->vdev_stat.vs_alloc != 0)
error = spa_offline_log(spa);
} else {
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
}
if (error)
@ -5395,7 +5395,7 @@ spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
spa_load_spares(spa);
spa->spa_spares.sav_sync = B_TRUE;
} else {
error = EBUSY;
error = SET_ERROR(EBUSY);
}
} else if (spa->spa_l2cache.sav_vdevs != NULL &&
nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
@ -5455,12 +5455,12 @@ spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
/*
* Normal vdevs cannot be removed (yet).
*/
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
} else {
/*
* There is no vdev of any kind with the specified guid.
*/
error = ENOENT;
error = SET_ERROR(ENOENT);
}
if (!locked)
@ -5647,7 +5647,7 @@ spa_scan_stop(spa_t *spa)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (EBUSY);
return (SET_ERROR(EBUSY));
return (dsl_scan_cancel(spa->spa_dsl_pool));
}
@ -5657,7 +5657,7 @@ spa_scan(spa_t *spa, pool_scan_func_t func)
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
/*
* If a resilver was requested, but there is no DTL on a


@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -175,7 +176,7 @@ process_error_log(spa_t *spa, uint64_t obj, void *addr, size_t *count)
if (*count == 0) {
zap_cursor_fini(&zc);
return (ENOMEM);
return (SET_ERROR(ENOMEM));
}
name_to_bookmark(za.za_name, &zb);
@ -183,7 +184,7 @@ process_error_log(spa_t *spa, uint64_t obj, void *addr, size_t *count)
if (copyout(&zb, (char *)addr +
(*count - 1) * sizeof (zbookmark_t),
sizeof (zbookmark_t)) != 0)
return (EFAULT);
return (SET_ERROR(EFAULT));
*count -= 1;
}
@ -201,12 +202,12 @@ process_error_list(avl_tree_t *list, void *addr, size_t *count)
for (se = avl_first(list); se != NULL; se = AVL_NEXT(list, se)) {
if (*count == 0)
return (ENOMEM);
return (SET_ERROR(ENOMEM));
if (copyout(&se->se_bookmark, (char *)addr +
(*count - 1) * sizeof (zbookmark_t),
sizeof (zbookmark_t)) != 0)
return (EFAULT);
return (SET_ERROR(EFAULT));
*count -= 1;
}


@ -21,7 +21,7 @@
/*
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/spa.h>
@ -306,7 +306,7 @@ spa_history_log_nvl(spa_t *spa, nvlist_t *nvl)
return (EINVAL);
if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY || !spa_writeable(spa))
return (EINVAL);
return (SET_ERROR(EINVAL));
tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
err = dmu_tx_assign(tx, TXG_WAIT);
@ -350,7 +350,7 @@ spa_history_get(spa_t *spa, uint64_t *offp, uint64_t *len, char *buf)
* that's ok, just return ENOENT.
*/
if (!spa->spa_history)
return (ENOENT);
return (SET_ERROR(ENOENT));
/*
* The history is logged asynchronously, so when they request


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
*/
@ -1861,7 +1861,7 @@ spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
return (ENOENT);
return (SET_ERROR(ENOENT));
bzero(ps, sizeof (pool_scan_stat_t));
/* data stored on disk */


@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -33,7 +34,7 @@ uberblock_verify(uberblock_t *ub)
byteswap_uint64_array(ub, sizeof (uberblock_t));
if (ub->ub_magic != UBERBLOCK_MAGIC)
return (EINVAL);
return (SET_ERROR(EINVAL));
return (0);
}


@ -22,7 +22,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2013 Martin Matuska <mm@FreeBSD.org>. All rights reserved.
*/
@ -357,10 +357,10 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
if ((ops = vdev_getops(type)) == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* If this is a load, get the vdev guid from the nvlist.
@ -371,26 +371,26 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
label_id != id)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_SPARE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_L2CACHE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
* The first allocated vdev must be of type 'root'.
*/
if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* Determine whether we're a log vdev.
@ -398,10 +398,10 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
islog = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
/*
* Set the nparity property for RAID-Z vdevs.
@ -411,24 +411,24 @@ vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
&nparity) == 0) {
if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* Previous versions could only support 1 or 2 parity
* device.
*/
if (nparity > 1 &&
spa_version(spa) < SPA_VERSION_RAIDZ2)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
if (nparity > 2 &&
spa_version(spa) < SPA_VERSION_RAIDZ3)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
} else {
/*
* We require the parity to be specified for SPAs that
* support multiple parity levels.
*/
if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
return (EINVAL);
return (SET_ERROR(EINVAL));
/*
* Otherwise, we default to 1 parity device for RAID-Z.
*/
@ -946,7 +946,7 @@ vdev_probe_done(zio_t *zio)
ASSERT(zio->io_error != 0);
zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
spa, vd, NULL, 0, 0);
zio->io_error = ENXIO;
zio->io_error = SET_ERROR(ENXIO);
}
mutex_enter(&vd->vdev_probe_lock);
@ -956,7 +956,7 @@ vdev_probe_done(zio_t *zio)
while ((pio = zio_walk_parents(zio)) != NULL)
if (!vdev_accessible(vd, pio))
pio->io_error = ENXIO;
pio->io_error = SET_ERROR(ENXIO);
kmem_free(vps, sizeof (*vps));
}
@ -1141,11 +1141,11 @@ vdev_open(vdev_t *vd)
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (ENXIO);
return (SET_ERROR(ENXIO));
} else if (vd->vdev_offline) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
return (ENXIO);
return (SET_ERROR(ENXIO));
}
error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);
@ -1180,7 +1180,7 @@ vdev_open(vdev_t *vd)
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (ENXIO);
return (SET_ERROR(ENXIO));
}
if (vd->vdev_degraded) {
@ -1217,7 +1217,7 @@ vdev_open(vdev_t *vd)
if (osize < SPA_MINDEVSIZE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
}
psize = osize;
asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
@ -1228,7 +1228,7 @@ vdev_open(vdev_t *vd)
(VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
}
psize = 0;
asize = osize;
@ -1243,7 +1243,7 @@ vdev_open(vdev_t *vd)
if (asize < vd->vdev_min_asize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (vd->vdev_asize == 0) {
@ -1325,7 +1325,7 @@ vdev_validate(vdev_t *vd, boolean_t strict)
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_validate(vd->vdev_child[c], strict) != 0)
return (EBADF);
return (SET_ERROR(EBADF));
/*
* If the device has already failed, or was marked offline, don't do
@ -1411,7 +1411,7 @@ vdev_validate(vdev_t *vd, boolean_t strict)
if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
spa_load_state(spa) == SPA_LOAD_OPEN &&
state != POOL_STATE_ACTIVE)
return (EBADF);
return (SET_ERROR(EBADF));
/*
* If we were able to open and validate a vdev that was


@ -22,6 +22,9 @@
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
@ -271,16 +274,16 @@ vdev_cache_read(zio_t *zio)
ASSERT(zio->io_type == ZIO_TYPE_READ);
if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (zio->io_size > zfs_vdev_cache_max)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
/*
* If the I/O straddles two or more cache blocks, don't cache it.
*/
if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS))
return (EXDEV);
return (SET_ERROR(EXDEV));
ASSERT(cache_phase + zio->io_size <= VCBS);
@ -292,7 +295,7 @@ vdev_cache_read(zio_t *zio)
if (ve != NULL) {
if (ve->ve_missed_update) {
mutex_exit(&vc->vc_lock);
return (ESTALE);
return (SET_ERROR(ESTALE));
}
if ((fio = ve->ve_fill_io) != NULL) {
@ -315,7 +318,7 @@ vdev_cache_read(zio_t *zio)
if (ve == NULL) {
mutex_exit(&vc->vc_lock);
return (ENOMEM);
return (SET_ERROR(ENOMEM));
}
fio = zio_vdev_delegated_io(zio->io_vd, cache_offset,


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -150,7 +150,7 @@ vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
*/
if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
@ -185,7 +185,7 @@ vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
if (ddi_devid_str_decode(vd->vdev_devid, &dvd->vd_devid,
&dvd->vd_minor) != 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (EINVAL);
return (SET_ERROR(EINVAL));
}
}
@ -221,7 +221,7 @@ vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
if (error == 0 && vd->vdev_devid != NULL &&
ldi_get_devid(dvd->vd_lh, &devid) == 0) {
if (ddi_devid_compare(devid, dvd->vd_devid) != 0) {
error = EINVAL;
error = SET_ERROR(EINVAL);
(void) ldi_close(dvd->vd_lh, spa_mode(spa),
kcred);
dvd->vd_lh = NULL;
@ -303,7 +303,7 @@ vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
*/
if (ldi_get_size(dvd->vd_lh, psize) != 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
@ -374,7 +374,7 @@ vdev_disk_physio(ldi_handle_t vd_lh, caddr_t data, size_t size,
int error = 0;
if (vd_lh == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
ASSERT(flags & B_READ || flags & B_WRITE);
@ -388,7 +388,7 @@ vdev_disk_physio(ldi_handle_t vd_lh, caddr_t data, size_t size,
error = ldi_strategy(vd_lh, bp);
ASSERT(error == 0);
if ((error = biowait(bp)) == 0 && bp->b_resid != 0)
error = EIO;
error = SET_ERROR(EIO);
freerbuf(bp);
return (error);
@ -408,7 +408,7 @@ vdev_disk_io_intr(buf_t *bp)
zio->io_error = (geterror(bp) != 0 ? EIO : 0);
if (zio->io_error == 0 && bp->b_resid != 0)
zio->io_error = EIO;
zio->io_error = SET_ERROR(EIO);
kmem_free(vdb, sizeof (vdev_disk_buf_t));
@ -449,7 +449,7 @@ vdev_disk_io_start(zio_t *zio)
if (zio->io_type == ZIO_TYPE_IOCTL) {
/* XXPOLICY */
if (!vdev_readable(vd)) {
zio->io_error = ENXIO;
zio->io_error = SET_ERROR(ENXIO);
return (ZIO_PIPELINE_CONTINUE);
}
@ -461,7 +461,7 @@ vdev_disk_io_start(zio_t *zio)
break;
if (vd->vdev_nowritecache) {
zio->io_error = ENOTSUP;
zio->io_error = SET_ERROR(ENOTSUP);
break;
}
@ -499,7 +499,7 @@ vdev_disk_io_start(zio_t *zio)
break;
default:
zio->io_error = ENOTSUP;
zio->io_error = SET_ERROR(ENOTSUP);
}
return (ZIO_PIPELINE_CONTINUE);
@ -604,7 +604,7 @@ vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
if (ldi_get_size(vd_lh, &s)) {
(void) ldi_close(vd_lh, FREAD, kcred);
return (EIO);
return (SET_ERROR(EIO));
}
size = P2ALIGN_TYPED(s, sizeof (vdev_label_t), uint64_t);
@ -646,7 +646,7 @@ vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
kmem_free(label, sizeof (vdev_label_t));
(void) ldi_close(vd_lh, FREAD, kcred);
if (*config == NULL)
error = EIDRM;
error = SET_ERROR(EIDRM);
return (error);
}


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -61,7 +61,7 @@ vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
*/
if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
@ -101,13 +101,17 @@ vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
* Make sure it's a regular file.
*/
if (vp->v_type != VREG) {
#ifdef __FreeBSD__
(void) VOP_CLOSE(vp, spa_mode(vd->vdev_spa), 1, 0, kcred, NULL);
#endif
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
#ifdef __FreeBSD__
kmem_free(vd->vdev_tsd, sizeof (vdev_file_t));
vd->vdev_tsd = NULL;
return (ENODEV);
}
#endif
return (SET_ERROR(ENODEV));
}
#endif /* _KERNEL */
skip_open:
/*
@ -158,7 +162,7 @@ vdev_file_io_start(zio_t *zio)
ssize_t resid;
if (!vdev_readable(vd)) {
zio->io_error = ENXIO;
zio->io_error = SET_ERROR(ENXIO);
return (ZIO_PIPELINE_CONTINUE);
}
@ -172,7 +176,7 @@ vdev_file_io_start(zio_t *zio)
kcred, NULL);
break;
default:
zio->io_error = ENOTSUP;
zio->io_error = SET_ERROR(ENOTSUP);
}
return (ZIO_PIPELINE_CONTINUE);


@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -669,14 +669,14 @@ vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
* Dead vdevs cannot be initialized.
*/
if (vdev_is_dead(vd))
return (EIO);
return (SET_ERROR(EIO));
/*
* Determine if the vdev is in use.
*/
if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPLIT &&
vdev_inuse(vd, crtxg, reason, &spare_guid, &l2cache_guid))
return (EBUSY);
return (SET_ERROR(EBUSY));
/*
* If this is a request to add or replace a spare or l2cache device
@ -1094,7 +1094,7 @@ vdev_label_sync_top_done(zio_t *zio)
uint64_t *good_writes = zio->io_private;
if (*good_writes == 0)
zio->io_error = EIO;
zio->io_error = SET_ERROR(EIO);
kmem_free(good_writes, sizeof (uint64_t));
}


@ -24,7 +24,7 @@
*/
/*
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -139,7 +139,7 @@ vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
if (vd->vdev_children == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (EINVAL);
return (SET_ERROR(EINVAL));
}
vdev_open_children(vd);
@ -234,14 +234,14 @@ vdev_mirror_child_select(zio_t *zio)
if (mc->mc_tried || mc->mc_skipped)
continue;
if (!vdev_readable(mc->mc_vd)) {
mc->mc_error = ENXIO;
mc->mc_error = SET_ERROR(ENXIO);
mc->mc_tried = 1; /* don't even try */
mc->mc_skipped = 1;
continue;
}
if (!vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1))
return (c);
mc->mc_error = ESTALE;
mc->mc_error = SET_ERROR(ESTALE);
mc->mc_skipped = 1;
mc->mc_speculative = 1;
}
@ -429,7 +429,7 @@ vdev_mirror_io_done(zio_t *zio)
!vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
zio->io_txg, 1))
continue;
mc->mc_error = ESTALE;
mc->mc_error = SET_ERROR(ESTALE);
}
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,


@ -24,7 +24,7 @@
*/
/*
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -69,7 +69,7 @@ vdev_missing_close(vdev_t *vd)
static int
vdev_missing_io_start(zio_t *zio)
{
zio->io_error = ENOTSUP;
zio->io_error = SET_ERROR(ENOTSUP);
return (ZIO_PIPELINE_CONTINUE);
}


@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -1465,7 +1465,7 @@ vdev_raidz_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
if (nparity > VDEV_RAIDZ_MAXPARITY ||
vd->vdev_children < nparity + 1) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (EINVAL);
return (SET_ERROR(EINVAL));
}
vdev_open_children(vd);
@ -1602,7 +1602,7 @@ vdev_raidz_io_start(zio_t *zio)
rm->rm_missingdata++;
else
rm->rm_missingparity++;
rc->rc_error = ENXIO;
rc->rc_error = SET_ERROR(ENXIO);
rc->rc_tried = 1; /* don't even try */
rc->rc_skipped = 1;
continue;
@ -1612,7 +1612,7 @@ vdev_raidz_io_start(zio_t *zio)
rm->rm_missingdata++;
else
rm->rm_missingparity++;
rc->rc_error = ESTALE;
rc->rc_error = SET_ERROR(ESTALE);
rc->rc_skipped = 1;
continue;
}
@ -1700,7 +1700,7 @@ raidz_parity_verify(zio_t *zio, raidz_map_t *rm)
continue;
if (bcmp(orig[c], rc->rc_data, rc->rc_size) != 0) {
raidz_checksum_error(zio, rc, orig[c]);
rc->rc_error = ECKSUM;
rc->rc_error = SET_ERROR(ECKSUM);
ret++;
}
zio_buf_free(orig[c], rc->rc_size);
@ -1824,7 +1824,7 @@ vdev_raidz_combrec(zio_t *zio, int total_errors, int data_errors)
if (rc->rc_tried)
raidz_checksum_error(zio, rc,
orig[i]);
rc->rc_error = ECKSUM;
rc->rc_error = SET_ERROR(ECKSUM);
}
ret = code;
@ -2102,7 +2102,7 @@ vdev_raidz_io_done(zio_t *zio)
* Start checksum ereports for all children which haven't
* failed, and the IO wasn't speculative.
*/
zio->io_error = ECKSUM;
zio->io_error = SET_ERROR(ECKSUM);
if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
for (c = 0; c < rm->rm_cols; c++) {


@ -24,7 +24,7 @@
*/
/*
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -62,7 +62,7 @@ vdev_root_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
if (vd->vdev_children == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (EINVAL);
return (SET_ERROR(EINVAL));
}
vdev_open_children(vd);


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -325,7 +325,7 @@ zap_grow_ptrtbl(zap_t *zap, dmu_tx_t *tx)
* this is already an aberrant condition.
*/
if (zap->zap_f.zap_phys->zap_ptrtbl.zt_shift >= zap_hashbits(zap) - 2)
return (ENOSPC);
return (SET_ERROR(ENOSPC));
if (zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks == 0) {
/*
@ -714,7 +714,7 @@ static int
fzap_checkname(zap_name_t *zn)
{
if (zn->zn_key_orig_numints * zn->zn_key_intlen > ZAP_MAXNAMELEN)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
@ -729,7 +729,7 @@ fzap_checksize(uint64_t integer_size, uint64_t num_integers)
case 8:
break;
default:
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (integer_size * num_integers > ZAP_MAXVALUELEN)
@ -805,7 +805,7 @@ fzap_add_cd(zap_name_t *zn,
retry:
err = zap_leaf_lookup(l, zn, &zeh);
if (err == 0) {
err = EEXIST;
err = SET_ERROR(EEXIST);
goto out;
}
if (err != ENOENT)
@ -996,7 +996,7 @@ zap_join(objset_t *os, uint64_t fromobj, uint64_t intoobj, dmu_tx_t *tx)
zap_cursor_retrieve(&zc, &za) == 0;
(void) zap_cursor_advance(&zc)) {
if (za.za_integer_length != 8 || za.za_num_integers != 1)
return (EINVAL);
return (SET_ERROR(EINVAL));
err = zap_add(os, intoobj, za.za_name,
8, 1, &za.za_first_integer, tx);
if (err)
@ -1018,7 +1018,7 @@ zap_join_key(objset_t *os, uint64_t fromobj, uint64_t intoobj,
zap_cursor_retrieve(&zc, &za) == 0;
(void) zap_cursor_advance(&zc)) {
if (za.za_integer_length != 8 || za.za_num_integers != 1)
return (EINVAL);
return (SET_ERROR(EINVAL));
err = zap_add(os, intoobj, za.za_name,
8, 1, &value, tx);
if (err)
@ -1042,7 +1042,7 @@ zap_join_increment(objset_t *os, uint64_t fromobj, uint64_t intoobj,
uint64_t delta = 0;
if (za.za_integer_length != 8 || za.za_num_integers != 1)
return (EINVAL);
return (SET_ERROR(EINVAL));
err = zap_lookup(os, intoobj, za.za_name, 8, 1, &delta);
if (err != 0 && err != ENOENT)
@ -1250,7 +1250,7 @@ fzap_cursor_move_to_key(zap_cursor_t *zc, zap_name_t *zn)
zap_entry_handle_t zeh;
if (zn->zn_key_orig_numints * zn->zn_key_intlen > ZAP_MAXNAMELEN)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
err = zap_deref_leaf(zc->zc_zap, zn->zn_hash, NULL, RW_READER, &l);
if (err != 0)


@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -434,7 +435,7 @@ zap_leaf_lookup(zap_leaf_t *l, zap_name_t *zn, zap_entry_handle_t *zeh)
goto again;
}
return (ENOENT);
return (SET_ERROR(ENOENT));
}
/* Return (h1,cd1 >= h2,cd2) */
@ -492,14 +493,14 @@ zap_entry_read(const zap_entry_handle_t *zeh,
ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);
if (le->le_value_intlen > integer_size)
return (EINVAL);
return (SET_ERROR(EINVAL));
zap_leaf_array_read(zeh->zeh_leaf, le->le_value_chunk,
le->le_value_intlen, le->le_value_numints,
integer_size, num_integers, buf);
if (zeh->zeh_num_integers > num_integers)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
return (0);
}
@ -520,7 +521,7 @@ zap_entry_read_name(zap_t *zap, const zap_entry_handle_t *zeh, uint16_t buflen,
le->le_name_numints, 1, buflen, buf);
}
if (le->le_name_numints > buflen)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
return (0);
}
@ -536,7 +537,7 @@ zap_entry_update(zap_entry_handle_t *zeh,
ZAP_LEAF_ARRAY_NCHUNKS(le->le_value_numints * le->le_value_intlen);
if ((int)l->l_phys->l_hdr.lh_nfree < delta_chunks)
return (EAGAIN);
return (SET_ERROR(EAGAIN));
zap_leaf_array_free(l, &le->le_value_chunk);
le->le_value_chunk =
@ -626,7 +627,7 @@ zap_entry_create(zap_leaf_t *l, zap_name_t *zn, uint32_t cd,
}
if (l->l_phys->l_hdr.lh_nfree < numchunks)
return (EAGAIN);
return (SET_ERROR(EAGAIN));
/* make the entry */
chunk = zap_leaf_chunk_alloc(l);


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zio.h>
@ -791,7 +791,7 @@ zap_lookup_norm(objset_t *os, uint64_t zapobj, const char *name,
zn = zap_name_alloc(zap, name, mt);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
@ -800,12 +800,12 @@ zap_lookup_norm(objset_t *os, uint64_t zapobj, const char *name,
} else {
mze = mze_find(zn);
if (mze == NULL) {
err = ENOENT;
err = SET_ERROR(ENOENT);
} else {
if (num_integers < 1) {
err = EOVERFLOW;
err = SET_ERROR(EOVERFLOW);
} else if (integer_size != 8) {
err = EINVAL;
err = SET_ERROR(EINVAL);
} else {
*(uint64_t *)buf =
MZE_PHYS(zap, mze)->mze_value;
@ -837,7 +837,7 @@ zap_prefetch_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
fzap_prefetch(zn);
@ -860,7 +860,7 @@ zap_lookup_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
err = fzap_lookup(zn, integer_size, num_integers, buf,
@ -895,14 +895,14 @@ zap_length(objset_t *os, uint64_t zapobj, const char *name,
zn = zap_name_alloc(zap, name, MT_EXACT);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
err = fzap_length(zn, integer_size, num_integers);
} else {
mze = mze_find(zn);
if (mze == NULL) {
err = ENOENT;
err = SET_ERROR(ENOENT);
} else {
if (integer_size)
*integer_size = 8;
@ -929,7 +929,7 @@ zap_length_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
err = fzap_length(zn, integer_size, num_integers);
zap_name_free(zn);
@ -998,7 +998,7 @@ zap_add(objset_t *os, uint64_t zapobj, const char *key,
zn = zap_name_alloc(zap, key, MT_EXACT);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
err = fzap_add(zn, integer_size, num_integers, val, tx);
@ -1012,7 +1012,7 @@ zap_add(objset_t *os, uint64_t zapobj, const char *key,
} else {
mze = mze_find(zn);
if (mze != NULL) {
err = EEXIST;
err = SET_ERROR(EEXIST);
} else {
mzap_addent(zn, *intval);
}
@ -1039,7 +1039,7 @@ zap_add_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
err = fzap_add(zn, integer_size, num_integers, val, tx);
zap = zn->zn_zap; /* fzap_add() may change zap */
@ -1075,7 +1075,7 @@ zap_update(objset_t *os, uint64_t zapobj, const char *name,
zn = zap_name_alloc(zap, name, MT_EXACT);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
err = fzap_update(zn, integer_size, num_integers, val, tx);
@ -1120,7 +1120,7 @@ zap_update_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
err = fzap_update(zn, integer_size, num_integers, val, tx);
zap = zn->zn_zap; /* fzap_update() may change zap */
@ -1151,14 +1151,14 @@ zap_remove_norm(objset_t *os, uint64_t zapobj, const char *name,
zn = zap_name_alloc(zap, name, mt);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if (!zap->zap_ismicro) {
err = fzap_remove(zn, tx);
} else {
mze = mze_find(zn);
if (mze == NULL) {
err = ENOENT;
err = SET_ERROR(ENOENT);
} else {
zap->zap_m.zap_num_entries--;
bzero(&zap->zap_m.zap_phys->mz_chunk[mze->mze_chunkid],
@ -1185,7 +1185,7 @@ zap_remove_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
err = fzap_remove(zn, tx);
zap_name_free(zn);
@ -1263,7 +1263,7 @@ zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
mzap_ent_t *mze;
if (zc->zc_hash == -1ULL)
return (ENOENT);
return (SET_ERROR(ENOENT));
if (zc->zc_zap == NULL) {
int hb;
@ -1289,8 +1289,6 @@ zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
if (!zc->zc_zap->zap_ismicro) {
err = fzap_cursor_retrieve(zc->zc_zap, zc, za);
} else {
err = ENOENT;
mze_tofind.mze_hash = zc->zc_hash;
mze_tofind.mze_cd = zc->zc_cd;
@ -1313,6 +1311,7 @@ zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
err = 0;
} else {
zc->zc_hash = -1ULL;
err = SET_ERROR(ENOENT);
}
}
rw_exit(&zc->zc_zap->zap_rwlock);
@ -1346,7 +1345,7 @@ zap_cursor_move_to_key(zap_cursor_t *zc, const char *name, matchtype_t mt)
zn = zap_name_alloc(zc->zc_zap, name, mt);
if (zn == NULL) {
rw_exit(&zc->zc_zap->zap_rwlock);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if (!zc->zc_zap->zap_ismicro) {
@ -1354,7 +1353,7 @@ zap_cursor_move_to_key(zap_cursor_t *zc, const char *name, matchtype_t mt)
} else {
mze = mze_find(zn);
if (mze == NULL) {
err = ENOENT;
err = SET_ERROR(ENOENT);
goto out;
}
zc->zc_hash = mze->mze_hash;


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -226,13 +226,13 @@ feature_get_refcount(objset_t *os, uint64_t read_obj, uint64_t write_obj,
* have been allocated yet. Act as though all features are disabled.
*/
if (zapobj == 0)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
err = zap_lookup(os, zapobj, feature->fi_guid, sizeof (uint64_t), 1,
&refcount);
if (err != 0) {
if (err == ENOENT)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
else
return (err);
}
@ -273,16 +273,16 @@ feature_do_action(objset_t *os, uint64_t read_obj, uint64_t write_obj,
break;
case FEATURE_ACTION_INCR:
if (error == ENOENT)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
if (refcount == UINT64_MAX)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
refcount++;
break;
case FEATURE_ACTION_DECR:
if (error == ENOENT)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
if (refcount == 0)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
refcount--;
break;
default:


@ -21,6 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/types.h>
@ -681,7 +682,7 @@ zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vtype_t obj_type, zfs_acl_t *aclp,
*/
if (zfs_ace_valid(obj_type, aclp, aceptr->z_hdr.z_type,
aceptr->z_hdr.z_flags) != B_TRUE)
return (EINVAL);
return (SET_ERROR(EINVAL));
switch (acep->a_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
@ -788,7 +789,7 @@ zfs_copy_ace_2_oldace(vtype_t obj_type, zfs_acl_t *aclp, ace_t *acep,
*/
if (zfs_ace_valid(obj_type, aclp, aceptr->z_type,
aceptr->z_flags) != B_TRUE)
return (EINVAL);
return (SET_ERROR(EINVAL));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
@ -1122,7 +1123,7 @@ zfs_acl_node_read(znode_t *zp, boolean_t have_lock, zfs_acl_t **aclpp,
zfs_acl_node_free(aclnode);
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = EIO;
error = SET_ERROR(EIO);
goto done;
}
@ -1781,7 +1782,7 @@ zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
if (mask == 0)
return (ENOSYS);
return (SET_ERROR(ENOSYS));
if (error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr))
return (error);
@ -1875,7 +1876,7 @@ zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, vtype_t obj_type,
int error;
if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version));
@ -1937,10 +1938,10 @@ zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
uint64_t acl_obj;
if (mask == 0)
return (ENOSYS);
return (SET_ERROR(ENOSYS));
if (zp->z_pflags & ZFS_IMMUTABLE)
return (EPERM);
return (SET_ERROR(EPERM));
if (error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr))
return (error);
@ -2037,7 +2038,7 @@ zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
(zp->z_zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) &&
(!IS_DEVVP(ZTOV(zp)) ||
(IS_DEVVP(ZTOV(zp)) && (v4_mode & WRITE_MASK_ATTRS)))) {
return (EROFS);
return (SET_ERROR(EROFS));
}
/*
@ -2048,13 +2049,13 @@ zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
(zp->z_pflags & (ZFS_READONLY | ZFS_IMMUTABLE))) ||
(ZTOV(zp)->v_type == VDIR &&
(zp->z_pflags & ZFS_IMMUTABLE)))) {
return (EPERM);
return (SET_ERROR(EPERM));
}
#ifdef sun
if ((v4_mode & (ACE_DELETE | ACE_DELETE_CHILD)) &&
(zp->z_pflags & ZFS_NOUNLINK)) {
return (EPERM);
return (SET_ERROR(EPERM));
}
#else
/*
@ -2070,7 +2071,7 @@ zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
if (((v4_mode & (ACE_READ_DATA|ACE_EXECUTE)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED))) {
return (EACCES);
return (SET_ERROR(EACCES));
}
return (0);
@ -2178,7 +2179,7 @@ zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
break;
} else {
mutex_exit(&zp->z_acl_lock);
return (EIO);
return (SET_ERROR(EIO));
}
}
@ -2212,7 +2213,7 @@ zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
/* Put the found 'denies' back on the working mode */
if (deny_mask) {
*working_mode |= deny_mask;
return (EACCES);
return (SET_ERROR(EACCES));
} else if (*working_mode) {
return (-1);
}
@ -2279,7 +2280,7 @@ zfs_zaccess_append(znode_t *zp, uint32_t *working_mode, boolean_t *check_privs,
cred_t *cr)
{
if (*working_mode != ACE_WRITE_DATA)
return (EACCES);
return (SET_ERROR(EACCES));
return (zfs_zaccess_common(zp, ACE_APPEND_DATA, working_mode,
check_privs, B_FALSE, cr));
@ -2295,7 +2296,7 @@ zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
int error;
if (zdp->z_pflags & ZFS_AV_QUARANTINED)
return (EACCES);
return (SET_ERROR(EACCES));
is_attr = ((zdp->z_pflags & ZFS_XATTR) &&
(ZTOV(zdp)->v_type == VDIR));
@ -2501,7 +2502,7 @@ zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr)
* for are still present. If so then return EACCES
*/
if (working_mode & ~(ZFS_CHECKED_MASKS)) {
error = EACCES;
error = SET_ERROR(EACCES);
}
}
} else if (error == 0) {
@ -2615,7 +2616,7 @@ zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr)
*/
if (zp->z_pflags & (ZFS_IMMUTABLE | ZFS_NOUNLINK))
return (EPERM);
return (SET_ERROR(EPERM));
/*
* First row
@ -2682,7 +2683,7 @@ zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp,
int error;
if (szp->z_pflags & ZFS_AV_QUARANTINED)
return (EACCES);
return (SET_ERROR(EACCES));
add_perm = (ZTOV(szp)->v_type == VDIR) ?
ACE_ADD_SUBDIRECTORY : ACE_ADD_FILE;


@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -303,7 +303,7 @@ zfsctl_common_open(struct vop_open_args *ap)
int flags = ap->a_mode;
if (flags & FWRITE)
return (EACCES);
return (SET_ERROR(EACCES));
return (0);
}
@ -336,11 +336,11 @@ zfsctl_common_access(ap)
#ifdef TODO
if (flags & V_ACE_MASK) {
if (accmode & ACE_ALL_WRITE_PERMS)
return (EACCES);
return (SET_ERROR(EACCES));
} else {
#endif
if (accmode & VWRITE)
return (EACCES);
return (SET_ERROR(EACCES));
#ifdef TODO
}
#endif
@ -397,7 +397,15 @@ zfsctl_common_fid(ap)
ZFS_ENTER(zfsvfs);
#ifdef illumos
if (fidp->fid_len < SHORT_FID_LEN) {
fidp->fid_len = SHORT_FID_LEN;
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENOSPC));
}
#else
fidp->fid_len = SHORT_FID_LEN;
#endif
zfid = (zfid_short_t *)fidp;
@ -433,7 +441,7 @@ zfsctl_shares_fid(ap)
if (zfsvfs->z_shares_dir == 0) {
ZFS_EXIT(zfsvfs);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
@ -523,7 +531,7 @@ zfsctl_root_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
* No extended attributes allowed under .zfs
*/
if (flags & LOOKUP_XATTR)
return (EINVAL);
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
@ -631,10 +639,10 @@ zfsctl_snapshot_zname(vnode_t *vp, const char *name, int len, char *zname)
objset_t *os = ((zfsvfs_t *)((vp)->v_vfsp->vfs_data))->z_os;
if (snapshot_namecheck(name, NULL, NULL) != 0)
return (EILSEQ);
return (SET_ERROR(EILSEQ));
dmu_objset_name(os, zname);
if (strlen(zname) + 1 + strlen(name) >= len)
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
(void) strcat(zname, "@");
(void) strcat(zname, name);
return (0);
@ -776,7 +784,7 @@ zfsctl_snapdir_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm,
* Cannot move snapshots out of the snapdir.
*/
if (sdvp != tdvp)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (strcmp(snm, tnm) == 0)
return (0);
@ -786,7 +794,7 @@ zfsctl_snapdir_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm,
search.se_name = (char *)snm;
if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) == NULL) {
mutex_exit(&sdp->sd_lock);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
err = dsl_dataset_rename_snapshot(fsname, snm, tnm, 0);
@ -848,7 +856,7 @@ zfsctl_snapdir_remove(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
else
err = dsl_destroy_snapshot(snapname, B_FALSE);
} else {
err = ENOENT;
err = SET_ERROR(ENOENT);
}
mutex_exit(&sdp->sd_lock);
@ -872,7 +880,7 @@ zfsctl_snapdir_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp,
static enum uio_seg seg = UIO_SYSSPACE;
if (snapshot_namecheck(dirname, NULL, NULL) != 0)
return (EILSEQ);
return (SET_ERROR(EILSEQ));
dmu_objset_name(zfsvfs->z_os, name);
@ -942,7 +950,7 @@ zfsctl_snapdir_lookup(ap)
* No extended attributes allowed under .zfs
*/
if (flags & LOOKUP_XATTR)
return (EINVAL);
return (SET_ERROR(EINVAL));
ASSERT(ap->a_cnp->cn_namelen < sizeof(nm));
strlcpy(nm, ap->a_cnp->cn_nameptr, ap->a_cnp->cn_namelen + 1);
@ -957,7 +965,7 @@ zfsctl_snapdir_lookup(ap)
* add some flag to domount() to tell it not to do this lookup.
*/
if (MUTEX_HELD(&sdp->sd_lock))
return (ENOENT);
return (SET_ERROR(ENOENT));
ZFS_ENTER(zfsvfs);
@ -1031,15 +1039,20 @@ zfsctl_snapdir_lookup(ap)
}
if (dmu_objset_hold(snapname, FTAG, &snap) != 0) {
mutex_exit(&sdp->sd_lock);
#ifdef illumos
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENOENT));
#else /* !illumos */
/* Translate errors and add SAVENAME when needed. */
if ((cnp->cn_flags & ISLASTCN) && cnp->cn_nameiop == CREATE) {
err = EJUSTRETURN;
cnp->cn_flags |= SAVENAME;
} else {
err = ENOENT;
err = SET_ERROR(ENOENT);
}
ZFS_EXIT(zfsvfs);
return (err);
#endif /* !illumos */
}
sep = kmem_alloc(sizeof (zfs_snapentry_t), KM_SLEEP);
@ -1118,7 +1131,7 @@ zfsctl_shares_lookup(ap)
if (zfsvfs->z_shares_dir == 0) {
ZFS_EXIT(zfsvfs);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0)
error = VOP_LOOKUP(ZTOV(dzp), vpp, cnp);
@ -1199,7 +1212,7 @@ zfsctl_shares_readdir(ap)
if (zfsvfs->z_shares_dir == 0) {
ZFS_EXIT(zfsvfs);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
vn_lock(ZTOV(dzp), LK_SHARED | LK_RETRY);
@ -1207,7 +1220,7 @@ zfsctl_shares_readdir(ap)
VN_URELE(ZTOV(dzp));
} else {
*eofp = 1;
error = ENOENT;
error = SET_ERROR(ENOENT);
}
ZFS_EXIT(zfsvfs);
@ -1276,7 +1289,7 @@ zfsctl_shares_getattr(ap)
ZFS_ENTER(zfsvfs);
if (zfsvfs->z_shares_dir == 0) {
ZFS_EXIT(zfsvfs);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &dzp)) == 0) {
vn_lock(ZTOV(dzp), LK_SHARED | LK_RETRY);
@ -1668,7 +1681,7 @@ zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
error = traverse(&vp, LK_SHARED | LK_RETRY);
if (error == 0) {
if (vp == sep->se_root)
error = EINVAL;
error = SET_ERROR(EINVAL);
else
*zfsvfsp = VTOZ(vp)->z_zfsvfs;
}
@ -1678,7 +1691,7 @@ zfsctl_lookup_objset(vfs_t *vfsp, uint64_t objsetid, zfsvfs_t **zfsvfsp)
else
VN_RELE(vp);
} else {
error = EINVAL;
error = SET_ERROR(EINVAL);
mutex_exit(&sdp->sd_lock);
}

View File

@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/types.h>
@ -154,7 +155,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
if (name[0] == '.' &&
(name[1] == '\0' || (name[1] == '.' && name[2] == '\0')) ||
zfs_has_ctldir(dzp) && strcmp(name, ZFS_CTLDIR_NAME) == 0)
return (EEXIST);
return (SET_ERROR(EEXIST));
/*
* Case sensitivity and normalization preferences are set when
@ -225,7 +226,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
mutex_exit(&dzp->z_lock);
if (!(flag & ZHAVELOCK))
rw_exit(&dzp->z_name_lock);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
for (dl = dzp->z_dirlocks; dl != NULL; dl = dl->dl_next) {
if ((u8_strcmp(name, dl->dl_name, 0, cmpflags,
@ -236,7 +237,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
mutex_exit(&dzp->z_lock);
if (!(flag & ZHAVELOCK))
rw_exit(&dzp->z_name_lock);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
if (dl == NULL) {
size_t namesize;
@ -289,12 +290,12 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
vp = dnlc_lookup(ZTOV(dzp), name);
if (vp == DNLC_NO_VNODE) {
VN_RELE(vp);
error = ENOENT;
error = SET_ERROR(ENOENT);
} else if (vp) {
if (flag & ZNEW) {
zfs_dirent_unlock(dl);
VN_RELE(vp);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
*dlpp = dl;
*zpp = VTOZ(vp);
@ -312,7 +313,7 @@ zfs_dirent_lock(zfs_dirlock_t **dlpp, znode_t *dzp, char *name, znode_t **zpp,
} else {
if (flag & ZNEW) {
zfs_dirent_unlock(dl);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
error = zfs_zget(zfsvfs, zoid, zpp);
if (error) {
@ -719,7 +720,7 @@ zfs_link_create(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag)
if (zp->z_unlinked) { /* no new links to unlinked zp */
ASSERT(!(flag & (ZNEW | ZEXISTS)));
mutex_exit(&zp->z_lock);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
zp->z_links++;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
@ -820,11 +821,11 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
if (!(flag & ZRENAMING)) {
if (vn_vfswlock(vp)) /* prevent new mounts on zp */
return (EBUSY);
return (SET_ERROR(EBUSY));
if (vn_ismntpt(vp)) { /* don't remove mount point */
vn_vfsunlock(vp);
return (EBUSY);
return (SET_ERROR(EBUSY));
}
mutex_enter(&zp->z_lock);
@ -832,7 +833,11 @@ zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
if (zp_is_dir && !zfs_dirempty(zp)) {
mutex_exit(&zp->z_lock);
vn_vfsunlock(vp);
return (ENOTEMPTY);
#ifdef illumos
return (SET_ERROR(EEXIST));
#else
return (SET_ERROR(ENOTEMPTY));
#endif
}
/*
@ -943,7 +948,7 @@ zfs_make_xattrdir(znode_t *zp, vattr_t *vap, vnode_t **xvpp, cred_t *cr)
return (error);
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
zfs_acl_ids_free(&acl_ids);
return (EDQUOT);
return (SET_ERROR(EDQUOT));
}
top:
@ -1026,16 +1031,16 @@ zfs_get_xattrdir(znode_t *zp, vnode_t **xvpp, cred_t *cr, int flags)
if (!(flags & CREATE_XATTR_DIR)) {
zfs_dirent_unlock(dl);
#ifdef __FreeBSD__
return (ENOATTR);
#ifdef illumos
return (SET_ERROR(ENOENT));
#else
return (ENOENT);
return (SET_ERROR(ENOATTR));
#endif
}
if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
zfs_dirent_unlock(dl);
return (EROFS);
return (SET_ERROR(EROFS));
}
/*

File diff suppressed because it is too large

View File

@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/types.h>
@ -105,7 +106,7 @@ zfs_onexit_minor_to_state(minor_t minor, zfs_onexit_t **zo)
{
*zo = zfsdev_get_soft_state(minor, ZSST_CTLDEV);
if (*zo == NULL)
return (EBADF);
return (SET_ERROR(EBADF));
return (0);
}
@ -126,7 +127,7 @@ zfs_onexit_fd_hold(int fd, minor_t *minorp)
fp = getf(fd, CAP_NONE);
if (fp == NULL)
return (EBADF);
return (SET_ERROR(EBADF));
tmpfp = curthread->td_fpop;
curthread->td_fpop = fp;
@ -216,7 +217,7 @@ zfs_onexit_del_cb(minor_t minor, uint64_t action_handle, boolean_t fire)
kmem_free(ap, sizeof (zfs_onexit_action_node_t));
} else {
mutex_exit(&zo->zo_lock);
error = ENOENT;
error = SET_ERROR(ENOENT);
}
return (error);
@ -245,7 +246,7 @@ zfs_onexit_cb_data(minor_t minor, uint64_t action_handle, void **data)
if (ap != NULL)
*data = ap->za_data;
else
error = ENOENT;
error = SET_ERROR(ENOENT);
mutex_exit(&zo->zo_lock);
return (error);

View File

@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/types.h>
@ -73,7 +74,7 @@ zfs_init_vattr(vattr_t *vap, uint64_t mask, uint64_t mode,
static int
zfs_replay_error(zfsvfs_t *zfsvfs, lr_t *lr, boolean_t byteswap)
{
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
static void
@ -396,7 +397,7 @@ zfs_replay_create_acl(zfsvfs_t *zfsvfs,
#endif
break;
default:
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
}
bail:
@ -528,7 +529,7 @@ zfs_replay_create(zfsvfs_t *zfsvfs, lr_create_t *lr, boolean_t byteswap)
error = VOP_SYMLINK(ZTOV(dzp), &vp, &cn, &xva.xva_vattr, link /*,vflg*/);
break;
default:
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
}
VOP_UNLOCK(ZTOV(dzp), 0);
@ -584,7 +585,7 @@ zfs_replay_remove(zfsvfs_t *zfsvfs, lr_remove_t *lr, boolean_t byteswap)
error = VOP_RMDIR(ZTOV(dzp), vp, &cn /*,vflg*/);
break;
default:
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
}
vput(vp);
VOP_UNLOCK(ZTOV(dzp), 0);

View File

@ -22,7 +22,7 @@
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Pawel Jakub Dawidek <pawel@dawidek.net>.
* All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
@ -584,7 +584,7 @@ zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
* Is it a valid type of object to track?
*/
if (bonustype != DMU_OT_ZNODE && bonustype != DMU_OT_SA)
return (ENOENT);
return (SET_ERROR(ENOENT));
/*
* If we have a NULL data pointer
@ -593,7 +593,7 @@ zfs_space_delta_cb(dmu_object_type_t bonustype, void *data,
* use the same ids
*/
if (data == NULL)
return (EEXIST);
return (SET_ERROR(EEXIST));
if (bonustype == DMU_OT_ZNODE) {
znode_phys_t *znp = data;
@ -683,7 +683,7 @@ zfs_userspace_many(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
uint64_t obj;
if (!dmu_objset_userspace_present(zfsvfs->z_os))
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
obj = zfs_userquota_prop_to_obj(zfsvfs, type);
if (obj == 0) {
@ -727,7 +727,7 @@ id_to_fuidstr(zfsvfs_t *zfsvfs, const char *domain, uid_t rid,
if (domain && domain[0]) {
domainid = zfs_fuid_find_by_domain(zfsvfs, domain, NULL, addok);
if (domainid == -1)
return (ENOENT);
return (SET_ERROR(ENOENT));
}
fuid = FUID_ENCODE(domainid, rid);
(void) sprintf(buf, "%llx", (longlong_t)fuid);
@ -745,7 +745,7 @@ zfs_userspace_one(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
*valp = 0;
if (!dmu_objset_userspace_present(zfsvfs->z_os))
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
obj = zfs_userquota_prop_to_obj(zfsvfs, type);
if (obj == 0)
@ -772,10 +772,10 @@ zfs_set_userquota(zfsvfs_t *zfsvfs, zfs_userquota_prop_t type,
boolean_t fuid_dirtied;
if (type != ZFS_PROP_USERQUOTA && type != ZFS_PROP_GROUPQUOTA)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (zfsvfs->z_version < ZPL_VERSION_USERSPACE)
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
objp = (type == ZFS_PROP_USERQUOTA) ? &zfsvfs->z_userquota_obj :
&zfsvfs->z_groupquota_obj;
@ -903,7 +903,7 @@ zfsvfs_create(const char *osname, zfsvfs_t **zfvp)
"on a version %lld pool\n. Pool must be upgraded to mount "
"this file system.", (u_longlong_t)zfsvfs->z_version,
(u_longlong_t)spa_version(dmu_objset_spa(os)));
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
goto out;
}
if ((error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &zval)) != 0)
@ -1152,6 +1152,18 @@ zfs_domount(vfs_t *vfsp, char *osname)
return (error);
zfsvfs->z_vfs = vfsp;
#ifdef illumos
/* Initialize the generic filesystem structure. */
vfsp->vfs_bcount = 0;
vfsp->vfs_data = NULL;
if (zfs_create_unique_device(&mount_dev) == -1) {
error = SET_ERROR(ENODEV);
goto out;
}
ASSERT(vfs_devismounted(mount_dev) == 0);
#endif
if (error = dsl_prop_get_integer(osname, "recordsize", &recordsize,
NULL))
goto out;
@ -1282,7 +1294,7 @@ str_to_uint64(char *str, uint64_t *objnum)
while (*str) {
if (*str < '0' || *str > '9')
return (EINVAL);
return (SET_ERROR(EINVAL));
num = num*10 + *str++ - '0';
}
@ -1304,7 +1316,7 @@ zfs_parse_bootfs(char *bpath, char *outpath)
int error;
if (*bpath == 0 || *bpath == '/')
return (EINVAL);
return (SET_ERROR(EINVAL));
(void) strcpy(outpath, bpath);
@ -1349,10 +1361,10 @@ zfs_check_global_label(const char *dsname, const char *hexsl)
if (dsl_prop_get_integer(dsname,
zfs_prop_to_name(ZFS_PROP_READONLY), &rdonly, NULL))
return (EACCES);
return (SET_ERROR(EACCES));
return (rdonly ? 0 : EACCES);
}
return (EACCES);
return (SET_ERROR(EACCES));
}
/*
@ -1384,7 +1396,7 @@ zfs_mount_label_policy(vfs_t *vfsp, char *osname)
error = dsl_prop_get(osname, zfs_prop_to_name(ZFS_PROP_MLSLABEL),
1, sizeof (ds_hexsl), &ds_hexsl, NULL);
if (error)
return (EACCES);
return (SET_ERROR(EACCES));
/*
* If labeling is NOT enabled, then disallow the mount of datasets
@ -1394,7 +1406,7 @@ zfs_mount_label_policy(vfs_t *vfsp, char *osname)
if (!is_system_labeled()) {
if (strcasecmp(ds_hexsl, ZFS_MLSLABEL_DEFAULT) == 0)
return (0);
return (EACCES);
return (SET_ERROR(EACCES));
}
/*
@ -1411,7 +1423,7 @@ zfs_mount_label_policy(vfs_t *vfsp, char *osname)
if (dsl_prop_get_integer(osname,
zfs_prop_to_name(ZFS_PROP_ZONED), &zoned, NULL))
return (EACCES);
return (SET_ERROR(EACCES));
if (!zoned)
return (zfs_check_global_label(osname, ds_hexsl));
else
@ -1483,7 +1495,7 @@ zfs_mountroot(vfs_t *vfsp, enum whymountroot why)
*/
if (why == ROOT_INIT) {
if (zfsrootdone++)
return (EBUSY);
return (SET_ERROR(EBUSY));
/*
* the process of doing a spa_load will require the
* clock to be set before we could (for example) do
@ -1495,7 +1507,7 @@ zfs_mountroot(vfs_t *vfsp, enum whymountroot why)
if ((zfs_bootfs = spa_get_bootprop("zfs-bootfs")) == NULL) {
cmn_err(CE_NOTE, "spa_get_bootfs: can not get "
"bootfs name");
return (EINVAL);
return (SET_ERROR(EINVAL));
}
zfs_devid = spa_get_bootprop("diskdevid");
error = spa_import_rootpool(rootfs.bo_name, zfs_devid);
@ -1564,7 +1576,7 @@ zfs_mountroot(vfs_t *vfsp, enum whymountroot why)
* if "why" is equal to anything else other than ROOT_INIT,
* ROOT_REMOUNT, or ROOT_UNMOUNT, we do not support it.
*/
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
#endif /* OPENSOLARIS_MOUNTROOT */
@ -1598,11 +1610,33 @@ zfs_mount(vfs_t *vfsp)
int error = 0;
int canwrite;
#ifdef illumos
if (mvp->v_type != VDIR)
return (SET_ERROR(ENOTDIR));
mutex_enter(&mvp->v_lock);
if ((uap->flags & MS_REMOUNT) == 0 &&
(uap->flags & MS_OVERLAY) == 0 &&
(mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
mutex_exit(&mvp->v_lock);
return (SET_ERROR(EBUSY));
}
mutex_exit(&mvp->v_lock);
/*
* ZFS does not support passing unparsed data in via MS_DATA.
* Users should use the MS_OPTIONSTR interface; this means
* that all option parsing is already done and the options struct
* can be interrogated.
*/
if ((uap->flags & MS_DATA) && uap->datalen > 0)
#else
if (!prison_allow(td->td_ucred, PR_ALLOW_MOUNT_ZFS))
return (EPERM);
return (SET_ERROR(EPERM));
if (vfs_getopt(vfsp->mnt_optnew, "from", (void **)&osname, NULL))
return (EINVAL);
return (SET_ERROR(EINVAL));
#endif /* ! illumos */
/*
* If full-owner-access is enabled and delegated administration is
@ -1657,7 +1691,7 @@ zfs_mount(vfs_t *vfsp)
*/
if (!INGLOBALZONE(curthread) &&
(!zone_dataset_visible(osname, &canwrite) || !canwrite)) {
error = EPERM;
error = SET_ERROR(EPERM);
goto out;
}
@ -1848,7 +1882,7 @@ zfsvfs_teardown(zfsvfs_t *zfsvfs, boolean_t unmounting)
if (!unmounting && (zfsvfs->z_unmounted || zfsvfs->z_os == NULL)) {
rw_exit(&zfsvfs->z_teardown_inactive_lock);
rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
return (EIO);
return (SET_ERROR(EIO));
}
/*
@ -1982,11 +2016,11 @@ zfs_umount(vfs_t *vfsp, int fflag)
*/
if (zfsvfs->z_ctldir == NULL) {
if (vfsp->vfs_count > 1)
return (EBUSY);
return (SET_ERROR(EBUSY));
} else {
if (vfsp->vfs_count > 2 ||
zfsvfs->z_ctldir->v_count > 1)
return (EBUSY);
return (SET_ERROR(EBUSY));
}
}
#endif
@ -2116,7 +2150,7 @@ zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, int flags, vnode_t **vpp)
err = zfsctl_lookup_objset(vfsp, objsetid, &zfsvfs);
if (err)
return (EINVAL);
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
}
@ -2130,7 +2164,7 @@ zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, int flags, vnode_t **vpp)
fid_gen |= ((uint64_t)zfid->zf_gen[i]) << (8 * i);
} else {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
@ -2175,7 +2209,7 @@ zfs_fhtovp(vfs_t *vfsp, fid_t *fidp, int flags, vnode_t **vpp)
dprintf("znode gen (%u) != fid gen (%u)\n", zp_gen, fid_gen);
VN_RELE(ZTOV(zp));
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
*vpp = ZTOV(zp);
@ -2386,14 +2420,14 @@ zfs_set_version(zfsvfs_t *zfsvfs, uint64_t newvers)
dmu_tx_t *tx;
if (newvers < ZPL_VERSION_INITIAL || newvers > ZPL_VERSION)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (newvers < zfsvfs->z_version)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (zfs_spa_version_map(newvers) >
spa_version(dmu_objset_spa(zfsvfs->z_os)))
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, B_FALSE, ZPL_VERSION_STR);

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/* Portions Copyright 2007 Jeremy Teo */
@ -179,7 +179,7 @@ zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
((flag & FAPPEND) == 0)) {
ZFS_EXIT(zfsvfs);
return (EPERM);
return (SET_ERROR(EPERM));
}
if (!zfs_has_ctldir(zp) && zp->z_zfsvfs->z_vscan &&
@ -187,7 +187,7 @@ zfs_open(vnode_t **vpp, int flag, cred_t *cr, caller_context_t *ct)
!(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
if (fs_vscan(*vpp, cr, 0) != 0) {
ZFS_EXIT(zfsvfs);
return (EACCES);
return (SET_ERROR(EACCES));
}
}
@ -244,7 +244,7 @@ zfs_holey(vnode_t *vp, u_long cmd, offset_t *off)
file_sz = zp->z_size;
if (noff >= file_sz) {
return (ENXIO);
return (SET_ERROR(ENXIO));
}
if (cmd == _FIO_SEEK_HOLE)
@ -263,7 +263,7 @@ zfs_holey(vnode_t *vp, u_long cmd, offset_t *off)
*off = file_sz;
return (0);
}
return (ENXIO);
return (SET_ERROR(ENXIO));
}
if (noff < *off)
@ -298,7 +298,7 @@ zfs_ioctl(vnode_t *vp, u_long com, intptr_t data, int flag, cred_t *cred,
case _FIO_SEEK_HOLE:
#ifdef sun
if (ddi_copyin((void *)data, &off, sizeof (off), flag))
return (EFAULT);
return (SET_ERROR(EFAULT));
#else
off = *(offset_t *)data;
#endif
@ -314,13 +314,13 @@ zfs_ioctl(vnode_t *vp, u_long com, intptr_t data, int flag, cred_t *cred,
return (error);
#ifdef sun
if (ddi_copyout(&off, (void *)data, sizeof (off), flag))
return (EFAULT);
return (SET_ERROR(EFAULT));
#else
*(offset_t *)data = off;
#endif
return (0);
}
return (ENOTTY);
return (SET_ERROR(ENOTTY));
}
static vm_page_t
@ -655,7 +655,7 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
if (zp->z_pflags & ZFS_AV_QUARANTINED) {
ZFS_EXIT(zfsvfs);
return (EACCES);
return (SET_ERROR(EACCES));
}
/*
@ -663,7 +663,7 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
*/
if (uio->uio_loffset < (offset_t)0) {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
@ -756,7 +756,7 @@ zfs_read(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = EIO;
error = SET_ERROR(EIO);
break;
}
@ -844,7 +844,7 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
(uio->uio_loffset < zp->z_size))) {
ZFS_EXIT(zfsvfs);
return (EPERM);
return (SET_ERROR(EPERM));
}
zilog = zfsvfs->z_log;
@ -855,7 +855,7 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
if (woff < 0) {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
@ -918,7 +918,7 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
if (woff >= limit) {
zfs_range_unlock(rl);
ZFS_EXIT(zfsvfs);
return (EFBIG);
return (SET_ERROR(EFBIG));
}
if ((woff + n) > limit || woff > (limit - n))
@ -942,7 +942,7 @@ zfs_write(vnode_t *vp, uio_t *uio, int ioflag, cred_t *cr, caller_context_t *ct)
zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
if (abuf != NULL)
dmu_return_arcbuf(abuf);
error = EDQUOT;
error = SET_ERROR(EDQUOT);
break;
}
@ -1207,7 +1207,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
* Nothing to do if the file has been removed
*/
if (zfs_zget(zfsvfs, object, &zp) != 0)
return (ENOENT);
return (SET_ERROR(ENOENT));
if (zp->z_unlinked) {
/*
* Release the vnode asynchronously as we currently have the
@ -1215,7 +1215,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
*/
VN_RELE_ASYNC(ZTOV(zp),
dsl_pool_vnrele_taskq(dmu_objset_pool(os)));
return (ENOENT);
return (SET_ERROR(ENOENT));
}
zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
@ -1233,7 +1233,7 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
zgd->zgd_rl = zfs_range_lock(zp, offset, size, RL_READER);
/* test for truncation needs to be done while range locked */
if (offset >= zp->z_size) {
error = ENOENT;
error = SET_ERROR(ENOENT);
} else {
error = dmu_read(os, object, offset, size, buf,
DMU_READ_NO_PREFETCH);
@ -1260,10 +1260,10 @@ zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
}
/* test for truncation needs to be done while range locked */
if (lr->lr_offset >= zp->z_size)
error = ENOENT;
error = SET_ERROR(ENOENT);
#ifdef DEBUG
if (zil_fault_io) {
error = EIO;
error = SET_ERROR(EIO);
zil_fault_io = 0;
}
#endif
@ -1344,7 +1344,7 @@ specvp_check(vnode_t **vpp, cred_t *cr)
svp = specvp(*vpp, (*vpp)->v_rdev, (*vpp)->v_type, cr);
VN_RELE(*vpp);
if (svp == NULL)
error = ENOSYS;
error = SET_ERROR(ENOSYS);
*vpp = svp;
}
return (error);
@ -1388,9 +1388,9 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
if (dvp->v_type != VDIR) {
return (ENOTDIR);
return (SET_ERROR(ENOTDIR));
} else if (zdp->z_sa_hdl == NULL) {
return (EIO);
return (SET_ERROR(EIO));
}
if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
@ -1412,7 +1412,7 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
}
if (tvp == DNLC_NO_VNODE) {
VN_RELE(tvp);
return (ENOENT);
return (SET_ERROR(ENOENT));
} else {
*vpp = tvp;
return (specvp_check(vpp, cr));
@ -1435,7 +1435,7 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
*/
if (!(zfsvfs->z_vfs->vfs_flag & VFS_XATTR)) {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
#endif
@ -1445,7 +1445,7 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
*/
if (zdp->z_pflags & ZFS_XATTR) {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (error = zfs_get_xattrdir(VTOZ(dvp), vpp, cr, flags)) {
@ -1469,7 +1469,7 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
if (dvp->v_type != VDIR) {
ZFS_EXIT(zfsvfs);
return (ENOTDIR);
return (SET_ERROR(ENOTDIR));
}
/*
@ -1484,7 +1484,7 @@ zfs_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, struct componentname *cnp,
if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (EILSEQ);
return (SET_ERROR(EILSEQ));
}
error = zfs_dirlook(zdp, nm, vpp, flags, direntflags, realpnp);
@ -1608,7 +1608,7 @@ zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || (vap->va_mask & AT_XVATTR) ||
IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (EINVAL);
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
@ -1618,7 +1618,7 @@ zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (EILSEQ);
return (SET_ERROR(EILSEQ));
}
if (vap->va_mask & AT_XVATTR) {
@ -1655,7 +1655,7 @@ zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
if (have_acl)
zfs_acl_ids_free(&acl_ids);
if (strcmp(name, "..") == 0)
error = EISDIR;
error = SET_ERROR(EISDIR);
ZFS_EXIT(zfsvfs);
return (error);
}
@ -1683,7 +1683,7 @@ zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
(vap->va_type != VREG)) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
error = EINVAL;
error = SET_ERROR(EINVAL);
goto out;
}
@ -1694,7 +1694,7 @@ zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
zfs_acl_ids_free(&acl_ids);
error = EDQUOT;
error = SET_ERROR(EDQUOT);
goto out;
}
@ -1753,14 +1753,14 @@ zfs_create(vnode_t *dvp, char *name, vattr_t *vap, int excl, int mode,
* Can't truncate an existing file if in exclusive mode.
*/
if (excl == EXCL) {
error = EEXIST;
error = SET_ERROR(EEXIST);
goto out;
}
/*
* Can't open a directory for writing.
*/
if ((ZTOV(zp)->v_type == VDIR) && (mode & S_IWRITE)) {
error = EISDIR;
error = SET_ERROR(EISDIR);
goto out;
}
/*
@ -1883,7 +1883,7 @@ zfs_remove(vnode_t *dvp, char *name, cred_t *cr, caller_context_t *ct,
* Need to use rmdir for removing directories.
*/
if (vp->v_type == VDIR) {
error = EPERM;
error = SET_ERROR(EPERM);
goto out;
}
@ -2098,7 +2098,7 @@ zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr,
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || (vap->va_mask & AT_XVATTR) ||
IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (EINVAL);
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
@ -2106,13 +2106,13 @@ zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr,
if (dzp->z_pflags & ZFS_XATTR) {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (zfsvfs->z_utf8 && u8_validate(dirname,
strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (EILSEQ);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zf |= ZCILOOK;
@ -2158,7 +2158,7 @@ zfs_mkdir(vnode_t *dvp, char *dirname, vattr_t *vap, vnode_t **vpp, cred_t *cr,
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
ZFS_EXIT(zfsvfs);
return (EDQUOT);
return (SET_ERROR(EDQUOT));
}
/*
@ -2284,12 +2284,12 @@ zfs_rmdir(vnode_t *dvp, char *name, vnode_t *cwd, cred_t *cr,
}
if (vp->v_type != VDIR) {
error = ENOTDIR;
error = SET_ERROR(ENOTDIR);
goto out;
}
if (vp == cwd) {
error = EINVAL;
error = SET_ERROR(EINVAL);
goto out;
}
@ -2435,7 +2435,7 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, int *ncookies, u_lon
*/
if (uio->uio_iov->iov_len <= 0) {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
@ -2549,7 +2549,7 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, int *ncookies, u_lon
"entry, obj = %lld, offset = %lld\n",
(u_longlong_t)zp->z_id,
(u_longlong_t)offset);
error = ENXIO;
error = SET_ERROR(ENXIO);
goto update;
}
@ -2598,7 +2598,7 @@ zfs_readdir(vnode_t *vp, uio_t *uio, cred_t *cr, int *eofp, int *ncookies, u_lon
* Did we manage to fit anything in the buffer?
*/
if (!outcount) {
error = EINVAL;
error = SET_ERROR(EINVAL);
goto update;
}
break;
@ -2984,7 +2984,7 @@ zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
return (0);
if (mask & AT_NOSET)
return (EINVAL);
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
@ -3001,17 +3001,17 @@ zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
(mask & AT_XVATTR))) {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (mask & AT_SIZE && vp->v_type == VDIR) {
ZFS_EXIT(zfsvfs);
return (EISDIR);
return (SET_ERROR(EISDIR));
}
if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
@ -3029,12 +3029,12 @@ zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
ZFS_EXIT(zfsvfs);
return (EPERM);
return (SET_ERROR(EPERM));
}
if ((mask & AT_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
ZFS_EXIT(zfsvfs);
return (EPERM);
return (SET_ERROR(EPERM));
}
/*
@ -3047,7 +3047,7 @@ zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
ZFS_EXIT(zfsvfs);
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
}
}
@ -3058,7 +3058,7 @@ zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
/* Can this be moved to before the top label? */
if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
ZFS_EXIT(zfsvfs);
return (EROFS);
return (SET_ERROR(EROFS));
}
/*
@ -3216,7 +3216,7 @@ zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
mutex_exit(&zp->z_lock);
ZFS_EXIT(zfsvfs);
return (EPERM);
return (SET_ERROR(EPERM));
}
if (need_policy == FALSE &&
@ -3303,7 +3303,7 @@ zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
zfs_fuid_overquota(zfsvfs, B_FALSE, new_uid)) {
if (attrzp)
VN_RELE(ZTOV(attrzp));
err = EDQUOT;
err = SET_ERROR(EDQUOT);
goto out2;
}
}
@ -3315,7 +3315,7 @@ zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
zfs_fuid_overquota(zfsvfs, B_TRUE, new_gid)) {
if (attrzp)
VN_RELE(ZTOV(attrzp));
err = EDQUOT;
err = SET_ERROR(EDQUOT);
goto out2;
}
}
@ -3329,7 +3329,7 @@ zfs_setattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
!(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
err = EPERM;
err = SET_ERROR(EPERM);
goto out;
}
@ -3657,7 +3657,7 @@ zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
*zlpp = zl;
if (oidp == szp->z_id) /* We're a descendant of szp */
return (EINVAL);
return (SET_ERROR(EINVAL));
if (oidp == rootid) /* We've hit the top */
return (0);
@ -3725,7 +3725,7 @@ zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
if (tdvp->v_vfsp != sdvp->v_vfsp || zfsctl_is_node(tdvp)) {
ZFS_EXIT(zfsvfs);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
tdzp = VTOZ(tdvp);
@ -3733,7 +3733,7 @@ zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
if (zfsvfs->z_utf8 && u8_validate(tnm,
strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (EILSEQ);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
@ -3751,7 +3751,7 @@ zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
*/
if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
@ -3856,7 +3856,7 @@ zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
* not the case for FreeBSD, so we check for "." explicitly.
*/
if (strcmp(snm, ".") == 0 || strcmp(snm, "..") == 0)
serr = EINVAL;
serr = SET_ERROR(EINVAL);
ZFS_EXIT(zfsvfs);
return (serr);
}
@ -3868,7 +3868,7 @@ zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
rw_exit(&sdzp->z_name_lock);
if (strcmp(tnm, "..") == 0)
terr = EINVAL;
terr = SET_ERROR(EINVAL);
ZFS_EXIT(zfsvfs);
return (terr);
}
@ -3901,12 +3901,12 @@ zfs_rename(vnode_t *sdvp, char *snm, vnode_t *tdvp, char *tnm, cred_t *cr,
*/
if (ZTOV(szp)->v_type == VDIR) {
if (ZTOV(tzp)->v_type != VDIR) {
error = ENOTDIR;
error = SET_ERROR(ENOTDIR);
goto out;
}
} else {
if (ZTOV(tzp)->v_type == VDIR) {
error = EISDIR;
error = SET_ERROR(EISDIR);
goto out;
}
}
@ -4091,14 +4091,14 @@ zfs_symlink(vnode_t *dvp, vnode_t **vpp, char *name, vattr_t *vap, char *link,
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (EILSEQ);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
if (len > MAXPATHLEN) {
ZFS_EXIT(zfsvfs);
return (ENAMETOOLONG);
return (SET_ERROR(ENAMETOOLONG));
}
if ((error = zfs_acl_ids_create(dzp, 0,
@ -4128,7 +4128,7 @@ zfs_symlink(vnode_t *dvp, vnode_t **vpp, char *name, vattr_t *vap, char *link,
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
ZFS_EXIT(zfsvfs);
return (EDQUOT);
return (SET_ERROR(EDQUOT));
}
tx = dmu_tx_create(zfsvfs->z_os);
fuid_dirtied = zfsvfs->z_fuid_dirty;
@ -4290,12 +4290,12 @@ zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
*/
if (svp->v_type == VDIR) {
ZFS_EXIT(zfsvfs);
return (EPERM);
return (SET_ERROR(EPERM));
}
if (svp->v_vfsp != tdvp->v_vfsp || zfsctl_is_node(svp)) {
ZFS_EXIT(zfsvfs);
return (EXDEV);
return (SET_ERROR(EXDEV));
}
szp = VTOZ(svp);
@ -4310,13 +4310,13 @@ zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
}
if (parent == zfsvfs->z_shares_dir) {
ZFS_EXIT(zfsvfs);
return (EPERM);
return (SET_ERROR(EPERM));
}
if (zfsvfs->z_utf8 && u8_validate(name,
strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (EILSEQ);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zf |= ZCILOOK;
@ -4329,14 +4329,14 @@ zfs_link(vnode_t *tdvp, vnode_t *svp, char *name, cred_t *cr,
*/
if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
if (owner != crgetuid(cr) && secpolicy_basic_link(svp, cr) != 0) {
ZFS_EXIT(zfsvfs);
return (EPERM);
return (SET_ERROR(EPERM));
}
if (error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr)) {
@ -4477,7 +4477,7 @@ zfs_putapage(vnode_t *vp, page_t *pp, u_offset_t *offp,
if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
err = EDQUOT;
err = SET_ERROR(EDQUOT);
goto out;
}
top:
@ -4729,7 +4729,7 @@ zfs_frlock(vnode_t *vp, int cmd, flock64_t *bfp, int flag, offset_t offset,
*/
if (zp->z_mapcnt > 0 && MANDMODE(zp->z_mode)) {
ZFS_EXIT(zfsvfs);
return (EAGAIN);
return (SET_ERROR(EAGAIN));
}
ZFS_EXIT(zfsvfs);
return (fs_frlock(vp, cmd, bfp, flag, offset, flk_cbp, cr, ct));
@ -4793,7 +4793,7 @@ zfs_fillpage(vnode_t *vp, u_offset_t off, struct seg *seg,
pvn_read_done(pp, B_ERROR);
/* convert checksum errors into IO errors */
if (err == ECKSUM)
err = EIO;
err = SET_ERROR(EIO);
return (err);
}
cur_pp = cur_pp->p_next;
@ -4943,28 +4943,28 @@ zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
if ((prot & PROT_WRITE) && (zp->z_pflags &
(ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
ZFS_EXIT(zfsvfs);
return (EPERM);
return (SET_ERROR(EPERM));
}
if ((prot & (PROT_READ | PROT_EXEC)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED)) {
ZFS_EXIT(zfsvfs);
return (EACCES);
return (SET_ERROR(EACCES));
}
if (vp->v_flag & VNOMAP) {
ZFS_EXIT(zfsvfs);
return (ENOSYS);
return (SET_ERROR(ENOSYS));
}
if (off < 0 || len > MAXOFFSET_T - off) {
ZFS_EXIT(zfsvfs);
return (ENXIO);
return (SET_ERROR(ENXIO));
}
if (vp->v_type != VREG) {
ZFS_EXIT(zfsvfs);
return (ENODEV);
return (SET_ERROR(ENODEV));
}
/*
@ -4972,7 +4972,7 @@ zfs_map(vnode_t *vp, offset_t off, struct as *as, caddr_t *addrp,
*/
if (MANDMODE(zp->z_mode) && vn_has_flocks(vp)) {
ZFS_EXIT(zfsvfs);
return (EAGAIN);
return (SET_ERROR(EAGAIN));
}
as_rangelock(as);
@ -5087,7 +5087,7 @@ zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag,
if (cmd != F_FREESP) {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
if (error = convoff(vp, bfp, 0, offset)) {
@ -5097,7 +5097,7 @@ zfs_space(vnode_t *vp, int cmd, flock64_t *bfp, int flag,
if (bfp->l_len < 0) {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
off = bfp->l_start;
@ -5137,7 +5137,16 @@ zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
gen = (uint32_t)gen64;
size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
#ifdef illumos
if (fidp->fid_len < size) {
fidp->fid_len = size;
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENOSPC));
}
#else
fidp->fid_len = size;
#endif
zfid = (zfid_short_t *)fidp;
@ -5322,7 +5331,7 @@ zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
int preamble, postamble;
if (xuio->xu_type != UIOTYPE_ZEROCOPY)
return (EINVAL);
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
@ -5335,7 +5344,7 @@ zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
blksz = max_blksz;
if (size < blksz || zp->z_blksz != blksz) {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
/*
* Caller requests buffers for write before knowing where the
@ -5403,7 +5412,7 @@ zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
/* avoid potential complexity of dealing with it */
if (blksz > max_blksz) {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
maxsize = zp->z_size - uio->uio_loffset;
@ -5412,12 +5421,12 @@ zfs_reqzcbuf(vnode_t *vp, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr,
if (size < blksz || vn_has_cached_data(vp)) {
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
break;
default:
ZFS_EXIT(zfsvfs);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
uio->uio_extflg = UIO_XUIO;
@ -5464,13 +5473,13 @@ static int zfs_isdir();
static int
zfs_inval()
{
return (EINVAL);
return (SET_ERROR(EINVAL));
}
static int
zfs_isdir()
{
return (EISDIR);
return (SET_ERROR(EISDIR));
}
/*
* Directory vnode operations template

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/* Portions Copyright 2007 Jeremy Teo */
@ -1173,8 +1173,10 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
#ifdef __FreeBSD__
getnewvnode_drop_reserve();
return (EINVAL);
#endif
return (SET_ERROR(EINVAL));
}
hdl = dmu_buf_get_user(db);
@ -1193,7 +1195,7 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
mutex_enter(&zp->z_lock);
ASSERT3U(zp->z_id, ==, obj_num);
if (zp->z_unlinked) {
err = ENOENT;
err = SET_ERROR(ENOENT);
} else {
vp = ZTOV(zp);
*zpp = zp;
@ -1245,7 +1247,7 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
doi.doi_bonus_type, NULL);
if (zp == NULL) {
err = ENOENT;
err = SET_ERROR(ENOENT);
} else {
*zpp = zp;
}
@ -1304,7 +1306,7 @@ zfs_rezget(znode_t *zp)
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
return (EINVAL);
return (SET_ERROR(EINVAL));
}
zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
@ -1331,7 +1333,7 @@ zfs_rezget(znode_t *zp)
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
zfs_znode_dmu_fini(zp);
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
return (EIO);
return (SET_ERROR(EIO));
}
zp->z_mode = mode;
@ -1339,7 +1341,7 @@ zfs_rezget(znode_t *zp)
if (gen != zp->z_gen) {
zfs_znode_dmu_fini(zp);
ZFS_OBJ_HOLD_EXIT(zfsvfs, obj_num);
return (EIO);
return (SET_ERROR(EIO));
}
/*
@ -2007,7 +2009,7 @@ zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)) {
sa_buf_rele(*db, tag);
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
@ -2076,7 +2078,7 @@ zfs_obj_to_pobj(objset_t *osp, sa_handle_t *hdl, sa_attr_type_t *sa_table,
* Otherwise the parent must be a directory.
*/
if (!*is_xattrdir && !S_ISDIR(parent_mode))
return (EINVAL);
return (SET_ERROR(EINVAL));
*pobjp = parent;

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
@ -153,7 +153,7 @@ zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
avl_index_t where;
if (avl_find(t, dva, &where) != NULL)
return (EEXIST);
return (SET_ERROR(EEXIST));
zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
zn->zn_dva = *dva;
@ -224,7 +224,7 @@ zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
error = ECKSUM;
error = SET_ERROR(ECKSUM);
} else {
bcopy(lr, dst, len);
*end = (char *)dst + len;
@ -238,7 +238,7 @@ zil_read_log_block(zilog_t *zilog, const blkptr_t *bp, blkptr_t *nbp, void *dst,
if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
(zilc->zc_nused > (size - sizeof (*zilc)))) {
error = ECKSUM;
error = SET_ERROR(ECKSUM);
} else {
bcopy(lr, dst, zilc->zc_nused);
*end = (char *)dst + zilc->zc_nused;
@ -1860,7 +1860,7 @@ zil_suspend(const char *osname, void **cookiep)
if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (EBUSY);
return (SET_ERROR(EBUSY));
}
/*
@ -2123,6 +2123,6 @@ zil_vdev_offline(const char *osname, void *arg)
error = zil_suspend(osname, NULL);
if (error != 0)
return (EEXIST);
return (SET_ERROR(EEXIST));
return (0);
}

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -407,7 +407,7 @@ zio_decompress(zio_t *zio, void *data, uint64_t size)
if (zio->io_error == 0 &&
zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
zio->io_data, data, zio->io_size, size) != 0)
zio->io_error = EIO;
zio->io_error = SET_ERROR(EIO);
}
/*
@ -2081,7 +2081,7 @@ zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
if (arc_buf_size(abuf) != zio->io_orig_size ||
bcmp(abuf->b_data, zio->io_orig_data,
zio->io_orig_size) != 0)
error = EEXIST;
error = SET_ERROR(EEXIST);
VERIFY(arc_buf_remove_ref(abuf, &abuf));
}
@ -2552,7 +2552,7 @@ zio_vdev_io_start(zio_t *zio)
return (ZIO_PIPELINE_STOP);
if (!vdev_accessible(vd, zio)) {
zio->io_error = ENXIO;
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return (ZIO_PIPELINE_STOP);
}
@ -2606,7 +2606,7 @@ zio_vdev_io_done(zio_t *zio)
if (zio->io_error) {
if (!vdev_accessible(vd, zio)) {
zio->io_error = ENXIO;
zio->io_error = SET_ERROR(ENXIO);
} else {
unexpected_error = B_TRUE;
}
@ -2705,7 +2705,7 @@ zio_vdev_io_assess(zio_t *zio)
*/
if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
!vdev_accessible(vd, zio))
zio->io_error = ENXIO;
zio->io_error = SET_ERROR(ENXIO);
/*
* If we can't write to an interior vdev (mirror or RAID-Z),

View File

@ -20,6 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
@ -201,7 +202,7 @@ zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
zio_cksum_t actual_cksum, expected_cksum, verifier;
if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (ci->ci_eck) {
zio_eck_t *eck;
@ -216,10 +217,10 @@ zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
else if (eck->zec_magic == BSWAP_64(ZEC_MAGIC))
nused = BSWAP_64(zilc->zc_nused);
else
return (ECKSUM);
return (SET_ERROR(ECKSUM));
if (nused > size)
return (ECKSUM);
return (SET_ERROR(ECKSUM));
size = P2ROUNDUP_TYPED(nused, ZIL_MIN_BLKSZ, uint64_t);
} else {
@ -261,7 +262,7 @@ zio_checksum_error(zio_t *zio, zio_bad_cksum_t *info)
info->zbc_has_cksum = 1;
if (!ZIO_CHECKSUM_EQUAL(actual_cksum, expected_cksum))
return (ECKSUM);
return (SET_ERROR(ECKSUM));
if (zio_injection_enabled && !zio->io_error &&
(error = zio_handle_fault_injection(zio, ECKSUM)) != 0) {

View File

@ -27,6 +27,10 @@
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
*/
/*
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/compress.h>
#include <sys/spa.h>
@ -130,7 +134,7 @@ zio_decompress_data(enum zio_compress c, void *src, void *dst,
zio_compress_info_t *ci = &zio_compress_table[c];
if ((uint_t)c >= ZIO_COMPRESS_FUNCTIONS || ci->ci_decompress == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
return (ci->ci_decompress(src, dst, s_len, d_len, ci->ci_level));
}

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/*
@ -276,7 +276,7 @@ zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error)
break;
}
if (handler->zi_record.zi_error == ENXIO) {
ret = EIO;
ret = SET_ERROR(EIO);
break;
}
}
@ -416,7 +416,7 @@ zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
* still allowing it to be unloaded.
*/
if ((spa = spa_inject_addref(name)) == NULL)
return (ENOENT);
return (SET_ERROR(ENOENT));
handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);
@ -468,7 +468,7 @@ zio_inject_list_next(int *id, char *name, size_t buflen,
(void) strncpy(name, spa_name(handler->zi_spa), buflen);
ret = 0;
} else {
ret = ENOENT;
ret = SET_ERROR(ENOENT);
}
rw_exit(&inject_lock);
@ -495,7 +495,7 @@ zio_clear_fault(int id)
if (handler == NULL) {
rw_exit(&inject_lock);
return (ENOENT);
return (SET_ERROR(ENOENT));
}
list_remove(&inject_handlers, handler);

View File

@ -23,7 +23,7 @@
*
* Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
* Copyright (c) 2012 by Delphix. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
@ -189,14 +189,14 @@ int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
if (volsize == 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (volsize % blocksize != 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
#ifdef _ILP32
if (volsize - 1 > SPEC_MAXOFFSET_T)
return (EOVERFLOW);
return (SET_ERROR(EOVERFLOW));
#endif
return (0);
}
@ -207,7 +207,7 @@ zvol_check_volblocksize(uint64_t volblocksize)
if (volblocksize < SPA_MINBLOCKSIZE ||
volblocksize > SPA_MAXBLOCKSIZE ||
!ISP2(volblocksize))
return (EDOM);
return (SET_ERROR(EDOM));
return (0);
}
@ -283,7 +283,7 @@ zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
/* Abort immediately if we have encountered gang blocks */
if (BP_IS_GANG(bp))
return (EFRAGS);
return (SET_ERROR(EFRAGS));
/*
* See if the block is at the end of the previous extent.
@ -422,7 +422,7 @@ zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
return (ENOTSUP);
return (SET_ERROR(ENOTSUP));
}
/*
@ -486,7 +486,7 @@ zvol_create_minor(const char *name)
if (zvol_minor_lookup(name) != NULL) {
mutex_exit(&spa_namespace_lock);
return (EEXIST);
return (SET_ERROR(EEXIST));
}
/* lie and say we're read-only */
@ -501,13 +501,13 @@ zvol_create_minor(const char *name)
if ((minor = zfsdev_minor_alloc()) == 0) {
dmu_objset_disown(os, FTAG);
mutex_exit(&spa_namespace_lock);
return (ENXIO);
return (SET_ERROR(ENXIO));
}
if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
dmu_objset_disown(os, FTAG);
mutex_exit(&spa_namespace_lock);
return (EAGAIN);
return (SET_ERROR(EAGAIN));
}
(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
(char *)name);
@ -519,7 +519,7 @@ zvol_create_minor(const char *name)
ddi_soft_state_free(zfsdev_state, minor);
dmu_objset_disown(os, FTAG);
mutex_exit(&spa_namespace_lock);
return (EAGAIN);
return (SET_ERROR(EAGAIN));
}
(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
@ -530,7 +530,7 @@ zvol_create_minor(const char *name)
ddi_soft_state_free(zfsdev_state, minor);
dmu_objset_disown(os, FTAG);
mutex_exit(&spa_namespace_lock);
return (EAGAIN);
return (SET_ERROR(EAGAIN));
}
zs = ddi_get_soft_state(zfsdev_state, minor);
@ -604,7 +604,7 @@ zvol_remove_zv(zvol_state_t *zv)
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (zv->zv_total_opens != 0)
return (EBUSY);
return (SET_ERROR(EBUSY));
ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
@ -631,7 +631,7 @@ zvol_remove_minor(const char *name)
mutex_enter(&spa_namespace_lock);
if ((zv = zvol_minor_lookup(name)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (ENXIO);
return (SET_ERROR(ENXIO));
}
g_topology_lock();
rc = zvol_remove_zv(zv);
@ -714,7 +714,7 @@ zvol_prealloc(zvol_state_t *zv)
/* Check the space usage before attempting to allocate the space */
dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
if (avail < zv->zv_volsize)
return (ENOSPC);
return (SET_ERROR(ENOSPC));
/* Free old extents if they exist */
zvol_free_extents(zv);
@ -914,7 +914,7 @@ zvol_open(struct g_provider *pp, int flag, int count)
if (zv == NULL) {
if (locked)
mutex_exit(&spa_namespace_lock);
return (ENXIO);
return (SET_ERROR(ENXIO));
}
if (zv->zv_total_opens == 0)
@ -925,17 +925,17 @@ zvol_open(struct g_provider *pp, int flag, int count)
return (err);
}
if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
err = EROFS;
err = SET_ERROR(EROFS);
goto out;
}
if (zv->zv_flags & ZVOL_EXCL) {
err = EBUSY;
err = SET_ERROR(EBUSY);
goto out;
}
#ifdef FEXCL
if (flag & FEXCL) {
if (zv->zv_total_opens != 0) {
err = EBUSY;
err = SET_ERROR(EBUSY);
goto out;
}
zv->zv_flags |= ZVOL_EXCL;
@ -973,7 +973,7 @@ zvol_close(struct g_provider *pp, int flag, int count)
if (zv == NULL) {
if (locked)
mutex_exit(&spa_namespace_lock);
return (ENXIO);
return (SET_ERROR(ENXIO));
}
if (zv->zv_flags & ZVOL_EXCL) {
@ -1182,9 +1182,9 @@ zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t size,
return (numerrors < vd->vdev_children ? 0 : EIO);
if (doread && !vdev_readable(vd))
return (EIO);
return (SET_ERROR(EIO));
else if (!doread && !vdev_writeable(vd))
return (EIO);
return (SET_ERROR(EIO));
dvd = vd->vdev_tsd;
ASSERT3P(dvd, !=, NULL);
@ -1193,7 +1193,7 @@ zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t size,
if (ddi_in_panic() || isdump) {
ASSERT(!doread);
if (doread)
return (EIO);
return (SET_ERROR(EIO));
return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
lbtodb(size)));
} else {
@ -1214,7 +1214,7 @@ zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
/* Must be sector aligned, and not straddle a block boundary. */
if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
return (EINVAL);
return (SET_ERROR(EINVAL));
}
ASSERT(size <= zv->zv_volblocksize);
@ -1226,7 +1226,7 @@ zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
}
if (ze == NULL)
return (EINVAL);
return (SET_ERROR(EINVAL));
if (!ddi_in_panic())
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
@ -1308,7 +1308,7 @@ zvol_strategy(struct bio *bp)
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = EIO;
error = SET_ERROR(EIO);
break;
}
off += size;
@ -1356,10 +1356,10 @@ zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
if (zv == NULL)
return (ENXIO);
return (SET_ERROR(ENXIO));
if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
boff = ldbtob(blkno);
resid = ldbtob(nblocks);
@ -1391,12 +1391,12 @@ zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
if (zv == NULL)
return (ENXIO);
return (SET_ERROR(ENXIO));
volsize = zv->zv_volsize;
if (uio->uio_resid > 0 &&
(uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
return (EIO);
return (SET_ERROR(EIO));
if (zv->zv_flags & ZVOL_DUMPIFIED) {
error = physio(zvol_strategy, NULL, dev, B_READ,
@ -1417,7 +1417,7 @@ zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = EIO;
error = SET_ERROR(EIO);
break;
}
}
@ -1438,12 +1438,12 @@ zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
if (zv == NULL)
return (ENXIO);
return (SET_ERROR(ENXIO));
volsize = zv->zv_volsize;
if (uio->uio_resid > 0 &&
(uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
return (EIO);
return (SET_ERROR(EIO));
if (zv->zv_flags & ZVOL_DUMPIFIED) {
error = physio(zvol_strategy, NULL, dev, B_WRITE,
@ -1495,7 +1495,7 @@ zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
char *ptr;
if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
return (EFAULT);
return (SET_ERROR(EFAULT));
ptr = (char *)(uintptr_t)efi.dki_data_64;
length = efi.dki_length;
/*
@ -1506,7 +1506,7 @@ zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
* PMBR.
*/
if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
return (EINVAL);
return (SET_ERROR(EINVAL));
gpe.efi_gpe_StartingLBA = LE_64(34ULL);
gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
@ -1531,13 +1531,13 @@ zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
flag))
return (EFAULT);
return (SET_ERROR(EFAULT));
ptr += sizeof (gpt);
length -= sizeof (gpt);
}
if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
length), flag))
return (EFAULT);
return (SET_ERROR(EFAULT));
return (0);
}
@ -1557,9 +1557,9 @@ zvol_get_volume_params(minor_t minor, uint64_t *blksize,
zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
if (zv == NULL)
return (ENXIO);
return (SET_ERROR(ENXIO));
if (zv->zv_flags & ZVOL_DUMPIFIED)
return (ENXIO);
return (SET_ERROR(ENXIO));
ASSERT(blksize && max_xfer_len && minor_hdl &&
objset_hdl && zil_hdl && rl_hdl && bonus_hdl);
@ -1633,7 +1633,7 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
if (zv == NULL) {
mutex_exit(&spa_namespace_lock);
return (ENXIO);
return (SET_ERROR(ENXIO));
}
ASSERT(zv->zv_total_opens > 0);
@ -1648,7 +1648,7 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
mutex_exit(&spa_namespace_lock);
if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
error = EFAULT;
error = SET_ERROR(EFAULT);
return (error);
case DKIOCGMEDIAINFO:
@ -1658,7 +1658,7 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
dkm.dki_media_type = DK_UNKNOWN;
mutex_exit(&spa_namespace_lock);
if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
error = EFAULT;
error = SET_ERROR(EFAULT);
return (error);
case DKIOCGETEFI:
@ -1686,7 +1686,7 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
if (ddi_copyout(&wce, (void *)arg, sizeof (int),
flag))
error = EFAULT;
error = SET_ERROR(EFAULT);
break;
}
case DKIOCSETWCE:
@ -1694,7 +1694,7 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
int wce;
if (ddi_copyin((void *)arg, &wce, sizeof (int),
flag)) {
error = EFAULT;
error = SET_ERROR(EFAULT);
break;
}
if (wce) {
@ -1714,7 +1714,7 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
* commands using these (like prtvtoc) expect ENOTSUP
* since we're emulating an EFI label
*/
error = ENOTSUP;
error = SET_ERROR(ENOTSUP);
break;
case DKIOCDUMPINIT:
@ -1733,8 +1733,67 @@ zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
zfs_range_unlock(rl);
break;
case DKIOCFREE:
{
dkioc_free_t df;
dmu_tx_t *tx;
if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
error = SET_ERROR(EFAULT);
break;
}
/*
* Apply Postel's Law to length-checking. If they overshoot,
* just blank out until the end, if there's a need to blank
* out anything.
*/
if (df.df_start >= zv->zv_volsize)
break; /* No need to do anything... */
if (df.df_start + df.df_length > zv->zv_volsize)
df.df_length = DMU_OBJECT_END;
rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
RL_WRITER);
tx = dmu_tx_create(zv->zv_objset);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
} else {
zvol_log_truncate(zv, tx, df.df_start,
df.df_length, B_TRUE);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
df.df_start, df.df_length);
}
zfs_range_unlock(rl);
if (error == 0) {
/*
* If the write-cache is disabled or 'sync' property
* is set to 'always' then treat this as a synchronous
* operation (i.e. commit to zil).
*/
if (!(zv->zv_flags & ZVOL_WCE) ||
(zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
zil_commit(zv->zv_zilog, ZVOL_OBJ);
/*
* If the caller really wants synchronous writes, and
* can't wait for them, don't return until the write
* is done.
*/
if (df.df_flags & DF_WAIT_SYNC) {
txg_wait_synced(
dmu_objset_pool(zv->zv_objset), 0);
}
}
break;
}
default:
error = ENOTTY;
error = SET_ERROR(ENOTTY);
break;
}
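
For reference, the DKIOCFREE handler added above is driven from userland with an ordinary ioctl(2) call. The following is a minimal sketch of an illumos-style consumer, not part of the change itself; the device path is hypothetical, the header locations are assumptions, and only the fields the handler actually reads (df_start, df_length, df_flags with DF_WAIT_SYNC) are filled in.

#include <sys/types.h>
#include <sys/dkio.h>	/* DKIOCFREE, dkioc_free_t, DF_WAIT_SYNC (assumed location) */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <stropts.h>	/* ioctl() prototype on illumos; header names vary by platform */
#include <unistd.h>

int
main(void)
{
	dkioc_free_t df;
	/* Hypothetical zvol device path. */
	int fd = open("/dev/zvol/rdsk/tank/vol0", O_RDWR);

	if (fd == -1) {
		perror("open");
		return (1);
	}
	(void) memset(&df, 0, sizeof (df));
	df.df_start = 0;		/* byte offset into the volume */
	df.df_length = 1 << 20;		/* free (unmap) the first 1MB */
	df.df_flags = DF_WAIT_SYNC;	/* wait until the free is on stable storage */
	if (ioctl(fd, DKIOCFREE, &df) == -1)
		perror("DKIOCFREE");
	(void) close(fd);
	return (0);
}

With DF_WAIT_SYNC set, the handler waits for the transaction group to sync before returning, so the freed range is committed when the ioctl completes; without it, the free is committed to the ZIL only when the write cache is disabled or the 'sync' property is 'always', as the comments in the handler note.
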
@ -1881,7 +1940,7 @@ zvol_dumpify(zvol_state_t *zv)
objset_t *os = zv->zv_objset;
if (zv->zv_flags & ZVOL_RDONLY)
return (EROFS);
return (SET_ERROR(EROFS));
if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {