9328 zap code can take advantage of c99

9329 panic in zap_leaf_lookup() due to concurrent zapification

illumos/illumos-gate@bf26014c55

Reviewed by: Steve Gonczi <steve.gonczi@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Pavel Zakharov <pavel.zakharov@delphix.com>
Reviewed by: Brad Lewis <brad.lewis@delphix.com>
Approved by: Dan McDonald <danmcd@joyent.com>
Author:     Matthew Ahrens <mahrens@delphix.com>
Committed by: Alexander Motin, 2018-08-01 03:03:15 +00:00
commit 03a8ca5bd7 (parent 82104b77fb)
4 changed files with 258 additions and 350 deletions
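
Most of the diff below is the 9328 cleanup: the code now relies on C99, so
variables are declared at their first use, loop counters are declared inside
the for-statement, error checks are spelled "if (err != 0)", and
VERIFY(0 == expr) becomes VERIFY0(expr). A minimal before/after sketch of the
declaration pattern (function and variable names here are illustrative, not
taken from the commit):

	/* C89 style: everything declared at the top of the block. */
	static int
	count_nonzero_c89(const uint64_t *tbl, int len)
	{
		int i;
		int count;

		count = 0;
		for (i = 0; i < len; i++) {
			if (tbl[i] != 0)
				count++;
		}
		return (count);
	}

	/* C99 style: declare at first use, scope the counter to the loop. */
	static int
	count_nonzero_c99(const uint64_t *tbl, int len)
	{
		int count = 0;
		for (int i = 0; i < len; i++) {
			if (tbl[i] != 0)
				count++;
		}
		return (count);
	}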

View File

@ -20,7 +20,7 @@
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2015 by Delphix. All rights reserved.
* Copyright (c) 2013, 2017 by Delphix. All rights reserved.
* Copyright 2014 HybridCluster. All rights reserved.
*/
@ -204,13 +204,19 @@ dmu_object_zapify(objset_t *mos, uint64_t object, dmu_object_type_t old_type,
}
ASSERT3U(dn->dn_type, ==, old_type);
ASSERT0(dn->dn_maxblkid);
/*
* We must initialize the ZAP data before changing the type,
* so that concurrent calls to *_is_zapified() can determine if
* the object has been completely zapified by checking the type.
*/
mzap_create_impl(mos, object, 0, 0, tx);
dn->dn_next_type[tx->tx_txg & TXG_MASK] = dn->dn_type =
DMU_OTN_ZAP_METADATA;
dnode_setdirty(dn, tx);
dnode_rele(dn, FTAG);
mzap_create_impl(mos, object, 0, 0, tx);
spa_feature_incr(dmu_objset_spa(mos),
SPA_FEATURE_EXTENSIBLE_DATASET, tx);
}
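
The 9329 fix is the reordering in the hunk above: mzap_create_impl() now runs
before the dnode's type is flipped to DMU_OTN_ZAP_METADATA. As the new comment
says, the *_is_zapified() helpers decide whether an object has been completely
zapified by looking only at its type, so setting the type first left a window
in which a concurrent reader could treat uninitialized data as ZAP contents and
panic in zap_leaf_lookup(). A minimal standalone sketch of the
initialize-before-publish idea (hypothetical names, an analogy rather than the
kernel code):

	#include <string.h>

	struct obj {
		int	type;		/* 0 = plain, 1 = "zapified" */
		char	zap_data[64];	/* only meaningful once type == 1 */
	};

	static int
	obj_is_zapified(const struct obj *o)
	{
		return (o->type == 1);	/* readers look only at the type */
	}

	static void
	obj_zapify(struct obj *o)
	{
		/* Initialize the data the new type implies ... */
		memset(o->zap_data, 0, sizeof (o->zap_data));
		/* ... and only then publish the type. */
		o->type = 1;
	}

In the actual change the ordering is enforced simply by calling
mzap_create_impl() before the dn_type / dn_next_type assignment within the same
transaction.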

View File

@ -58,9 +58,7 @@ static uint64_t zap_allocate_blocks(zap_t *zap, int nblocks);
void
fzap_byteswap(void *vbuf, size_t size)
{
uint64_t block_type;
block_type = *(uint64_t *)vbuf;
uint64_t block_type = *(uint64_t *)vbuf;
if (block_type == ZBT_LEAF || block_type == BSWAP_64(ZBT_LEAF))
zap_leaf_byteswap(vbuf, size);
@ -73,11 +71,6 @@ fzap_byteswap(void *vbuf, size_t size)
void
fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags)
{
dmu_buf_t *db;
zap_leaf_t *l;
int i;
zap_phys_t *zp;
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
zap->zap_ismicro = FALSE;
@ -87,7 +80,7 @@ fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags)
mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, 0, 0);
zap->zap_f.zap_block_shift = highbit64(zap->zap_dbuf->db_size) - 1;
zp = zap_f_phys(zap);
zap_phys_t *zp = zap_f_phys(zap);
/*
* explicitly zero it since it might be coming from an
* initialized microzap
@ -106,17 +99,18 @@ fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags)
zp->zap_flags = flags;
/* block 1 will be the first leaf */
for (i = 0; i < (1<<zp->zap_ptrtbl.zt_shift); i++)
for (int i = 0; i < (1<<zp->zap_ptrtbl.zt_shift); i++)
ZAP_EMBEDDED_PTRTBL_ENT(zap, i) = 1;
/*
* set up block 1 - the first leaf
*/
VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object,
dmu_buf_t *db;
VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object,
1<<FZAP_BLOCK_SHIFT(zap), FTAG, &db, DMU_READ_NO_PREFETCH));
dmu_buf_will_dirty(db, tx);
l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
l->l_dbuf = db;
zap_leaf_init(l, zp->zap_normflags != 0);
@ -146,9 +140,7 @@ zap_table_grow(zap_t *zap, zap_table_phys_t *tbl,
void (*transfer_func)(const uint64_t *src, uint64_t *dst, int n),
dmu_tx_t *tx)
{
uint64_t b, newblk;
dmu_buf_t *db_old, *db_new;
int err;
uint64_t newblk;
int bs = FZAP_BLOCK_SHIFT(zap);
int hepb = 1<<(bs-4);
/* hepb = half the number of entries in a block */
@ -172,21 +164,23 @@ zap_table_grow(zap_t *zap, zap_table_phys_t *tbl,
* Copy the ptrtbl from the old to new location.
*/
b = tbl->zt_blks_copied;
err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
uint64_t b = tbl->zt_blks_copied;
dmu_buf_t *db_old;
int err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
(tbl->zt_blk + b) << bs, FTAG, &db_old, DMU_READ_NO_PREFETCH);
if (err)
if (err != 0)
return (err);
/* first half of entries in old[b] go to new[2*b+0] */
VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object,
dmu_buf_t *db_new;
VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object,
(newblk + 2*b+0) << bs, FTAG, &db_new, DMU_READ_NO_PREFETCH));
dmu_buf_will_dirty(db_new, tx);
transfer_func(db_old->db_data, db_new->db_data, hepb);
dmu_buf_rele(db_new, FTAG);
/* second half of entries in old[b] go to new[2*b+1] */
VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object,
VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object,
(newblk + 2*b+1) << bs, FTAG, &db_new, DMU_READ_NO_PREFETCH));
dmu_buf_will_dirty(db_new, tx);
transfer_func((uint64_t *)db_old->db_data + hepb,
@ -221,22 +215,20 @@ static int
zap_table_store(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t val,
dmu_tx_t *tx)
{
int err;
uint64_t blk, off;
int bs = FZAP_BLOCK_SHIFT(zap);
dmu_buf_t *db;
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
ASSERT(tbl->zt_blk != 0);
dprintf("storing %llx at index %llx\n", val, idx);
blk = idx >> (bs-3);
off = idx & ((1<<(bs-3))-1);
uint64_t blk = idx >> (bs-3);
uint64_t off = idx & ((1<<(bs-3))-1);
err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
dmu_buf_t *db;
int err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
(tbl->zt_blk + blk) << bs, FTAG, &db, DMU_READ_NO_PREFETCH);
if (err)
if (err != 0)
return (err);
dmu_buf_will_dirty(db, tx);
@ -249,7 +241,7 @@ zap_table_store(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t val,
err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
(tbl->zt_nextblk + blk2) << bs, FTAG, &db2,
DMU_READ_NO_PREFETCH);
if (err) {
if (err != 0) {
dmu_buf_rele(db, FTAG);
return (err);
}
@ -268,27 +260,24 @@ zap_table_store(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t val,
static int
zap_table_load(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t *valp)
{
uint64_t blk, off;
int err;
dmu_buf_t *db;
dnode_t *dn;
int bs = FZAP_BLOCK_SHIFT(zap);
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
blk = idx >> (bs-3);
off = idx & ((1<<(bs-3))-1);
uint64_t blk = idx >> (bs-3);
uint64_t off = idx & ((1<<(bs-3))-1);
/*
* Note: this is equivalent to dmu_buf_hold(), but we use
* _dnode_enter / _by_dnode because it's faster: we don't
* have to hold the dnode.
*/
dn = dmu_buf_dnode_enter(zap->zap_dbuf);
err = dmu_buf_hold_by_dnode(dn,
dnode_t *dn = dmu_buf_dnode_enter(zap->zap_dbuf);
dmu_buf_t *db;
int err = dmu_buf_hold_by_dnode(dn,
(tbl->zt_blk + blk) << bs, FTAG, &db, DMU_READ_NO_PREFETCH);
dmu_buf_dnode_exit(zap->zap_dbuf);
if (err)
if (err != 0)
return (err);
*valp = ((uint64_t *)db->db_data)[off];
dmu_buf_rele(db, FTAG);
@ -319,11 +308,10 @@ zap_table_load(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t *valp)
static void
zap_ptrtbl_transfer(const uint64_t *src, uint64_t *dst, int n)
{
int i;
for (i = 0; i < n; i++) {
for (int i = 0; i < n; i++) {
uint64_t lb = src[i];
dst[2*i+0] = lb;
dst[2*i+1] = lb;
dst[2 * i + 0] = lb;
dst[2 * i + 1] = lb;
}
}
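
zap_ptrtbl_transfer() above is the heart of growing the pointer table: doubling
the table duplicates every entry, so both new slots initially point at the same
leaf block, and they only diverge later when zap_expand_leaf() splits that leaf
and rewrites the sibling pointers. A tiny standalone illustration (hypothetical
wrapper and values, same transfer logic as above):

	#include <stdint.h>
	#include <stdio.h>

	static void
	ptrtbl_transfer(const uint64_t *src, uint64_t *dst, int n)
	{
		for (int i = 0; i < n; i++) {
			uint64_t lb = src[i];
			dst[2 * i + 0] = lb;
			dst[2 * i + 1] = lb;
		}
	}

	int
	main(void)
	{
		uint64_t old_tbl[] = { 5, 7 };	/* leaf block ids */
		uint64_t new_tbl[4];

		ptrtbl_transfer(old_tbl, new_tbl, 2);
		for (int i = 0; i < 4; i++)
			printf("%llu ", (unsigned long long)new_tbl[i]);
		printf("\n");			/* prints: 5 5 7 7 */
		return (0);
	}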
@ -345,19 +333,16 @@ zap_grow_ptrtbl(zap_t *zap, dmu_tx_t *tx)
* stored in the header block). Give it its own entire
* block, which will double the size of the ptrtbl.
*/
uint64_t newblk;
dmu_buf_t *db_new;
int err;
ASSERT3U(zap_f_phys(zap)->zap_ptrtbl.zt_shift, ==,
ZAP_EMBEDDED_PTRTBL_SHIFT(zap));
ASSERT0(zap_f_phys(zap)->zap_ptrtbl.zt_blk);
newblk = zap_allocate_blocks(zap, 1);
err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
uint64_t newblk = zap_allocate_blocks(zap, 1);
dmu_buf_t *db_new;
int err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
newblk << FZAP_BLOCK_SHIFT(zap), FTAG, &db_new,
DMU_READ_NO_PREFETCH);
if (err)
if (err != 0)
return (err);
dmu_buf_will_dirty(db_new, tx);
zap_ptrtbl_transfer(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0),
@ -392,9 +377,8 @@ zap_increment_num_entries(zap_t *zap, int delta, dmu_tx_t *tx)
static uint64_t
zap_allocate_blocks(zap_t *zap, int nblocks)
{
uint64_t newblk;
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
newblk = zap_f_phys(zap)->zap_freeblk;
uint64_t newblk = zap_f_phys(zap)->zap_freeblk;
zap_f_phys(zap)->zap_freeblk += nblocks;
return (newblk);
}
@ -411,7 +395,6 @@ zap_leaf_evict_sync(void *dbu)
static zap_leaf_t *
zap_create_leaf(zap_t *zap, dmu_tx_t *tx)
{
void *winner;
zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
@ -421,12 +404,11 @@ zap_create_leaf(zap_t *zap, dmu_tx_t *tx)
l->l_blkid = zap_allocate_blocks(zap, 1);
l->l_dbuf = NULL;
VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object,
VERIFY0(dmu_buf_hold(zap->zap_objset, zap->zap_object,
l->l_blkid << FZAP_BLOCK_SHIFT(zap), NULL, &l->l_dbuf,
DMU_READ_NO_PREFETCH));
dmu_buf_init_user(&l->l_dbu, zap_leaf_evict_sync, NULL, &l->l_dbuf);
winner = dmu_buf_set_user(l->l_dbuf, &l->l_dbu);
ASSERT(winner == NULL);
VERIFY3P(NULL, ==, dmu_buf_set_user(l->l_dbuf, &l->l_dbu));
dmu_buf_will_dirty(l->l_dbuf, tx);
zap_leaf_init(l, zap->zap_normflags != 0);
@ -460,11 +442,9 @@ zap_put_leaf(zap_leaf_t *l)
static zap_leaf_t *
zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
{
zap_leaf_t *l, *winner;
ASSERT(blkid != 0);
l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
zap_leaf_t *l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
rw_init(&l->l_rwlock, 0, 0, 0);
rw_enter(&l->l_rwlock, RW_WRITER);
l->l_blkid = blkid;
@ -472,7 +452,7 @@ zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
l->l_dbuf = db;
dmu_buf_init_user(&l->l_dbu, zap_leaf_evict_sync, NULL, &l->l_dbuf);
winner = dmu_buf_set_user(db, &l->l_dbu);
zap_leaf_t *winner = dmu_buf_set_user(db, &l->l_dbu);
rw_exit(&l->l_rwlock);
if (winner != NULL) {
@ -510,17 +490,15 @@ zap_get_leaf_byblk(zap_t *zap, uint64_t blkid, dmu_tx_t *tx, krw_t lt,
zap_leaf_t **lp)
{
dmu_buf_t *db;
zap_leaf_t *l;
int bs = FZAP_BLOCK_SHIFT(zap);
int err;
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
int bs = FZAP_BLOCK_SHIFT(zap);
dnode_t *dn = dmu_buf_dnode_enter(zap->zap_dbuf);
err = dmu_buf_hold_by_dnode(dn,
int err = dmu_buf_hold_by_dnode(dn,
blkid << bs, NULL, &db, DMU_READ_NO_PREFETCH);
dmu_buf_dnode_exit(zap->zap_dbuf);
if (err)
if (err != 0)
return (err);
ASSERT3U(db->db_object, ==, zap->zap_object);
@ -528,7 +506,7 @@ zap_get_leaf_byblk(zap_t *zap, uint64_t blkid, dmu_tx_t *tx, krw_t lt,
ASSERT3U(db->db_size, ==, 1 << bs);
ASSERT(blkid != 0);
l = dmu_buf_get_user(db);
zap_leaf_t *l = dmu_buf_get_user(db);
if (l == NULL)
l = zap_open_leaf(blkid, db);
@ -583,8 +561,7 @@ zap_set_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t blk, dmu_tx_t *tx)
static int
zap_deref_leaf(zap_t *zap, uint64_t h, dmu_tx_t *tx, krw_t lt, zap_leaf_t **lp)
{
uint64_t idx, blk;
int err;
uint64_t blk;
ASSERT(zap->zap_dbuf == NULL ||
zap_f_phys(zap) == zap->zap_dbuf->db_data);
@ -596,8 +573,8 @@ zap_deref_leaf(zap_t *zap, uint64_t h, dmu_tx_t *tx, krw_t lt, zap_leaf_t **lp)
return (SET_ERROR(EIO));
}
idx = ZAP_HASH_IDX(h, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
err = zap_idx_to_blk(zap, idx, &blk);
uint64_t idx = ZAP_HASH_IDX(h, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
int err = zap_idx_to_blk(zap, idx, &blk);
if (err != 0)
return (err);
err = zap_get_leaf_byblk(zap, blk, tx, lt, lp);
@ -614,9 +591,7 @@ zap_expand_leaf(zap_name_t *zn, zap_leaf_t *l,
{
zap_t *zap = zn->zn_zap;
uint64_t hash = zn->zn_hash;
zap_leaf_t *nl;
int prefix_diff, i, err;
uint64_t sibling;
int err;
int old_prefix_len = zap_leaf_phys(l)->l_hdr.lh_prefix_len;
ASSERT3U(old_prefix_len, <=, zap_f_phys(zap)->zap_ptrtbl.zt_shift);
@ -636,19 +611,19 @@ zap_expand_leaf(zap_name_t *zn, zap_leaf_t *l,
err = zap_lockdir(os, object, tx, RW_WRITER,
FALSE, FALSE, tag, &zn->zn_zap);
zap = zn->zn_zap;
if (err)
if (err != 0)
return (err);
ASSERT(!zap->zap_ismicro);
while (old_prefix_len ==
zap_f_phys(zap)->zap_ptrtbl.zt_shift) {
err = zap_grow_ptrtbl(zap, tx);
if (err)
if (err != 0)
return (err);
}
err = zap_deref_leaf(zap, hash, tx, RW_WRITER, &l);
if (err)
if (err != 0)
return (err);
if (zap_leaf_phys(l)->l_hdr.lh_prefix_len != old_prefix_len) {
@ -662,25 +637,26 @@ zap_expand_leaf(zap_name_t *zn, zap_leaf_t *l,
ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==,
zap_leaf_phys(l)->l_hdr.lh_prefix);
prefix_diff = zap_f_phys(zap)->zap_ptrtbl.zt_shift -
int prefix_diff = zap_f_phys(zap)->zap_ptrtbl.zt_shift -
(old_prefix_len + 1);
sibling = (ZAP_HASH_IDX(hash, old_prefix_len + 1) | 1) << prefix_diff;
uint64_t sibling =
(ZAP_HASH_IDX(hash, old_prefix_len + 1) | 1) << prefix_diff;
/* check for i/o errors before doing zap_leaf_split */
for (i = 0; i < (1ULL<<prefix_diff); i++) {
for (int i = 0; i < (1ULL << prefix_diff); i++) {
uint64_t blk;
err = zap_idx_to_blk(zap, sibling+i, &blk);
if (err)
err = zap_idx_to_blk(zap, sibling + i, &blk);
if (err != 0)
return (err);
ASSERT3U(blk, ==, l->l_blkid);
}
nl = zap_create_leaf(zap, tx);
zap_leaf_t *nl = zap_create_leaf(zap, tx);
zap_leaf_split(l, nl, zap->zap_normflags != 0);
/* set sibling pointers */
for (i = 0; i < (1ULL << prefix_diff); i++) {
err = zap_set_idx_to_blk(zap, sibling+i, nl->l_blkid, tx);
for (int i = 0; i < (1ULL << prefix_diff); i++) {
err = zap_set_idx_to_blk(zap, sibling + i, nl->l_blkid, tx);
ASSERT0(err); /* we checked for i/o errors above */
}
@ -708,8 +684,6 @@ zap_put_leaf_maybe_grow_ptrtbl(zap_name_t *zn, zap_leaf_t *l,
zap_put_leaf(l);
if (leaffull || zap_f_phys(zap)->zap_ptrtbl.zt_nextblk) {
int err;
/*
* We are in the middle of growing the pointer table, or
* this leaf will soon make us grow it.
@ -719,10 +693,10 @@ zap_put_leaf_maybe_grow_ptrtbl(zap_name_t *zn, zap_leaf_t *l,
uint64_t zapobj = zap->zap_object;
zap_unlockdir(zap, tag);
err = zap_lockdir(os, zapobj, tx,
int err = zap_lockdir(os, zapobj, tx,
RW_WRITER, FALSE, FALSE, tag, &zn->zn_zap);
zap = zn->zn_zap;
if (err)
if (err != 0)
return;
}
@ -763,9 +737,8 @@ fzap_checksize(uint64_t integer_size, uint64_t num_integers)
static int
fzap_check(zap_name_t *zn, uint64_t integer_size, uint64_t num_integers)
{
int err;
if ((err = fzap_checkname(zn)) != 0)
int err = fzap_checkname(zn);
if (err != 0)
return (err);
return (fzap_checksize(integer_size, num_integers));
}
@ -779,10 +752,10 @@ fzap_lookup(zap_name_t *zn,
char *realname, int rn_len, boolean_t *ncp)
{
zap_leaf_t *l;
int err;
zap_entry_handle_t zeh;
if ((err = fzap_checkname(zn)) != 0)
int err = fzap_checkname(zn);
if (err != 0)
return (err);
err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, NULL, RW_READER, &l);
@ -870,7 +843,8 @@ fzap_update(zap_name_t *zn,
void *tag, dmu_tx_t *tx)
{
zap_leaf_t *l;
int err, create;
int err;
boolean_t create;
zap_entry_handle_t zeh;
zap_t *zap = zn->zn_zap;
@ -923,9 +897,9 @@ fzap_length(zap_name_t *zn,
if (err != 0)
goto out;
if (integer_size)
if (integer_size != 0)
*integer_size = zeh.zeh_integer_size;
if (num_integers)
if (num_integers != 0)
*num_integers = zeh.zeh_num_integers;
out:
zap_put_leaf(l);
@ -954,15 +928,14 @@ fzap_remove(zap_name_t *zn, dmu_tx_t *tx)
void
fzap_prefetch(zap_name_t *zn)
{
uint64_t idx, blk;
uint64_t blk;
zap_t *zap = zn->zn_zap;
int bs;
idx = ZAP_HASH_IDX(zn->zn_hash,
uint64_t idx = ZAP_HASH_IDX(zn->zn_hash,
zap_f_phys(zap)->zap_ptrtbl.zt_shift);
if (zap_idx_to_blk(zap, idx, &blk) != 0)
return;
bs = FZAP_BLOCK_SHIFT(zap);
int bs = FZAP_BLOCK_SHIFT(zap);
dmu_prefetch(zap->zap_objset, zap->zap_object, 0, blk << bs, 1 << bs,
ZIO_PRIORITY_SYNC_READ);
}
@ -975,9 +948,8 @@ uint64_t
zap_create_link(objset_t *os, dmu_object_type_t ot, uint64_t parent_obj,
const char *name, dmu_tx_t *tx)
{
uint64_t new_obj;
VERIFY((new_obj = zap_create(os, ot, DMU_OT_NONE, 0, tx)) > 0);
uint64_t new_obj = zap_create(os, ot, DMU_OT_NONE, 0, tx);
VERIFY(new_obj != 0);
VERIFY0(zap_add(os, parent_obj, name, sizeof (uint64_t), 1, &new_obj,
tx));
@ -989,13 +961,12 @@ zap_value_search(objset_t *os, uint64_t zapobj, uint64_t value, uint64_t mask,
char *name)
{
zap_cursor_t zc;
zap_attribute_t *za;
int err;
if (mask == 0)
mask = -1ULL;
za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
for (zap_cursor_init(&zc, os, zapobj);
(err = zap_cursor_retrieve(&zc, za)) == 0;
zap_cursor_advance(&zc)) {
@ -1005,7 +976,7 @@ zap_value_search(objset_t *os, uint64_t zapobj, uint64_t value, uint64_t mask,
}
}
zap_cursor_fini(&zc);
kmem_free(za, sizeof (zap_attribute_t));
kmem_free(za, sizeof (*za));
return (err);
}
@ -1013,23 +984,23 @@ int
zap_join(objset_t *os, uint64_t fromobj, uint64_t intoobj, dmu_tx_t *tx)
{
zap_cursor_t zc;
zap_attribute_t za;
int err;
int err = 0;
err = 0;
zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
for (zap_cursor_init(&zc, os, fromobj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_retrieve(&zc, za) == 0;
(void) zap_cursor_advance(&zc)) {
if (za.za_integer_length != 8 || za.za_num_integers != 1) {
if (za->za_integer_length != 8 || za->za_num_integers != 1) {
err = SET_ERROR(EINVAL);
break;
}
err = zap_add(os, intoobj, za.za_name,
8, 1, &za.za_first_integer, tx);
if (err)
err = zap_add(os, intoobj, za->za_name,
8, 1, &za->za_first_integer, tx);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
kmem_free(za, sizeof (*za));
return (err);
}
@ -1038,23 +1009,23 @@ zap_join_key(objset_t *os, uint64_t fromobj, uint64_t intoobj,
uint64_t value, dmu_tx_t *tx)
{
zap_cursor_t zc;
zap_attribute_t za;
int err;
int err = 0;
err = 0;
zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
for (zap_cursor_init(&zc, os, fromobj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_retrieve(&zc, za) == 0;
(void) zap_cursor_advance(&zc)) {
if (za.za_integer_length != 8 || za.za_num_integers != 1) {
if (za->za_integer_length != 8 || za->za_num_integers != 1) {
err = SET_ERROR(EINVAL);
break;
}
err = zap_add(os, intoobj, za.za_name,
err = zap_add(os, intoobj, za->za_name,
8, 1, &value, tx);
if (err)
if (err != 0)
break;
}
zap_cursor_fini(&zc);
kmem_free(za, sizeof (*za));
return (err);
}
@ -1063,29 +1034,29 @@ zap_join_increment(objset_t *os, uint64_t fromobj, uint64_t intoobj,
dmu_tx_t *tx)
{
zap_cursor_t zc;
zap_attribute_t za;
int err;
int err = 0;
err = 0;
zap_attribute_t *za = kmem_alloc(sizeof (*za), KM_SLEEP);
for (zap_cursor_init(&zc, os, fromobj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_retrieve(&zc, za) == 0;
(void) zap_cursor_advance(&zc)) {
uint64_t delta = 0;
if (za.za_integer_length != 8 || za.za_num_integers != 1) {
if (za->za_integer_length != 8 || za->za_num_integers != 1) {
err = SET_ERROR(EINVAL);
break;
}
err = zap_lookup(os, intoobj, za.za_name, 8, 1, &delta);
err = zap_lookup(os, intoobj, za->za_name, 8, 1, &delta);
if (err != 0 && err != ENOENT)
break;
delta += za.za_first_integer;
err = zap_update(os, intoobj, za.za_name, 8, 1, &delta, tx);
if (err)
delta += za->za_first_integer;
err = zap_update(os, intoobj, za->za_name, 8, 1, &delta, tx);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
kmem_free(za, sizeof (*za));
return (err);
}
@ -1150,12 +1121,11 @@ zap_increment(objset_t *os, uint64_t obj, const char *name, int64_t delta,
dmu_tx_t *tx)
{
uint64_t value = 0;
int err;
if (delta == 0)
return (0);
err = zap_lookup(os, obj, name, 8, 1, &value);
int err = zap_lookup(os, obj, name, 8, 1, &value);
if (err != 0 && err != ENOENT)
return (err);
value += delta;
@ -1253,7 +1223,6 @@ fzap_cursor_retrieve(zap_t *zap, zap_cursor_t *zc, zap_attribute_t *za)
static void
zap_stats_ptrtbl(zap_t *zap, uint64_t *tbl, int len, zap_stats_t *zs)
{
int i, err;
uint64_t lastblk = 0;
/*
@ -1261,14 +1230,14 @@ zap_stats_ptrtbl(zap_t *zap, uint64_t *tbl, int len, zap_stats_t *zs)
* can hold, then it'll be accounted for more than once, since
* we won't have lastblk.
*/
for (i = 0; i < len; i++) {
for (int i = 0; i < len; i++) {
zap_leaf_t *l;
if (tbl[i] == lastblk)
continue;
lastblk = tbl[i];
err = zap_get_leaf_byblk(zap, tbl[i], NULL, RW_READER, &l);
int err = zap_get_leaf_byblk(zap, tbl[i], NULL, RW_READER, &l);
if (err == 0) {
zap_leaf_stats(zap, l, zs);
zap_put_leaf(l);
@ -1308,14 +1277,12 @@ fzap_get_stats(zap_t *zap, zap_stats_t *zs)
zap_stats_ptrtbl(zap, &ZAP_EMBEDDED_PTRTBL_ENT(zap, 0),
1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap), zs);
} else {
int b;
dmu_prefetch(zap->zap_objset, zap->zap_object, 0,
zap_f_phys(zap)->zap_ptrtbl.zt_blk << bs,
zap_f_phys(zap)->zap_ptrtbl.zt_numblks << bs,
ZIO_PRIORITY_SYNC_READ);
for (b = 0; b < zap_f_phys(zap)->zap_ptrtbl.zt_numblks;
for (int b = 0; b < zap_f_phys(zap)->zap_ptrtbl.zt_numblks;
b++) {
dmu_buf_t *db;
int err;

View File

@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2015 by Delphix. All rights reserved.
* Copyright (c) 2013, 2016 by Delphix. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
*/
@ -107,7 +107,6 @@ ldv(int len, const void *addr)
void
zap_leaf_byteswap(zap_leaf_phys_t *buf, int size)
{
int i;
zap_leaf_t l;
dmu_buf_t l_dbuf;
@ -123,10 +122,10 @@ zap_leaf_byteswap(zap_leaf_phys_t *buf, int size)
buf->l_hdr.lh_prefix_len = BSWAP_16(buf->l_hdr.lh_prefix_len);
buf->l_hdr.lh_freelist = BSWAP_16(buf->l_hdr.lh_freelist);
for (i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(&l); i++)
for (int i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(&l); i++)
buf->l_hash[i] = BSWAP_16(buf->l_hash[i]);
for (i = 0; i < ZAP_LEAF_NUMCHUNKS(&l); i++) {
for (int i = 0; i < ZAP_LEAF_NUMCHUNKS(&l); i++) {
zap_leaf_chunk_t *lc = &ZAP_LEAF_CHUNK(&l, i);
struct zap_leaf_entry *le;
@ -162,14 +161,12 @@ zap_leaf_byteswap(zap_leaf_phys_t *buf, int size)
void
zap_leaf_init(zap_leaf_t *l, boolean_t sort)
{
int i;
l->l_bs = highbit64(l->l_dbuf->db_size) - 1;
zap_memset(&zap_leaf_phys(l)->l_hdr, 0,
sizeof (struct zap_leaf_header));
zap_memset(zap_leaf_phys(l)->l_hash, CHAIN_END,
2*ZAP_LEAF_HASH_NUMENTRIES(l));
for (i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) {
for (int i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) {
ZAP_LEAF_CHUNK(l, i).l_free.lf_type = ZAP_CHUNK_FREE;
ZAP_LEAF_CHUNK(l, i).l_free.lf_next = i+1;
}
@ -188,11 +185,9 @@ zap_leaf_init(zap_leaf_t *l, boolean_t sort)
static uint16_t
zap_leaf_chunk_alloc(zap_leaf_t *l)
{
int chunk;
ASSERT(zap_leaf_phys(l)->l_hdr.lh_nfree > 0);
chunk = zap_leaf_phys(l)->l_hdr.lh_freelist;
int chunk = zap_leaf_phys(l)->l_hdr.lh_freelist;
ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
ASSERT3U(ZAP_LEAF_CHUNK(l, chunk).l_free.lf_type, ==, ZAP_CHUNK_FREE);
@ -232,7 +227,7 @@ zap_leaf_array_create(zap_leaf_t *l, const char *buf,
uint16_t *chunkp = &chunk_head;
int byten = 0;
uint64_t value = 0;
int shift = (integer_size-1)*8;
int shift = (integer_size - 1) * 8;
int len = num_integers;
ASSERT3U(num_integers * integer_size, <, MAX_ARRAY_BYTES);
@ -240,10 +235,9 @@ zap_leaf_array_create(zap_leaf_t *l, const char *buf,
while (len > 0) {
uint16_t chunk = zap_leaf_chunk_alloc(l);
struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
int i;
la->la_type = ZAP_CHUNK_ARRAY;
for (i = 0; i < ZAP_LEAF_ARRAY_BYTES; i++) {
for (int i = 0; i < ZAP_LEAF_ARRAY_BYTES; i++) {
if (byten == 0)
value = ldv(integer_size, buf);
la->la_array[i] = value >> shift;
@ -321,10 +315,9 @@ zap_leaf_array_read(zap_leaf_t *l, uint16_t chunk,
while (len > 0) {
struct zap_leaf_array *la = &ZAP_LEAF_CHUNK(l, chunk).l_array;
int i;
ASSERT3U(chunk, <, ZAP_LEAF_NUMCHUNKS(l));
for (i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) {
for (int i = 0; i < ZAP_LEAF_ARRAY_BYTES && len > 0; i++) {
value = (value << 8) | la->la_array[i];
byten++;
if (byten == array_int_len) {
@ -347,16 +340,13 @@ zap_leaf_array_match(zap_leaf_t *l, zap_name_t *zn,
int bseen = 0;
if (zap_getflags(zn->zn_zap) & ZAP_FLAG_UINT64_KEY) {
uint64_t *thiskey;
boolean_t match;
uint64_t *thiskey =
kmem_alloc(array_numints * sizeof (*thiskey), KM_SLEEP);
ASSERT(zn->zn_key_intlen == sizeof (*thiskey));
thiskey = kmem_alloc(array_numints * sizeof (*thiskey),
KM_SLEEP);
zap_leaf_array_read(l, chunk, sizeof (*thiskey), array_numints,
sizeof (*thiskey), array_numints, thiskey);
match = bcmp(thiskey, zn->zn_key_orig,
boolean_t match = bcmp(thiskey, zn->zn_key_orig,
array_numints * sizeof (*thiskey)) == 0;
kmem_free(thiskey, array_numints * sizeof (*thiskey));
return (match);
@ -365,11 +355,10 @@ zap_leaf_array_match(zap_leaf_t *l, zap_name_t *zn,
ASSERT(zn->zn_key_intlen == 1);
if (zn->zn_matchtype & MT_NORMALIZE) {
char *thisname = kmem_alloc(array_numints, KM_SLEEP);
boolean_t match;
zap_leaf_array_read(l, chunk, sizeof (char), array_numints,
sizeof (char), array_numints, thisname);
match = zap_match(zn, thisname);
boolean_t match = zap_match(zn, thisname);
kmem_free(thisname, array_numints);
return (match);
}
@ -400,12 +389,11 @@ zap_leaf_array_match(zap_leaf_t *l, zap_name_t *zn,
int
zap_leaf_lookup(zap_leaf_t *l, zap_name_t *zn, zap_entry_handle_t *zeh)
{
uint16_t *chunkp;
struct zap_leaf_entry *le;
ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC);
for (chunkp = LEAF_HASH_ENTPTR(l, zn->zn_hash);
for (uint16_t *chunkp = LEAF_HASH_ENTPTR(l, zn->zn_hash);
*chunkp != CHAIN_END; chunkp = &le->le_next) {
uint16_t chunk = *chunkp;
le = ZAP_LEAF_ENTRY(l, chunk);
@ -446,17 +434,15 @@ int
zap_leaf_lookup_closest(zap_leaf_t *l,
uint64_t h, uint32_t cd, zap_entry_handle_t *zeh)
{
uint16_t chunk;
uint64_t besth = -1ULL;
uint32_t bestcd = -1U;
uint16_t bestlh = ZAP_LEAF_HASH_NUMENTRIES(l)-1;
uint16_t lh;
struct zap_leaf_entry *le;
ASSERT3U(zap_leaf_phys(l)->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC);
for (lh = LEAF_HASH(l, h); lh <= bestlh; lh++) {
for (chunk = zap_leaf_phys(l)->l_hash[lh];
for (uint16_t lh = LEAF_HASH(l, h); lh <= bestlh; lh++) {
for (uint16_t chunk = zap_leaf_phys(l)->l_hash[lh];
chunk != CHAIN_END; chunk = le->le_next) {
le = ZAP_LEAF_ENTRY(l, chunk);
@ -529,11 +515,10 @@ int
zap_entry_update(zap_entry_handle_t *zeh,
uint8_t integer_size, uint64_t num_integers, const void *buf)
{
int delta_chunks;
zap_leaf_t *l = zeh->zeh_leaf;
struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, *zeh->zeh_chunkp);
delta_chunks = ZAP_LEAF_ARRAY_NCHUNKS(num_integers * integer_size) -
int delta_chunks = ZAP_LEAF_ARRAY_NCHUNKS(num_integers * integer_size) -
ZAP_LEAF_ARRAY_NCHUNKS(le->le_value_numints * le->le_value_intlen);
if ((int)zap_leaf_phys(l)->l_hdr.lh_nfree < delta_chunks)
@ -550,14 +535,12 @@ zap_entry_update(zap_entry_handle_t *zeh,
void
zap_entry_remove(zap_entry_handle_t *zeh)
{
uint16_t entry_chunk;
struct zap_leaf_entry *le;
zap_leaf_t *l = zeh->zeh_leaf;
ASSERT3P(zeh->zeh_chunkp, !=, &zeh->zeh_fakechunk);
entry_chunk = *zeh->zeh_chunkp;
le = ZAP_LEAF_ENTRY(l, entry_chunk);
uint16_t entry_chunk = *zeh->zeh_chunkp;
struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, entry_chunk);
ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);
zap_leaf_array_free(l, &le->le_name_chunk);
@ -575,15 +558,12 @@ zap_entry_create(zap_leaf_t *l, zap_name_t *zn, uint32_t cd,
zap_entry_handle_t *zeh)
{
uint16_t chunk;
uint16_t *chunkp;
struct zap_leaf_entry *le;
uint64_t valuelen;
int numchunks;
uint64_t h = zn->zn_hash;
valuelen = integer_size * num_integers;
uint64_t valuelen = integer_size * num_integers;
numchunks = 1 + ZAP_LEAF_ARRAY_NCHUNKS(zn->zn_key_orig_numints *
int numchunks = 1 + ZAP_LEAF_ARRAY_NCHUNKS(zn->zn_key_orig_numints *
zn->zn_key_intlen) + ZAP_LEAF_ARRAY_NCHUNKS(valuelen);
if (numchunks > ZAP_LEAF_NUMCHUNKS(l))
return (E2BIG);
@ -645,7 +625,7 @@ zap_entry_create(zap_leaf_t *l, zap_name_t *zn, uint32_t cd,
/* link it into the hash chain */
/* XXX if we did the search above, we could just use that */
chunkp = zap_leaf_rehash_entry(l, chunk);
uint16_t *chunkp = zap_leaf_rehash_entry(l, chunk);
zap_leaf_phys(l)->l_hdr.lh_nentries++;
@ -673,14 +653,13 @@ boolean_t
zap_entry_normalization_conflict(zap_entry_handle_t *zeh, zap_name_t *zn,
const char *name, zap_t *zap)
{
uint64_t chunk;
struct zap_leaf_entry *le;
boolean_t allocdzn = B_FALSE;
if (zap->zap_normflags == 0)
return (B_FALSE);
for (chunk = *LEAF_HASH_ENTPTR(zeh->zeh_leaf, zeh->zeh_hash);
for (uint16_t chunk = *LEAF_HASH_ENTPTR(zeh->zeh_leaf, zeh->zeh_hash);
chunk != CHAIN_END; chunk = le->le_next) {
le = ZAP_LEAF_ENTRY(zeh->zeh_leaf, chunk);
if (le->le_hash != zeh->zeh_hash)
@ -763,14 +742,11 @@ zap_leaf_transfer_array(zap_leaf_t *l, uint16_t chunk, zap_leaf_t *nl)
static void
zap_leaf_transfer_entry(zap_leaf_t *l, int entry, zap_leaf_t *nl)
{
struct zap_leaf_entry *le, *nle;
uint16_t chunk;
le = ZAP_LEAF_ENTRY(l, entry);
struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, entry);
ASSERT3U(le->le_type, ==, ZAP_CHUNK_ENTRY);
chunk = zap_leaf_chunk_alloc(nl);
nle = ZAP_LEAF_ENTRY(nl, chunk);
uint16_t chunk = zap_leaf_chunk_alloc(nl);
struct zap_leaf_entry *nle = ZAP_LEAF_ENTRY(nl, chunk);
*nle = *le; /* structure assignment */
(void) zap_leaf_rehash_entry(nl, chunk);
@ -791,7 +767,6 @@ zap_leaf_transfer_entry(zap_leaf_t *l, int entry, zap_leaf_t *nl)
void
zap_leaf_split(zap_leaf_t *l, zap_leaf_t *nl, boolean_t sort)
{
int i;
int bit = 64 - 1 - zap_leaf_phys(l)->l_hdr.lh_prefix_len;
/* set new prefix and prefix_len */
@ -818,7 +793,7 @@ zap_leaf_split(zap_leaf_t *l, zap_leaf_t *nl, boolean_t sort)
* but this accesses memory more sequentially, and when we're
* called, the block is usually pretty full.
*/
for (i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) {
for (int i = 0; i < ZAP_LEAF_NUMCHUNKS(l); i++) {
struct zap_leaf_entry *le = ZAP_LEAF_ENTRY(l, i);
if (le->le_type != ZAP_CHUNK_ENTRY)
continue;
@ -833,9 +808,7 @@ zap_leaf_split(zap_leaf_t *l, zap_leaf_t *nl, boolean_t sort)
void
zap_leaf_stats(zap_t *zap, zap_leaf_t *l, zap_stats_t *zs)
{
int i, n;
n = zap_f_phys(zap)->zap_ptrtbl.zt_shift -
int n = zap_f_phys(zap)->zap_ptrtbl.zt_shift -
zap_leaf_phys(l)->l_hdr.lh_prefix_len;
n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
zs->zs_leafs_with_2n_pointers[n]++;
@ -851,7 +824,7 @@ zap_leaf_stats(zap_t *zap, zap_leaf_t *l, zap_stats_t *zs)
n = MIN(n, ZAP_HISTOGRAM_SIZE-1);
zs->zs_blocks_n_tenths_full[n]++;
for (i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(l); i++) {
for (int i = 0; i < ZAP_LEAF_HASH_NUMENTRIES(l); i++) {
int nentries = 0;
int chunk = zap_leaf_phys(l)->l_hash[i];

View File

@ -21,7 +21,7 @@
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016 by Delphix. All rights reserved.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Nexenta Systems, Inc.
@ -89,22 +89,20 @@ zap_hash(zap_name_t *zn)
ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
if (zap_getflags(zap) & ZAP_FLAG_UINT64_KEY) {
int i;
const uint64_t *wp = zn->zn_key_norm;
ASSERT(zn->zn_key_intlen == 8);
for (i = 0; i < zn->zn_key_norm_numints; wp++, i++) {
int j;
for (int i = 0; i < zn->zn_key_norm_numints;
wp++, i++) {
uint64_t word = *wp;
for (j = 0; j < zn->zn_key_intlen; j++) {
for (int j = 0; j < zn->zn_key_intlen; j++) {
h = (h >> 8) ^
zfs_crc64_table[(h ^ word) & 0xFF];
word >>= NBBY;
}
}
} else {
int i, len;
const uint8_t *cp = zn->zn_key_norm;
/*
@ -114,10 +112,10 @@ zap_hash(zap_name_t *zn)
* zn_key_*_numints includes the terminating
* null for non-binary keys.)
*/
len = zn->zn_key_norm_numints - 1;
int len = zn->zn_key_norm_numints - 1;
ASSERT(zn->zn_key_intlen == 1);
for (i = 0; i < len; cp++, i++) {
for (int i = 0; i < len; cp++, i++) {
h = (h >> 8) ^
zfs_crc64_table[(h ^ *cp) & 0xFF];
}
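
zap_hash() folds the (possibly normalized) key through zfs_crc64_table one byte
at a time with h = (h >> 8) ^ table[(h ^ byte) & 0xFF]. Below is a small
standalone sketch of that per-byte update; the table-building loop and the
starting value are illustrative assumptions (ZFS seeds the hash with the
per-ZAP salt and uses the ECMA-182 reflected CRC-64 polynomial):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define	CRC64_POLY	0xC96C5795D7870F42ULL	/* ECMA-182, reflected */

	static uint64_t crc64_table[256];

	static void
	crc64_init(void)
	{
		for (int i = 0; i < 256; i++) {
			uint64_t c = i;
			for (int j = 0; j < 8; j++)
				c = (c >> 1) ^ (-(c & 1) & CRC64_POLY);
			crc64_table[i] = c;
		}
	}

	static uint64_t
	crc64_hash(uint64_t h, const uint8_t *buf, size_t len)
	{
		for (size_t i = 0; i < len; i++)
			h = (h >> 8) ^ crc64_table[(h ^ buf[i]) & 0xFF];
		return (h);
	}

	int
	main(void)
	{
		const char *key = "example";

		crc64_init();
		/* 0x2A is a made-up stand-in for the per-ZAP salt. */
		uint64_t h = crc64_hash(0x2AULL, (const uint8_t *)key,
		    strlen(key));
		printf("%016llx\n", (unsigned long long)h);
		return (0);
	}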
@ -137,15 +135,12 @@ zap_hash(zap_name_t *zn)
static int
zap_normalize(zap_t *zap, const char *name, char *namenorm, int normflags)
{
size_t inlen, outlen;
int err;
ASSERT(!(zap_getflags(zap) & ZAP_FLAG_UINT64_KEY));
inlen = strlen(name) + 1;
outlen = ZAP_MAXNAMELEN;
size_t inlen = strlen(name) + 1;
size_t outlen = ZAP_MAXNAMELEN;
err = 0;
int err = 0;
(void) u8_textprep_str((char *)name, &inlen, namenorm, &outlen,
normflags | U8_TEXTPREP_IGNORE_NULL | U8_TEXTPREP_IGNORE_INVALID,
U8_UNICODE_LATEST, &err);
@ -255,12 +250,11 @@ zap_name_alloc_uint64(zap_t *zap, const uint64_t *key, int numints)
static void
mzap_byteswap(mzap_phys_t *buf, size_t size)
{
int i, max;
buf->mz_block_type = BSWAP_64(buf->mz_block_type);
buf->mz_salt = BSWAP_64(buf->mz_salt);
buf->mz_normflags = BSWAP_64(buf->mz_normflags);
max = (size / MZAP_ENT_LEN) - 1;
for (i = 0; i < max; i++) {
int max = (size / MZAP_ENT_LEN) - 1;
for (int i = 0; i < max; i++) {
buf->mz_chunk[i].mze_value =
BSWAP_64(buf->mz_chunk[i].mze_value);
buf->mz_chunk[i].mze_cd =
@ -271,9 +265,7 @@ mzap_byteswap(mzap_phys_t *buf, size_t size)
void
zap_byteswap(void *buf, size_t size)
{
uint64_t block_type;
block_type = *(uint64_t *)buf;
uint64_t block_type = *(uint64_t *)buf;
if (block_type == ZBT_MICRO || block_type == BSWAP_64(ZBT_MICRO)) {
/* ASSERT(magic == ZAP_LEAF_MAGIC); */
@ -303,12 +295,10 @@ mze_compare(const void *arg1, const void *arg2)
static void
mze_insert(zap_t *zap, int chunkid, uint64_t hash)
{
mzap_ent_t *mze;
ASSERT(zap->zap_ismicro);
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
mze = kmem_alloc(sizeof (mzap_ent_t), KM_SLEEP);
mzap_ent_t *mze = kmem_alloc(sizeof (mzap_ent_t), KM_SLEEP);
mze->mze_chunkid = chunkid;
mze->mze_hash = hash;
mze->mze_cd = MZE_PHYS(zap, mze)->mze_cd;
@ -346,10 +336,8 @@ static uint32_t
mze_find_unused_cd(zap_t *zap, uint64_t hash)
{
mzap_ent_t mze_tofind;
mzap_ent_t *mze;
avl_index_t idx;
avl_tree_t *avl = &zap->zap_m.zap_avl;
uint32_t cd;
ASSERT(zap->zap_ismicro);
ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
@ -357,8 +345,8 @@ mze_find_unused_cd(zap_t *zap, uint64_t hash)
mze_tofind.mze_hash = hash;
mze_tofind.mze_cd = 0;
cd = 0;
for (mze = avl_find(avl, &mze_tofind, &idx);
uint32_t cd = 0;
for (mzap_ent_t *mze = avl_find(avl, &mze_tofind, &idx);
mze && mze->mze_hash == hash; mze = AVL_NEXT(avl, mze)) {
if (mze->mze_cd != cd)
break;
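
mze_find_unused_cd() walks the entries that share the given hash in (hash, cd)
order and returns the first collision differentiator not yet taken: with
existing cds {0, 1, 3} the loop advances past 0 and 1, sees 3 != 2, and returns
2. A minimal standalone sketch of the same first-gap scan over a sorted array
(hypothetical names, not the AVL-based code above):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t
	find_unused_cd(const uint32_t *used, int n)
	{
		/* "used" must be sorted ascending, like the AVL walk. */
		uint32_t cd = 0;
		for (int i = 0; i < n; i++) {
			if (used[i] != cd)
				break;		/* found a gap */
			cd++;
		}
		return (cd);
	}

	int
	main(void)
	{
		uint32_t used[] = { 0, 1, 3 };

		printf("%u\n", find_unused_cd(used, 3));	/* prints 2 */
		return (0);
	}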
@ -393,15 +381,13 @@ static zap_t *
mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db)
{
zap_t *winner;
zap_t *zap;
int i;
uint64_t *zap_hdr = (uint64_t *)db->db_data;
uint64_t zap_block_type = zap_hdr[0];
uint64_t zap_magic = zap_hdr[1];
ASSERT3U(MZAP_ENT_LEN, ==, sizeof (mzap_ent_phys_t));
zap = kmem_zalloc(sizeof (zap_t), KM_SLEEP);
zap_t *zap = kmem_zalloc(sizeof (zap_t), KM_SLEEP);
rw_init(&zap->zap_rwlock, 0, 0, 0);
rw_enter(&zap->zap_rwlock, RW_WRITER);
zap->zap_objset = os;
@ -437,7 +423,7 @@ mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db)
avl_create(&zap->zap_m.zap_avl, mze_compare,
sizeof (mzap_ent_t), offsetof(mzap_ent_t, mze_node));
for (i = 0; i < zap->zap_m.zap_num_chunks; i++) {
for (int i = 0; i < zap->zap_m.zap_num_chunks; i++) {
mzap_ent_phys_t *mze =
&zap_m_phys(zap)->mz_chunk[i];
if (mze->mze_name[0]) {
@ -484,28 +470,21 @@ mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db)
return (winner);
}
/*
* This routine "consumes" the caller's hold on the dbuf, which must
* have the specified tag.
*/
static int
zap_lockdir_impl(dmu_buf_t *db, void *tag, dmu_tx_t *tx,
krw_t lti, boolean_t fatreader, boolean_t adding, zap_t **zapp)
{
zap_t *zap;
krw_t lt;
ASSERT0(db->db_offset);
objset_t *os = dmu_buf_get_objset(db);
uint64_t obj = db->db_object;
*zapp = NULL;
#ifdef ZFS_DEBUG
{
dmu_object_info_t doi;
dmu_object_info_from_db(db, &doi);
ASSERT3U(DMU_OT_BYTESWAP(doi.doi_type), ==, DMU_BSWAP_ZAP);
}
#endif
zap = dmu_buf_get_user(db);
zap_t *zap = dmu_buf_get_user(db);
if (zap == NULL) {
zap = mzap_open(os, obj, db);
if (zap == NULL) {
@ -524,7 +503,7 @@ zap_lockdir_impl(dmu_buf_t *db, void *tag, dmu_tx_t *tx,
* can only be different if it was upgraded from micro to fat,
* and micro wanted WRITER but fat only needs READER.
*/
lt = (!zap->zap_ismicro && fatreader) ? RW_READER : lti;
krw_t lt = (!zap->zap_ismicro && fatreader) ? RW_READER : lti;
rw_enter(&zap->zap_rwlock, lt);
if (lt != ((!zap->zap_ismicro && fatreader) ? RW_READER : lti)) {
/* it was upgraded, now we only need reader */
@ -570,12 +549,19 @@ zap_lockdir_by_dnode(dnode_t *dn, dmu_tx_t *tx,
krw_t lti, boolean_t fatreader, boolean_t adding, void *tag, zap_t **zapp)
{
dmu_buf_t *db;
int err;
err = dmu_buf_hold_by_dnode(dn, 0, tag, &db, DMU_READ_NO_PREFETCH);
int err = dmu_buf_hold_by_dnode(dn, 0, tag, &db, DMU_READ_NO_PREFETCH);
if (err != 0) {
return (err);
}
#ifdef ZFS_DEBUG
{
dmu_object_info_t doi;
dmu_object_info_from_db(db, &doi);
ASSERT3U(DMU_OT_BYTESWAP(doi.doi_type), ==, DMU_BSWAP_ZAP);
}
#endif
err = zap_lockdir_impl(db, tag, tx, lti, fatreader, adding, zapp);
if (err != 0) {
dmu_buf_rele(db, tag);
@ -588,11 +574,17 @@ zap_lockdir(objset_t *os, uint64_t obj, dmu_tx_t *tx,
krw_t lti, boolean_t fatreader, boolean_t adding, void *tag, zap_t **zapp)
{
dmu_buf_t *db;
int err;
err = dmu_buf_hold(os, obj, 0, tag, &db, DMU_READ_NO_PREFETCH);
int err = dmu_buf_hold(os, obj, 0, tag, &db, DMU_READ_NO_PREFETCH);
if (err != 0)
return (err);
#ifdef ZFS_DEBUG
{
dmu_object_info_t doi;
dmu_object_info_from_db(db, &doi);
ASSERT3U(DMU_OT_BYTESWAP(doi.doi_type), ==, DMU_BSWAP_ZAP);
}
#endif
err = zap_lockdir_impl(db, tag, tx, lti, fatreader, adding, zapp);
if (err != 0)
dmu_buf_rele(db, tag);
@ -609,22 +601,20 @@ zap_unlockdir(zap_t *zap, void *tag)
static int
mzap_upgrade(zap_t **zapp, void *tag, dmu_tx_t *tx, zap_flags_t flags)
{
mzap_phys_t *mzp;
int i, sz, nchunks;
int err = 0;
zap_t *zap = *zapp;
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
sz = zap->zap_dbuf->db_size;
mzp = zio_buf_alloc(sz);
int sz = zap->zap_dbuf->db_size;
mzap_phys_t *mzp = zio_buf_alloc(sz);
bcopy(zap->zap_dbuf->db_data, mzp, sz);
nchunks = zap->zap_m.zap_num_chunks;
int nchunks = zap->zap_m.zap_num_chunks;
if (!flags) {
err = dmu_object_set_blocksize(zap->zap_objset, zap->zap_object,
1ULL << fzap_default_block_shift, 0, tx);
if (err) {
if (err != 0) {
zio_buf_free(mzp, sz);
return (err);
}
@ -637,19 +627,18 @@ mzap_upgrade(zap_t **zapp, void *tag, dmu_tx_t *tx, zap_flags_t flags)
fzap_upgrade(zap, tx, flags);
for (i = 0; i < nchunks; i++) {
for (int i = 0; i < nchunks; i++) {
mzap_ent_phys_t *mze = &mzp->mz_chunk[i];
zap_name_t *zn;
if (mze->mze_name[0] == 0)
continue;
dprintf("adding %s=%llu\n",
mze->mze_name, mze->mze_value);
zn = zap_name_alloc(zap, mze->mze_name, 0);
zap_name_t *zn = zap_name_alloc(zap, mze->mze_name, 0);
err = fzap_add_cd(zn, 8, 1, &mze->mze_value, mze->mze_cd,
tag, tx);
zap = zn->zn_zap; /* fzap_add_cd() may change zap */
zap_name_free(zn);
if (err)
if (err != 0)
break;
}
zio_buf_free(mzp, sz);
@ -679,32 +668,24 @@ mzap_create_impl(objset_t *os, uint64_t obj, int normflags, zap_flags_t flags,
dmu_tx_t *tx)
{
dmu_buf_t *db;
mzap_phys_t *zp;
VERIFY(0 == dmu_buf_hold(os, obj, 0, FTAG, &db, DMU_READ_NO_PREFETCH));
#ifdef ZFS_DEBUG
{
dmu_object_info_t doi;
dmu_object_info_from_db(db, &doi);
ASSERT3U(DMU_OT_BYTESWAP(doi.doi_type), ==, DMU_BSWAP_ZAP);
}
#endif
VERIFY0(dmu_buf_hold(os, obj, 0, FTAG, &db, DMU_READ_NO_PREFETCH));
dmu_buf_will_dirty(db, tx);
zp = db->db_data;
mzap_phys_t *zp = db->db_data;
zp->mz_block_type = ZBT_MICRO;
zp->mz_salt = ((uintptr_t)db ^ (uintptr_t)tx ^ (obj << 1)) | 1ULL;
zp->mz_normflags = normflags;
dmu_buf_rele(db, FTAG);
if (flags != 0) {
zap_t *zap;
/* Only fat zap supports flags; upgrade immediately. */
VERIFY(0 == zap_lockdir(os, obj, tx, RW_WRITER,
B_FALSE, B_FALSE, FTAG, &zap));
VERIFY3U(0, ==, mzap_upgrade(&zap, FTAG, tx, flags));
VERIFY0(zap_lockdir_impl(db, FTAG, tx, RW_WRITER,
B_FALSE, B_FALSE, &zap));
VERIFY0(mzap_upgrade(&zap, FTAG, tx, flags));
zap_unlockdir(zap, FTAG);
} else {
dmu_buf_rele(db, FTAG);
}
}
@ -721,9 +702,8 @@ zap_create_claim_norm(objset_t *os, uint64_t obj, int normflags,
dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
int err;
err = dmu_object_claim(os, obj, ot, 0, bonustype, bonuslen, tx);
ASSERT3U(DMU_OT_BYTESWAP(ot), ==, DMU_BSWAP_ZAP);
int err = dmu_object_claim(os, obj, ot, 0, bonustype, bonuslen, tx);
if (err != 0)
return (err);
mzap_create_impl(os, obj, normflags, 0, tx);
@ -741,6 +721,7 @@ uint64_t
zap_create_norm(objset_t *os, int normflags, dmu_object_type_t ot,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
ASSERT3U(DMU_OT_BYTESWAP(ot), ==, DMU_BSWAP_ZAP);
uint64_t obj = dmu_object_alloc(os, ot, 0, bonustype, bonuslen, tx);
mzap_create_impl(os, obj, normflags, 0, tx);
@ -752,6 +733,7 @@ zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
dmu_object_type_t ot, int leaf_blockshift, int indirect_blockshift,
dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
{
ASSERT3U(DMU_OT_BYTESWAP(ot), ==, DMU_BSWAP_ZAP);
uint64_t obj = dmu_object_alloc(os, ot, 0, bonustype, bonuslen, tx);
ASSERT(leaf_blockshift >= SPA_MINBLOCKSHIFT &&
@ -797,10 +779,10 @@ int
zap_count(objset_t *os, uint64_t zapobj, uint64_t *count)
{
zap_t *zap;
int err;
err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err)
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
if (!zap->zap_ismicro) {
err = fzap_count(zap, count);
@ -818,7 +800,6 @@ zap_count(objset_t *os, uint64_t zapobj, uint64_t *count)
static boolean_t
mzap_normalization_conflict(zap_t *zap, zap_name_t *zn, mzap_ent_t *mze)
{
mzap_ent_t *other;
int direction = AVL_BEFORE;
boolean_t allocdzn = B_FALSE;
@ -826,7 +807,7 @@ mzap_normalization_conflict(zap_t *zap, zap_name_t *zn, mzap_ent_t *mze)
return (B_FALSE);
again:
for (other = avl_walk(&zap->zap_m.zap_avl, mze, direction);
for (mzap_ent_t *other = avl_walk(&zap->zap_m.zap_avl, mze, direction);
other && other->mze_hash == mze->mze_hash;
other = avl_walk(&zap->zap_m.zap_avl, other, direction)) {
@ -871,10 +852,8 @@ zap_lookup_impl(zap_t *zap, const char *name,
boolean_t *ncp)
{
int err = 0;
mzap_ent_t *mze;
zap_name_t *zn;
zn = zap_name_alloc(zap, name, mt);
zap_name_t *zn = zap_name_alloc(zap, name, mt);
if (zn == NULL)
return (SET_ERROR(ENOTSUP));
@ -882,7 +861,7 @@ zap_lookup_impl(zap_t *zap, const char *name,
err = fzap_lookup(zn, integer_size, num_integers, buf,
realname, rn_len, ncp);
} else {
mze = mze_find(zn);
mzap_ent_t *mze = mze_find(zn);
if (mze == NULL) {
err = SET_ERROR(ENOENT);
} else {
@ -913,9 +892,9 @@ zap_lookup_norm(objset_t *os, uint64_t zapobj, const char *name,
boolean_t *ncp)
{
zap_t *zap;
int err;
err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
err = zap_lookup_impl(zap, name, integer_size,
@ -939,9 +918,8 @@ zap_lookup_norm_by_dnode(dnode_t *dn, const char *name,
boolean_t *ncp)
{
zap_t *zap;
int err;
err = zap_lockdir_by_dnode(dn, NULL, RW_READER, TRUE, FALSE,
int err = zap_lockdir_by_dnode(dn, NULL, RW_READER, TRUE, FALSE,
FTAG, &zap);
if (err != 0)
return (err);
@ -956,13 +934,12 @@ zap_prefetch_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints)
{
zap_t *zap;
int err;
zap_name_t *zn;
err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err)
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
zn = zap_name_alloc_uint64(zap, key, key_numints);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
@ -979,13 +956,12 @@ zap_lookup_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints, uint64_t integer_size, uint64_t num_integers, void *buf)
{
zap_t *zap;
int err;
zap_name_t *zn;
err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err)
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
zn = zap_name_alloc_uint64(zap, key, key_numints);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
@ -1013,14 +989,12 @@ zap_length(objset_t *os, uint64_t zapobj, const char *name,
uint64_t *integer_size, uint64_t *num_integers)
{
zap_t *zap;
int err;
mzap_ent_t *mze;
zap_name_t *zn;
err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err)
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
zn = zap_name_alloc(zap, name, 0);
zap_name_t *zn = zap_name_alloc(zap, name, 0);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
@ -1028,7 +1002,7 @@ zap_length(objset_t *os, uint64_t zapobj, const char *name,
if (!zap->zap_ismicro) {
err = fzap_length(zn, integer_size, num_integers);
} else {
mze = mze_find(zn);
mzap_ent_t *mze = mze_find(zn);
if (mze == NULL) {
err = SET_ERROR(ENOENT);
} else {
@ -1048,13 +1022,12 @@ zap_length_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints, uint64_t *integer_size, uint64_t *num_integers)
{
zap_t *zap;
int err;
zap_name_t *zn;
err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err)
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
zn = zap_name_alloc_uint64(zap, key, key_numints);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
@ -1068,26 +1041,24 @@ zap_length_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
static void
mzap_addent(zap_name_t *zn, uint64_t value)
{
int i;
zap_t *zap = zn->zn_zap;
int start = zap->zap_m.zap_alloc_next;
uint32_t cd;
ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
#ifdef ZFS_DEBUG
for (i = 0; i < zap->zap_m.zap_num_chunks; i++) {
for (int i = 0; i < zap->zap_m.zap_num_chunks; i++) {
mzap_ent_phys_t *mze = &zap_m_phys(zap)->mz_chunk[i];
ASSERT(strcmp(zn->zn_key_orig, mze->mze_name) != 0);
}
#endif
cd = mze_find_unused_cd(zap, zn->zn_hash);
uint32_t cd = mze_find_unused_cd(zap, zn->zn_hash);
/* given the limited size of the microzap, this can't happen */
ASSERT(cd < zap_maxcd(zap));
again:
for (i = start; i < zap->zap_m.zap_num_chunks; i++) {
for (int i = start; i < zap->zap_m.zap_num_chunks; i++) {
mzap_ent_phys_t *mze = &zap_m_phys(zap)->mz_chunk[i];
if (mze->mze_name[0] == 0) {
mze->mze_value = value;
@ -1114,12 +1085,10 @@ zap_add_impl(zap_t *zap, const char *key,
int integer_size, uint64_t num_integers,
const void *val, dmu_tx_t *tx, void *tag)
{
int err = 0;
mzap_ent_t *mze;
const uint64_t *intval = val;
zap_name_t *zn;
int err = 0;
zn = zap_name_alloc(zap, key, 0);
zap_name_t *zn = zap_name_alloc(zap, key, 0);
if (zn == NULL) {
zap_unlockdir(zap, tag);
return (SET_ERROR(ENOTSUP));
@ -1136,8 +1105,7 @@ zap_add_impl(zap_t *zap, const char *key,
}
zap = zn->zn_zap; /* fzap_add() may change zap */
} else {
mze = mze_find(zn);
if (mze != NULL) {
if (mze_find(zn) != NULL) {
err = SET_ERROR(EEXIST);
} else {
mzap_addent(zn, *intval);
@ -1188,13 +1156,12 @@ zap_add_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
const void *val, dmu_tx_t *tx)
{
zap_t *zap;
int err;
zap_name_t *zn;
err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err)
int err =
zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err != 0)
return (err);
zn = zap_name_alloc_uint64(zap, key, key_numints);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
@ -1212,11 +1179,8 @@ zap_update(objset_t *os, uint64_t zapobj, const char *name,
int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx)
{
zap_t *zap;
mzap_ent_t *mze;
uint64_t oldval;
const uint64_t *intval = val;
zap_name_t *zn;
int err;
#ifdef ZFS_DEBUG
/*
@ -1227,10 +1191,11 @@ zap_update(objset_t *os, uint64_t zapobj, const char *name,
(void) zap_lookup(os, zapobj, name, 8, 1, &oldval);
#endif
err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err)
int err =
zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err != 0)
return (err);
zn = zap_name_alloc(zap, name, 0);
zap_name_t *zn = zap_name_alloc(zap, name, 0);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
@ -1250,7 +1215,7 @@ zap_update(objset_t *os, uint64_t zapobj, const char *name,
}
zap = zn->zn_zap; /* fzap_update() may change zap */
} else {
mze = mze_find(zn);
mzap_ent_t *mze = mze_find(zn);
if (mze != NULL) {
ASSERT3U(MZE_PHYS(zap, mze)->mze_value, ==, oldval);
MZE_PHYS(zap, mze)->mze_value = *intval;
@ -1271,13 +1236,12 @@ zap_update_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx)
{
zap_t *zap;
zap_name_t *zn;
int err;
err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err)
int err =
zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
if (err != 0)
return (err);
zn = zap_name_alloc_uint64(zap, key, key_numints);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
@ -1300,17 +1264,15 @@ static int
zap_remove_impl(zap_t *zap, const char *name,
matchtype_t mt, dmu_tx_t *tx)
{
mzap_ent_t *mze;
zap_name_t *zn;
int err = 0;
zn = zap_name_alloc(zap, name, mt);
zap_name_t *zn = zap_name_alloc(zap, name, mt);
if (zn == NULL)
return (SET_ERROR(ENOTSUP));
if (!zap->zap_ismicro) {
err = fzap_remove(zn, tx);
} else {
mze = mze_find(zn);
mzap_ent_t *mze = mze_find(zn);
if (mze == NULL) {
err = SET_ERROR(ENOENT);
} else {
@ -1358,13 +1320,12 @@ zap_remove_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
int key_numints, dmu_tx_t *tx)
{
zap_t *zap;
int err;
zap_name_t *zn;
err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
if (err)
int err =
zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
zn = zap_name_alloc_uint64(zap, key, key_numints);
zap_name_t *zn = zap_name_alloc_uint64(zap, key, key_numints);
if (zn == NULL) {
zap_unlockdir(zap, FTAG);
return (SET_ERROR(ENOTSUP));
@ -1440,9 +1401,6 @@ int
zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
{
int err;
avl_index_t idx;
mzap_ent_t mze_tofind;
mzap_ent_t *mze;
if (zc->zc_hash == -1ULL)
return (SET_ERROR(ENOENT));
@ -1451,7 +1409,7 @@ zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
int hb;
err = zap_lockdir(zc->zc_objset, zc->zc_zapobj, NULL,
RW_READER, TRUE, FALSE, NULL, &zc->zc_zap);
if (err)
if (err != 0)
return (err);
/*
@ -1471,10 +1429,14 @@ zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
if (!zc->zc_zap->zap_ismicro) {
err = fzap_cursor_retrieve(zc->zc_zap, zc, za);
} else {
avl_index_t idx;
mzap_ent_t mze_tofind;
mze_tofind.mze_hash = zc->zc_hash;
mze_tofind.mze_cd = zc->zc_cd;
mze = avl_find(&zc->zc_zap->zap_m.zap_avl, &mze_tofind, &idx);
mzap_ent_t *mze =
avl_find(&zc->zc_zap->zap_m.zap_avl, &mze_tofind, &idx);
if (mze == NULL) {
mze = avl_nearest(&zc->zc_zap->zap_m.zap_avl,
idx, AVL_AFTER);
@ -1511,11 +1473,11 @@ zap_cursor_advance(zap_cursor_t *zc)
int
zap_get_stats(objset_t *os, uint64_t zapobj, zap_stats_t *zs)
{
int err;
zap_t *zap;
err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err)
int err =
zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
if (err != 0)
return (err);
bzero(zs, sizeof (zap_stats_t));