Prefix all refcount functions with zfs_
Recent changes in the Linux kernel made it necessary to prefix the refcount_add() function with zfs_ due to a name collision. To bring the other functions in line with that and to avoid future collisions, prefix the other refcount functions as well.

Reviewed-by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tim Schumacher <timschumi@gmx.de>
Closes #7963
parent fc23d59fa0
commit 424fd7c3e0
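The change itself is mechanical: holder-tag semantics are untouched and only the prefix moves (zfs_refcount_add() had already been renamed by the earlier collision fix the message mentions). As a rough sketch of what a converted call site looks like, here is a self-contained fragment; the stub type and macros below merely stand in for the real ZFS definitions (in a non-debug build the type does reduce to a bare counter, as the refcount.h hunks further down show, though with atomics rather than these plain increments), and my_tag is a hypothetical holder:

#include <stdint.h>

/* Stand-in for zfs_refcount_t; not the real (tracked) implementation. */
typedef struct { uint64_t rc_count; } zfs_refcount_t;

#define zfs_refcount_create(rc)      ((rc)->rc_count = 0)
#define zfs_refcount_add(rc, tag)    ((void)(tag), ++(rc)->rc_count)
#define zfs_refcount_remove(rc, tag) ((void)(tag), --(rc)->rc_count)
#define zfs_refcount_destroy(rc)     ((rc)->rc_count = 0)

int
main(void)
{
    zfs_refcount_t ref;
    int my_tag; /* any stable address serves as a holder tag */

    /*
     * Post-commit spelling. Before this commit the same sequence
     * read refcount_create() / refcount_remove() /
     * refcount_destroy(); only zfs_refcount_add() already carried
     * the prefix.
     */
    zfs_refcount_create(&ref);
    (void) zfs_refcount_add(&ref, &my_tag);
    (void) zfs_refcount_remove(&ref, &my_tag);
    zfs_refcount_destroy(&ref);
    return (0);
}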
@@ -1340,7 +1340,7 @@ ztest_znode_init(uint64_t object)
 ztest_znode_t *zp = umem_alloc(sizeof (*zp), UMEM_NOFAIL);

 list_link_init(&zp->z_lnode);
-refcount_create(&zp->z_refcnt);
+zfs_refcount_create(&zp->z_refcnt);
 zp->z_object = object;
 zfs_rlock_init(&zp->z_range_lock);

@@ -1350,10 +1350,10 @@ ztest_znode_init(uint64_t object)
 static void
 ztest_znode_fini(ztest_znode_t *zp)
 {
-ASSERT(refcount_is_zero(&zp->z_refcnt));
+ASSERT(zfs_refcount_is_zero(&zp->z_refcnt));
 zfs_rlock_destroy(&zp->z_range_lock);
 zp->z_object = 0;
-refcount_destroy(&zp->z_refcnt);
+zfs_refcount_destroy(&zp->z_refcnt);
 list_link_init(&zp->z_lnode);
 umem_free(zp, sizeof (*zp));
 }
@@ -1403,8 +1403,8 @@ ztest_znode_put(ztest_ds_t *zd, ztest_znode_t *zp)
 ASSERT3U(zp->z_object, !=, 0);
 zll = &zd->zd_range_lock[zp->z_object & (ZTEST_OBJECT_LOCKS - 1)];
 mutex_enter(&zll->z_lock);
-refcount_remove(&zp->z_refcnt, RL_TAG);
-if (refcount_is_zero(&zp->z_refcnt)) {
+zfs_refcount_remove(&zp->z_refcnt, RL_TAG);
+if (zfs_refcount_is_zero(&zp->z_refcnt)) {
 list_remove(&zll->z_list, zp);
 ztest_znode_fini(zp);
 }
@@ -63,26 +63,24 @@ typedef struct refcount {
  * refcount_create[_untracked]()
  */

-void refcount_create(zfs_refcount_t *rc);
-void refcount_create_untracked(zfs_refcount_t *rc);
-void refcount_create_tracked(zfs_refcount_t *rc);
-void refcount_destroy(zfs_refcount_t *rc);
-void refcount_destroy_many(zfs_refcount_t *rc, uint64_t number);
-int refcount_is_zero(zfs_refcount_t *rc);
-int64_t refcount_count(zfs_refcount_t *rc);
-int64_t zfs_refcount_add(zfs_refcount_t *rc, void *holder_tag);
-int64_t refcount_remove(zfs_refcount_t *rc, void *holder_tag);
-int64_t refcount_add_many(zfs_refcount_t *rc, uint64_t number,
-void *holder_tag);
-int64_t refcount_remove_many(zfs_refcount_t *rc, uint64_t number,
-void *holder_tag);
-void refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src);
-void refcount_transfer_ownership(zfs_refcount_t *, void *, void *);
-boolean_t refcount_held(zfs_refcount_t *, void *);
-boolean_t refcount_not_held(zfs_refcount_t *, void *);
+void zfs_refcount_create(zfs_refcount_t *);
+void zfs_refcount_create_untracked(zfs_refcount_t *);
+void zfs_refcount_create_tracked(zfs_refcount_t *);
+void zfs_refcount_destroy(zfs_refcount_t *);
+void zfs_refcount_destroy_many(zfs_refcount_t *, uint64_t);
+int zfs_refcount_is_zero(zfs_refcount_t *);
+int64_t zfs_refcount_count(zfs_refcount_t *);
+int64_t zfs_refcount_add(zfs_refcount_t *, void *);
+int64_t zfs_refcount_remove(zfs_refcount_t *, void *);
+int64_t zfs_refcount_add_many(zfs_refcount_t *, uint64_t, void *);
+int64_t zfs_refcount_remove_many(zfs_refcount_t *, uint64_t, void *);
+void zfs_refcount_transfer(zfs_refcount_t *, zfs_refcount_t *);
+void zfs_refcount_transfer_ownership(zfs_refcount_t *, void *, void *);
+boolean_t zfs_refcount_held(zfs_refcount_t *, void *);
+boolean_t zfs_refcount_not_held(zfs_refcount_t *, void *);

-void refcount_init(void);
-void refcount_fini(void);
+void zfs_refcount_init(void);
+void zfs_refcount_fini(void);

 #else /* ZFS_DEBUG */

@@ -90,30 +88,30 @@ typedef struct refcount {
 uint64_t rc_count;
 } zfs_refcount_t;

-#define refcount_create(rc) ((rc)->rc_count = 0)
-#define refcount_create_untracked(rc) ((rc)->rc_count = 0)
-#define refcount_create_tracked(rc) ((rc)->rc_count = 0)
-#define refcount_destroy(rc) ((rc)->rc_count = 0)
-#define refcount_destroy_many(rc, number) ((rc)->rc_count = 0)
-#define refcount_is_zero(rc) ((rc)->rc_count == 0)
-#define refcount_count(rc) ((rc)->rc_count)
+#define zfs_refcount_create(rc) ((rc)->rc_count = 0)
+#define zfs_refcount_create_untracked(rc) ((rc)->rc_count = 0)
+#define zfs_refcount_create_tracked(rc) ((rc)->rc_count = 0)
+#define zfs_refcount_destroy(rc) ((rc)->rc_count = 0)
+#define zfs_refcount_destroy_many(rc, number) ((rc)->rc_count = 0)
+#define zfs_refcount_is_zero(rc) ((rc)->rc_count == 0)
+#define zfs_refcount_count(rc) ((rc)->rc_count)
 #define zfs_refcount_add(rc, holder) atomic_inc_64_nv(&(rc)->rc_count)
-#define refcount_remove(rc, holder) atomic_dec_64_nv(&(rc)->rc_count)
-#define refcount_add_many(rc, number, holder) \
+#define zfs_refcount_remove(rc, holder) atomic_dec_64_nv(&(rc)->rc_count)
+#define zfs_refcount_add_many(rc, number, holder) \
 atomic_add_64_nv(&(rc)->rc_count, number)
-#define refcount_remove_many(rc, number, holder) \
+#define zfs_refcount_remove_many(rc, number, holder) \
 atomic_add_64_nv(&(rc)->rc_count, -number)
-#define refcount_transfer(dst, src) { \
+#define zfs_refcount_transfer(dst, src) { \
 uint64_t __tmp = (src)->rc_count; \
 atomic_add_64(&(src)->rc_count, -__tmp); \
 atomic_add_64(&(dst)->rc_count, __tmp); \
 }
-#define refcount_transfer_ownership(rc, current_holder, new_holder) (void)0
-#define refcount_held(rc, holder) ((rc)->rc_count > 0)
-#define refcount_not_held(rc, holder) (B_TRUE)
+#define zfs_refcount_transfer_ownership(rc, current_holder, new_holder) (void)0
+#define zfs_refcount_held(rc, holder) ((rc)->rc_count > 0)
+#define zfs_refcount_not_held(rc, holder) (B_TRUE)

-#define refcount_init()
-#define refcount_fini()
+#define zfs_refcount_init()
+#define zfs_refcount_fini()

 #endif /* ZFS_DEBUG */

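To make the two flavors above concrete: with ZFS_DEBUG defined, the functions track each holder tag so leaked holds can be pinpointed; without it, every operation collapses to a plain 64-bit atomic on rc_count, exactly as the macros show. Below is a minimal C11 model of those non-debug semantics — stdatomic stands in for the SPL's atomic_*_64(_nv) primitives and the model_* names are hypothetical, so treat it as an illustration rather than the shipped code:

#include <stdatomic.h>
#include <stdint.h>
#include <assert.h>

typedef struct { _Atomic uint64_t rc_count; } model_refcount_t;

/* zfs_refcount_add: atomic_inc_64_nv returns the new value. */
static uint64_t
model_add(model_refcount_t *rc)
{
    return (atomic_fetch_add(&rc->rc_count, 1) + 1);
}

/* zfs_refcount_remove: atomic_dec_64_nv returns the new value. */
static uint64_t
model_remove(model_refcount_t *rc)
{
    return (atomic_fetch_sub(&rc->rc_count, 1) - 1);
}

/* zfs_refcount_transfer: drain src, credit dst, as in the macro. */
static void
model_transfer(model_refcount_t *dst, model_refcount_t *src)
{
    uint64_t tmp = atomic_load(&src->rc_count);

    atomic_fetch_sub(&src->rc_count, tmp);
    atomic_fetch_add(&dst->rc_count, tmp);
}

int
main(void)
{
    model_refcount_t a = { 0 }, b = { 0 };

    assert(model_add(&a) == 1);
    model_transfer(&b, &a);
    assert(atomic_load(&a.rc_count) == 0);
    assert(model_remove(&b) == 0);
    return (0);
}

Like the real zfs_refcount_transfer macro, the model's transfer is a read followed by two independent atomics rather than one atomic exchange, and the holder tags are discarded entirely; the tracked ZFS_DEBUG build exists precisely to keep that information.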
@@ -71,7 +71,7 @@
 __entry->db_offset = db->db.db_offset; \
 __entry->db_size = db->db.db_size; \
 __entry->db_state = db->db_state; \
-__entry->db_holds = refcount_count(&db->db_holds); \
+__entry->db_holds = zfs_refcount_count(&db->db_holds); \
 snprintf(__get_str(msg), TRACE_DBUF_MSG_MAX, \
 DBUF_TP_PRINTK_FMT, DBUF_TP_PRINTK_ARGS); \
 } else { \
@@ -595,7 +595,7 @@ abd_alloc(size_t size, boolean_t is_metadata)
 }
 abd->abd_size = size;
 abd->abd_parent = NULL;
-refcount_create(&abd->abd_children);
+zfs_refcount_create(&abd->abd_children);

 abd->abd_u.abd_scatter.abd_offset = 0;

@@ -612,7 +612,7 @@ abd_free_scatter(abd_t *abd)
 {
 abd_free_pages(abd);

-refcount_destroy(&abd->abd_children);
+zfs_refcount_destroy(&abd->abd_children);
 ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
 ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
 ABDSTAT_INCR(abdstat_scatter_chunk_waste,
@@ -639,7 +639,7 @@ abd_alloc_linear(size_t size, boolean_t is_metadata)
 }
 abd->abd_size = size;
 abd->abd_parent = NULL;
-refcount_create(&abd->abd_children);
+zfs_refcount_create(&abd->abd_children);

 if (is_metadata) {
 abd->abd_u.abd_linear.abd_buf = zio_buf_alloc(size);
@@ -662,7 +662,7 @@ abd_free_linear(abd_t *abd)
 zio_data_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
 }

-refcount_destroy(&abd->abd_children);
+zfs_refcount_destroy(&abd->abd_children);
 ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
 ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);

@@ -773,8 +773,8 @@ abd_get_offset_impl(abd_t *sabd, size_t off, size_t size)

 abd->abd_size = size;
 abd->abd_parent = sabd;
-refcount_create(&abd->abd_children);
-(void) refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
+zfs_refcount_create(&abd->abd_children);
+(void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);

 return (abd);
 }
@@ -816,7 +816,7 @@ abd_get_from_buf(void *buf, size_t size)
 abd->abd_flags = ABD_FLAG_LINEAR;
 abd->abd_size = size;
 abd->abd_parent = NULL;
-refcount_create(&abd->abd_children);
+zfs_refcount_create(&abd->abd_children);

 abd->abd_u.abd_linear.abd_buf = buf;

@@ -834,11 +834,11 @@ abd_put(abd_t *abd)
 ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));

 if (abd->abd_parent != NULL) {
-(void) refcount_remove_many(&abd->abd_parent->abd_children,
+(void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
 abd->abd_size, abd);
 }

-refcount_destroy(&abd->abd_children);
+zfs_refcount_destroy(&abd->abd_children);
 abd_free_struct(abd);
 }

@@ -870,7 +870,7 @@ abd_borrow_buf(abd_t *abd, size_t n)
 } else {
 buf = zio_buf_alloc(n);
 }
-(void) refcount_add_many(&abd->abd_children, n, buf);
+(void) zfs_refcount_add_many(&abd->abd_children, n, buf);

 return (buf);
 }
@@ -902,7 +902,7 @@ abd_return_buf(abd_t *abd, void *buf, size_t n)
 ASSERT0(abd_cmp_buf(abd, buf, n));
 zio_buf_free(buf, n);
 }
-(void) refcount_remove_many(&abd->abd_children, n, buf);
+(void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
 }

 void
module/zfs/arc.c
@@ -1272,7 +1272,7 @@ hdr_full_cons(void *vbuf, void *unused, int kmflag)
 bzero(hdr, HDR_FULL_SIZE);
 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
 cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
-refcount_create(&hdr->b_l1hdr.b_refcnt);
+zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
 mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
 list_link_init(&hdr->b_l1hdr.b_arc_node);
 list_link_init(&hdr->b_l2hdr.b_l2node);
@@ -1332,7 +1332,7 @@ hdr_full_dest(void *vbuf, void *unused)

 ASSERT(HDR_EMPTY(hdr));
 cv_destroy(&hdr->b_l1hdr.b_cv);
-refcount_destroy(&hdr->b_l1hdr.b_refcnt);
+zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt);
 mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
 arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
@@ -2318,18 +2318,18 @@ arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
 ASSERT(!HDR_HAS_RABD(hdr));
-(void) refcount_add_many(&state->arcs_esize[type],
+(void) zfs_refcount_add_many(&state->arcs_esize[type],
 HDR_GET_LSIZE(hdr), hdr);
 return;
 }

 ASSERT(!GHOST_STATE(state));
 if (hdr->b_l1hdr.b_pabd != NULL) {
-(void) refcount_add_many(&state->arcs_esize[type],
+(void) zfs_refcount_add_many(&state->arcs_esize[type],
 arc_hdr_size(hdr), hdr);
 }
 if (HDR_HAS_RABD(hdr)) {
-(void) refcount_add_many(&state->arcs_esize[type],
+(void) zfs_refcount_add_many(&state->arcs_esize[type],
 HDR_GET_PSIZE(hdr), hdr);
 }

@@ -2337,7 +2337,7 @@ arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
 buf = buf->b_next) {
 if (arc_buf_is_shared(buf))
 continue;
-(void) refcount_add_many(&state->arcs_esize[type],
+(void) zfs_refcount_add_many(&state->arcs_esize[type],
 arc_buf_size(buf), buf);
 }
 }
@@ -2359,18 +2359,18 @@ arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
 ASSERT(!HDR_HAS_RABD(hdr));
-(void) refcount_remove_many(&state->arcs_esize[type],
+(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 HDR_GET_LSIZE(hdr), hdr);
 return;
 }

 ASSERT(!GHOST_STATE(state));
 if (hdr->b_l1hdr.b_pabd != NULL) {
-(void) refcount_remove_many(&state->arcs_esize[type],
+(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 arc_hdr_size(hdr), hdr);
 }
 if (HDR_HAS_RABD(hdr)) {
-(void) refcount_remove_many(&state->arcs_esize[type],
+(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 HDR_GET_PSIZE(hdr), hdr);
 }

@@ -2378,7 +2378,7 @@ arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
 buf = buf->b_next) {
 if (arc_buf_is_shared(buf))
 continue;
-(void) refcount_remove_many(&state->arcs_esize[type],
+(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 arc_buf_size(buf), buf);
 }
 }
@@ -2397,7 +2397,7 @@ add_reference(arc_buf_hdr_t *hdr, void *tag)
 ASSERT(HDR_HAS_L1HDR(hdr));
 if (!MUTEX_HELD(HDR_LOCK(hdr))) {
 ASSERT(hdr->b_l1hdr.b_state == arc_anon);
-ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
 }

@@ -2435,7 +2435,7 @@ remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
 * arc_l2c_only counts as a ghost state so we don't need to explicitly
 * check to prevent usage of the arc_l2c_only list.
 */
-if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
+if (((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
 (state != arc_anon)) {
 multilist_insert(state->arcs_list[arc_buf_type(hdr)], hdr);
 ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
@@ -2480,7 +2480,7 @@ arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
 abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
 abi->abi_mfu_hits = l1hdr->b_mfu_hits;
 abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
-abi->abi_holds = refcount_count(&l1hdr->b_refcnt);
+abi->abi_holds = zfs_refcount_count(&l1hdr->b_refcnt);
 }

 if (l2hdr) {
@@ -2516,7 +2516,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
 */
 if (HDR_HAS_L1HDR(hdr)) {
 old_state = hdr->b_l1hdr.b_state;
-refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
+refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
 bufcnt = hdr->b_l1hdr.b_bufcnt;
 update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL ||
 HDR_HAS_RABD(hdr));
@@ -2586,7 +2586,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
 * the reference. As a result, we use the arc
 * header pointer for the reference.
 */
-(void) refcount_add_many(&new_state->arcs_size,
+(void) zfs_refcount_add_many(&new_state->arcs_size,
 HDR_GET_LSIZE(hdr), hdr);
 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
 ASSERT(!HDR_HAS_RABD(hdr));
@@ -2613,18 +2613,21 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
 if (arc_buf_is_shared(buf))
 continue;

-(void) refcount_add_many(&new_state->arcs_size,
+(void) zfs_refcount_add_many(
+&new_state->arcs_size,
 arc_buf_size(buf), buf);
 }
 ASSERT3U(bufcnt, ==, buffers);

 if (hdr->b_l1hdr.b_pabd != NULL) {
-(void) refcount_add_many(&new_state->arcs_size,
+(void) zfs_refcount_add_many(
+&new_state->arcs_size,
 arc_hdr_size(hdr), hdr);
 }

 if (HDR_HAS_RABD(hdr)) {
-(void) refcount_add_many(&new_state->arcs_size,
+(void) zfs_refcount_add_many(
+&new_state->arcs_size,
 HDR_GET_PSIZE(hdr), hdr);
 }
 }
@@ -2645,7 +2648,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
 * header on the ghost state.
 */

-(void) refcount_remove_many(&old_state->arcs_size,
+(void) zfs_refcount_remove_many(&old_state->arcs_size,
 HDR_GET_LSIZE(hdr), hdr);
 } else {
 uint32_t buffers = 0;
@@ -2670,7 +2673,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
 if (arc_buf_is_shared(buf))
 continue;

-(void) refcount_remove_many(
+(void) zfs_refcount_remove_many(
 &old_state->arcs_size, arc_buf_size(buf),
 buf);
 }
@@ -2679,13 +2682,13 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
 HDR_HAS_RABD(hdr));

 if (hdr->b_l1hdr.b_pabd != NULL) {
-(void) refcount_remove_many(
+(void) zfs_refcount_remove_many(
 &old_state->arcs_size, arc_hdr_size(hdr),
 hdr);
 }

 if (HDR_HAS_RABD(hdr)) {
-(void) refcount_remove_many(
+(void) zfs_refcount_remove_many(
 &old_state->arcs_size, HDR_GET_PSIZE(hdr),
 hdr);
 }
@@ -2998,7 +3001,7 @@ arc_return_buf(arc_buf_t *buf, void *tag)
 ASSERT3P(buf->b_data, !=, NULL);
 ASSERT(HDR_HAS_L1HDR(hdr));
 (void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
-(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
+(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);

 arc_loaned_bytes_update(-arc_buf_size(buf));
 }
@@ -3012,7 +3015,7 @@ arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
 ASSERT3P(buf->b_data, !=, NULL);
 ASSERT(HDR_HAS_L1HDR(hdr));
 (void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
-(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
+(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);

 arc_loaned_bytes_update(arc_buf_size(buf));
 }
@@ -3039,13 +3042,13 @@ arc_hdr_free_on_write(arc_buf_hdr_t *hdr, boolean_t free_rdata)

 /* protected by hash lock, if in the hash table */
 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
-ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 ASSERT(state != arc_anon && state != arc_l2c_only);

-(void) refcount_remove_many(&state->arcs_esize[type],
+(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 size, hdr);
 }
-(void) refcount_remove_many(&state->arcs_size, size, hdr);
+(void) zfs_refcount_remove_many(&state->arcs_size, size, hdr);
 if (type == ARC_BUFC_METADATA) {
 arc_space_return(size, ARC_SPACE_META);
 } else {
@@ -3078,7 +3081,8 @@ arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
 * refcount ownership to the hdr since it always owns
 * the refcount whenever an arc_buf_t is shared.
 */
-refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, buf, hdr);
+zfs_refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size,
+buf, hdr);
 hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
 abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
 HDR_ISTYPE_METADATA(hdr));
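The arc_share_buf() hunk just above is one of the few sites where more than the identifier changes: the longer prefix pushes the call past 80 columns, so it now wraps, which is why the hunk header grows from 7 to 8 lines. Semantically, zfs_refcount_transfer_ownership() re-tags an existing hold — here the hold on arcs_size moves from the buf pointer to the hdr pointer — without changing the count. A toy model of just that operation, using hypothetical model_* names and a single holder slot instead of the tracked list ZFS actually keeps:

#include <assert.h>
#include <stddef.h>

/* Toy single-slot "tracked" refcount: remembers at most one holder. */
typedef struct {
    long        rc_count;
    const void  *rc_holder;
} model_tracked_t;

static void
model_hold(model_tracked_t *rc, const void *holder)
{
    rc->rc_count++;
    rc->rc_holder = holder;
}

/*
 * Re-tag an existing hold: the count is unchanged, only the holder
 * recorded for debugging moves from current_holder to new_holder.
 */
static void
model_transfer_ownership(model_tracked_t *rc, const void *current_holder,
    const void *new_holder)
{
    assert(rc->rc_holder == current_holder);
    rc->rc_holder = new_holder;
}

int
main(void)
{
    model_tracked_t rc = { 0, NULL };
    int buf, hdr;

    model_hold(&rc, &buf);
    model_transfer_ownership(&rc, &buf, &hdr); /* count stays 1 */
    assert(rc.rc_count == 1 && rc.rc_holder == &hdr);
    return (0);
}

In a non-debug build the whole call compiles away to (void)0, as the macro block earlier in this diff shows.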
@@ -3106,7 +3110,8 @@ arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
 * We are no longer sharing this buffer so we need
 * to transfer its ownership to the rightful owner.
 */
-refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, hdr, buf);
+zfs_refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size,
+hdr, buf);
 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
 abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
 abd_put(hdr->b_l1hdr.b_pabd);
@@ -3376,7 +3381,7 @@ arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
 * it references and compressed arc enablement.
 */
 arc_hdr_alloc_abd(hdr, alloc_rdata);
-ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));

 return (hdr);
 }
@@ -3483,8 +3488,10 @@ arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
 * the wrong pointer address when calling arc_hdr_destroy() later.
 */

-(void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
-(void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr);
+(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
+arc_hdr_size(hdr), hdr);
+(void) zfs_refcount_add_many(&dev->l2ad_alloc,
+arc_hdr_size(nhdr), nhdr);

 buf_discard_identity(hdr);
 kmem_cache_free(old, hdr);
@@ -3570,9 +3577,9 @@ arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
 mutex_exit(&buf->b_evict_lock);
 }

-refcount_transfer(&nhdr->b_l1hdr.b_refcnt, &hdr->b_l1hdr.b_refcnt);
-(void) refcount_remove(&nhdr->b_l1hdr.b_refcnt, FTAG);
-ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
+zfs_refcount_transfer(&nhdr->b_l1hdr.b_refcnt, &hdr->b_l1hdr.b_refcnt);
+(void) zfs_refcount_remove(&nhdr->b_l1hdr.b_refcnt, FTAG);
+ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));

 if (need_crypt) {
 arc_hdr_set_flags(nhdr, ARC_FLAG_PROTECTED);
@@ -3770,7 +3777,7 @@ arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)

 vdev_space_update(dev->l2ad_vdev, -psize, 0, 0);

-(void) refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
+(void) zfs_refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
 arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
 }

@@ -3780,7 +3787,7 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
 if (HDR_HAS_L1HDR(hdr)) {
 ASSERT(hdr->b_l1hdr.b_buf == NULL ||
 hdr->b_l1hdr.b_bufcnt > 0);
-ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
 }
 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
@@ -3945,7 +3952,7 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
 return (bytes_evicted);
 }

-ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
+ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
 while (hdr->b_l1hdr.b_buf) {
 arc_buf_t *buf = hdr->b_l1hdr.b_buf;
 if (!mutex_tryenter(&buf->b_evict_lock)) {
@@ -4264,7 +4271,7 @@ arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
 {
 uint64_t evicted = 0;

-while (refcount_count(&state->arcs_esize[type]) != 0) {
+while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
 evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);

 if (!retry)
@@ -4287,7 +4294,7 @@ arc_prune_task(void *ptr)
 if (func != NULL)
 func(ap->p_adjust, ap->p_private);

-refcount_remove(&ap->p_refcnt, func);
+zfs_refcount_remove(&ap->p_refcnt, func);
 }

 /*
@@ -4310,14 +4317,14 @@ arc_prune_async(int64_t adjust)
 for (ap = list_head(&arc_prune_list); ap != NULL;
 ap = list_next(&arc_prune_list, ap)) {

-if (refcount_count(&ap->p_refcnt) >= 2)
+if (zfs_refcount_count(&ap->p_refcnt) >= 2)
 continue;

 zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
 ap->p_adjust = adjust;
 if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
 ap, TQ_SLEEP) == TASKQID_INVALID) {
-refcount_remove(&ap->p_refcnt, ap->p_pfunc);
+zfs_refcount_remove(&ap->p_refcnt, ap->p_pfunc);
 continue;
 }
 ARCSTAT_BUMP(arcstat_prune);
@@ -4339,8 +4346,9 @@ arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
 {
 int64_t delta;

-if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) {
-delta = MIN(refcount_count(&state->arcs_esize[type]), bytes);
+if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
+delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
+bytes);
 return (arc_evict_state(state, spa, delta, type));
 }

@@ -4383,8 +4391,9 @@ arc_adjust_meta_balanced(uint64_t meta_used)
 */
 adjustmnt = meta_used - arc_meta_limit;

-if (adjustmnt > 0 && refcount_count(&arc_mru->arcs_esize[type]) > 0) {
-delta = MIN(refcount_count(&arc_mru->arcs_esize[type]),
+if (adjustmnt > 0 &&
+zfs_refcount_count(&arc_mru->arcs_esize[type]) > 0) {
+delta = MIN(zfs_refcount_count(&arc_mru->arcs_esize[type]),
 adjustmnt);
 total_evicted += arc_adjust_impl(arc_mru, 0, delta, type);
 adjustmnt -= delta;
@@ -4400,8 +4409,9 @@ arc_adjust_meta_balanced(uint64_t meta_used)
 * simply decrement the amount of data evicted from the MRU.
 */

-if (adjustmnt > 0 && refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
-delta = MIN(refcount_count(&arc_mfu->arcs_esize[type]),
+if (adjustmnt > 0 &&
+zfs_refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
+delta = MIN(zfs_refcount_count(&arc_mfu->arcs_esize[type]),
 adjustmnt);
 total_evicted += arc_adjust_impl(arc_mfu, 0, delta, type);
 }
@@ -4409,17 +4419,17 @@ arc_adjust_meta_balanced(uint64_t meta_used)
 adjustmnt = meta_used - arc_meta_limit;

 if (adjustmnt > 0 &&
-refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
+zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
 delta = MIN(adjustmnt,
-refcount_count(&arc_mru_ghost->arcs_esize[type]));
+zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]));
 total_evicted += arc_adjust_impl(arc_mru_ghost, 0, delta, type);
 adjustmnt -= delta;
 }

 if (adjustmnt > 0 &&
-refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
+zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
 delta = MIN(adjustmnt,
-refcount_count(&arc_mfu_ghost->arcs_esize[type]));
+zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]));
 total_evicted += arc_adjust_impl(arc_mfu_ghost, 0, delta, type);
 }

@@ -4468,8 +4478,8 @@ arc_adjust_meta_only(uint64_t meta_used)
 * evict some from the MRU here, and some from the MFU below.
 */
 target = MIN((int64_t)(meta_used - arc_meta_limit),
-(int64_t)(refcount_count(&arc_anon->arcs_size) +
-refcount_count(&arc_mru->arcs_size) - arc_p));
+(int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
+zfs_refcount_count(&arc_mru->arcs_size) - arc_p));

 total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);

@@ -4479,7 +4489,7 @@ arc_adjust_meta_only(uint64_t meta_used)
 * space allotted to the MFU (which is defined as arc_c - arc_p).
 */
 target = MIN((int64_t)(meta_used - arc_meta_limit),
-(int64_t)(refcount_count(&arc_mfu->arcs_size) -
+(int64_t)(zfs_refcount_count(&arc_mfu->arcs_size) -
 (arc_c - arc_p)));

 total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
@@ -4600,8 +4610,8 @@ arc_adjust(void)
 * arc_p here, and then evict more from the MFU below.
 */
 target = MIN((int64_t)(asize - arc_c),
-(int64_t)(refcount_count(&arc_anon->arcs_size) +
-refcount_count(&arc_mru->arcs_size) + ameta - arc_p));
+(int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
+zfs_refcount_count(&arc_mru->arcs_size) + ameta - arc_p));

 /*
 * If we're below arc_meta_min, always prefer to evict data.
@@ -4692,8 +4702,8 @@ arc_adjust(void)
 * cache. The following logic enforces these limits on the ghost
 * caches, and evicts from them as needed.
 */
-target = refcount_count(&arc_mru->arcs_size) +
-refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
+target = zfs_refcount_count(&arc_mru->arcs_size) +
+zfs_refcount_count(&arc_mru_ghost->arcs_size) - arc_c;

 bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
 total_evicted += bytes;
@@ -4711,8 +4721,8 @@ arc_adjust(void)
 * mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
 * mru ghost + mfu ghost <= arc_c
 */
-target = refcount_count(&arc_mru_ghost->arcs_size) +
-refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
+target = zfs_refcount_count(&arc_mru_ghost->arcs_size) +
+zfs_refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;

 bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
 total_evicted += bytes;
@@ -5216,10 +5226,10 @@ arc_evictable_memory(void)
 {
 int64_t asize = aggsum_value(&arc_size);
 uint64_t arc_clean =
-refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
-refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
-refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
-refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
+zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
+zfs_refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
+zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
+zfs_refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
 uint64_t arc_dirty = MAX((int64_t)asize - (int64_t)arc_clean, 0);

 /*
@@ -5326,8 +5336,8 @@ arc_adapt(int bytes, arc_state_t *state)
 {
 int mult;
 uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
-int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size);
-int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size);
+int64_t mrug_size = zfs_refcount_count(&arc_mru_ghost->arcs_size);
+int64_t mfug_size = zfs_refcount_count(&arc_mfu_ghost->arcs_size);

 if (state == arc_l2c_only)
 return;
@@ -5502,7 +5512,7 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
 */
 if (!GHOST_STATE(state)) {

-(void) refcount_add_many(&state->arcs_size, size, tag);
+(void) zfs_refcount_add_many(&state->arcs_size, size, tag);

 /*
 * If this is reached via arc_read, the link is
@@ -5514,8 +5524,8 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
 * trying to [add|remove]_reference it.
 */
 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
-ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
-(void) refcount_add_many(&state->arcs_esize[type],
+ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+(void) zfs_refcount_add_many(&state->arcs_esize[type],
 size, tag);
 }

@@ -5525,8 +5535,8 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
 */
 if (aggsum_compare(&arc_size, arc_c) < 0 &&
 hdr->b_l1hdr.b_state == arc_anon &&
-(refcount_count(&arc_anon->arcs_size) +
-refcount_count(&arc_mru->arcs_size) > arc_p))
+(zfs_refcount_count(&arc_anon->arcs_size) +
+zfs_refcount_count(&arc_mru->arcs_size) > arc_p))
 arc_p = MIN(arc_c, arc_p + size);
 }
 }
@@ -5563,13 +5573,13 @@ arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)

 /* protected by hash lock, if in the hash table */
 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
-ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 ASSERT(state != arc_anon && state != arc_l2c_only);

-(void) refcount_remove_many(&state->arcs_esize[type],
+(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 size, tag);
 }
-(void) refcount_remove_many(&state->arcs_size, size, tag);
+(void) zfs_refcount_remove_many(&state->arcs_size, size, tag);

 VERIFY3U(hdr->b_type, ==, type);
 if (type == ARC_BUFC_METADATA) {
@@ -5616,7 +5626,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
 * another prefetch (to make it less likely to be evicted).
 */
 if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
-if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
+if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
 /* link protected by hash lock */
 ASSERT(multilist_link_active(
 &hdr->b_l1hdr.b_arc_node));
@@ -5659,7 +5669,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)

 if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
 new_state = arc_mru;
-if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) {
+if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) {
 arc_hdr_clear_flags(hdr,
 ARC_FLAG_PREFETCH |
 ARC_FLAG_PRESCIENT_PREFETCH);
@@ -5979,7 +5989,7 @@ arc_read_done(zio_t *zio)
 if (callback_cnt == 0)
 ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));

-ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
+ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
 callback_list != NULL);

 if (zio->io_error == 0) {
@@ -5990,7 +6000,7 @@ arc_read_done(zio_t *zio)
 arc_change_state(arc_anon, hdr, hash_lock);
 if (HDR_IN_HASH_TABLE(hdr))
 buf_hash_remove(hdr);
-freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
+freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
 }

 /*
@@ -6010,7 +6020,7 @@ arc_read_done(zio_t *zio)
 * in the cache).
 */
 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
-freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
+freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
 }

 /* execute each callback and free its structure */
@@ -6212,7 +6222,7 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
 ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) ||
 rc != EACCES);
 } else if (*arc_flags & ARC_FLAG_PREFETCH &&
-refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
+zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
 arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
 }
 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
@@ -6284,7 +6294,8 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
 ASSERT(!HDR_HAS_RABD(hdr));
 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
-ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
+ASSERT0(zfs_refcount_count(
+&hdr->b_l1hdr.b_refcnt));
 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
 } else if (HDR_IO_IN_PROGRESS(hdr)) {
@@ -6342,7 +6353,7 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
 }

 if (*arc_flags & ARC_FLAG_PREFETCH &&
-refcount_is_zero(&hdr->b_l1hdr.b_refcnt))
+zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt))
 arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
 if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH)
 arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
@@ -6533,7 +6544,7 @@ arc_add_prune_callback(arc_prune_func_t *func, void *private)
 p->p_pfunc = func;
 p->p_private = private;
 list_link_init(&p->p_node);
-refcount_create(&p->p_refcnt);
+zfs_refcount_create(&p->p_refcnt);

 mutex_enter(&arc_prune_mtx);
 zfs_refcount_add(&p->p_refcnt, &arc_prune_list);
@@ -6549,15 +6560,15 @@ arc_remove_prune_callback(arc_prune_t *p)
 boolean_t wait = B_FALSE;
 mutex_enter(&arc_prune_mtx);
 list_remove(&arc_prune_list, p);
-if (refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
+if (zfs_refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
 wait = B_TRUE;
 mutex_exit(&arc_prune_mtx);

 /* wait for arc_prune_task to finish */
 if (wait)
 taskq_wait_outstanding(arc_prune_taskq, 0);
-ASSERT0(refcount_count(&p->p_refcnt));
-refcount_destroy(&p->p_refcnt);
+ASSERT0(zfs_refcount_count(&p->p_refcnt));
+zfs_refcount_destroy(&p->p_refcnt);
 kmem_free(p, sizeof (*p));
 }

@@ -6600,7 +6611,7 @@ arc_freed(spa_t *spa, const blkptr_t *bp)
 * this hdr, then we don't destroy the hdr.
 */
 if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
-refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
+zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
 arc_change_state(arc_anon, hdr, hash_lock);
 arc_hdr_destroy(hdr);
 mutex_exit(hash_lock);
@@ -6644,7 +6655,7 @@ arc_release(arc_buf_t *buf, void *tag)
 ASSERT(HDR_EMPTY(hdr));

 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
-ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
+ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
 ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));

 hdr->b_l1hdr.b_arc_access = 0;
@@ -6672,7 +6683,7 @@ arc_release(arc_buf_t *buf, void *tag)
 ASSERT3P(state, !=, arc_anon);

 /* this buffer is not on any list */
-ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
+ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);

 if (HDR_HAS_L2HDR(hdr)) {
 mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
@@ -6765,12 +6776,13 @@ arc_release(arc_buf_t *buf, void *tag)
 ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
 ASSERT3P(state, !=, arc_l2c_only);

-(void) refcount_remove_many(&state->arcs_size,
+(void) zfs_refcount_remove_many(&state->arcs_size,
 arc_buf_size(buf), buf);

-if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
+if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
 ASSERT3P(state, !=, arc_l2c_only);
-(void) refcount_remove_many(&state->arcs_esize[type],
+(void) zfs_refcount_remove_many(
+&state->arcs_esize[type],
 arc_buf_size(buf), buf);
 }

@@ -6795,7 +6807,7 @@ arc_release(arc_buf_t *buf, void *tag)
 compress, type, HDR_HAS_RABD(hdr));
 ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
 ASSERT0(nhdr->b_l1hdr.b_bufcnt);
-ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt));
+ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
 VERIFY3U(nhdr->b_type, ==, type);
 ASSERT(!HDR_SHARED_DATA(nhdr));

@@ -6812,11 +6824,11 @@ arc_release(arc_buf_t *buf, void *tag)
 buf->b_hdr = nhdr;

 mutex_exit(&buf->b_evict_lock);
-(void) refcount_add_many(&arc_anon->arcs_size,
+(void) zfs_refcount_add_many(&arc_anon->arcs_size,
 HDR_GET_LSIZE(nhdr), buf);
 } else {
 mutex_exit(&buf->b_evict_lock);
-ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
+ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
 /* protected by hash lock, or hdr is on arc_anon */
 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
@@ -6853,7 +6865,7 @@ arc_referenced(arc_buf_t *buf)
 int referenced;

 mutex_enter(&buf->b_evict_lock);
-referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
+referenced = (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
 mutex_exit(&buf->b_evict_lock);
 return (referenced);
 }
@@ -6870,7 +6882,7 @@ arc_write_ready(zio_t *zio)
 fstrans_cookie_t cookie = spl_fstrans_mark();

 ASSERT(HDR_HAS_L1HDR(hdr));
-ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
+ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
 ASSERT(hdr->b_l1hdr.b_bufcnt > 0);

 /*
@@ -7081,7 +7093,7 @@ arc_write_done(zio_t *zio)
 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
 panic("bad overwrite, hdr=%p exists=%p",
 (void *)hdr, (void *)exists);
-ASSERT(refcount_is_zero(
+ASSERT(zfs_refcount_is_zero(
 &exists->b_l1hdr.b_refcnt));
 arc_change_state(arc_anon, exists, hash_lock);
 mutex_exit(hash_lock);
@@ -7111,7 +7123,7 @@ arc_write_done(zio_t *zio)
 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
 }

-ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+ASSERT(!zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 callback->awcb_done(zio, buf, callback->awcb_private);

 abd_put(zio->io_abd);
@@ -7289,7 +7301,7 @@ arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg)
 /* assert that it has not wrapped around */
 ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);

-anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
+anon_size = MAX((int64_t)(zfs_refcount_count(&arc_anon->arcs_size) -
 arc_loaned_bytes), 0);

 /*
@@ -7324,10 +7336,10 @@ arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg)
 anon_size > arc_c * zfs_arc_anon_limit_percent / 100 &&
 spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) {
 #ifdef ZFS_DEBUG
-uint64_t meta_esize =
-refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+uint64_t meta_esize = zfs_refcount_count(
+&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
 uint64_t data_esize =
-refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
 arc_tempreserve >> 10, meta_esize >> 10,
@@ -7344,11 +7356,11 @@ static void
 arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
 kstat_named_t *evict_data, kstat_named_t *evict_metadata)
 {
-size->value.ui64 = refcount_count(&state->arcs_size);
+size->value.ui64 = zfs_refcount_count(&state->arcs_size);
 evict_data->value.ui64 =
-refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
+zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
 evict_metadata->value.ui64 =
-refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
+zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
 }

 static int
@@ -7582,25 +7594,25 @@ arc_state_init(void)
 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
 arc_state_multilist_index_func);

-refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
-refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
-refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
-refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
-refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
-refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
-refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
-refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
-refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
-refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
-refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
-refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
+zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
+zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
+zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
+zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
+zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
+zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
+zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
+zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
+zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
+zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);

-refcount_create(&arc_anon->arcs_size);
-refcount_create(&arc_mru->arcs_size);
-refcount_create(&arc_mru_ghost->arcs_size);
-refcount_create(&arc_mfu->arcs_size);
-refcount_create(&arc_mfu_ghost->arcs_size);
-refcount_create(&arc_l2c_only->arcs_size);
+zfs_refcount_create(&arc_anon->arcs_size);
+zfs_refcount_create(&arc_mru->arcs_size);
+zfs_refcount_create(&arc_mru_ghost->arcs_size);
+zfs_refcount_create(&arc_mfu->arcs_size);
+zfs_refcount_create(&arc_mfu_ghost->arcs_size);
+zfs_refcount_create(&arc_l2c_only->arcs_size);

 aggsum_init(&arc_meta_used, 0);
 aggsum_init(&arc_size, 0);
@@ -7623,25 +7635,25 @@ arc_state_init(void)
 static void
 arc_state_fini(void)
 {
-refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
-refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
-refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
-refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
-refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
-refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
-refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
-refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
-refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
-refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
-refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
-refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
+zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
+zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
+zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
+zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
+zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
+zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
+zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
+zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
+zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
+zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);

-refcount_destroy(&arc_anon->arcs_size);
-refcount_destroy(&arc_mru->arcs_size);
-refcount_destroy(&arc_mru_ghost->arcs_size);
-refcount_destroy(&arc_mfu->arcs_size);
-refcount_destroy(&arc_mfu_ghost->arcs_size);
-refcount_destroy(&arc_l2c_only->arcs_size);
+zfs_refcount_destroy(&arc_anon->arcs_size);
+zfs_refcount_destroy(&arc_mru->arcs_size);
+zfs_refcount_destroy(&arc_mru_ghost->arcs_size);
+zfs_refcount_destroy(&arc_mfu->arcs_size);
+zfs_refcount_destroy(&arc_mfu_ghost->arcs_size);
+zfs_refcount_destroy(&arc_l2c_only->arcs_size);

 multilist_destroy(arc_mru->arcs_list[ARC_BUFC_METADATA]);
 multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
@@ -7821,8 +7833,8 @@ arc_fini(void)
 mutex_enter(&arc_prune_mtx);
 while ((p = list_head(&arc_prune_list)) != NULL) {
 list_remove(&arc_prune_list, p);
-refcount_remove(&p->p_refcnt, &arc_prune_list);
-refcount_destroy(&p->p_refcnt);
+zfs_refcount_remove(&p->p_refcnt, &arc_prune_list);
+zfs_refcount_destroy(&p->p_refcnt);
 kmem_free(p, sizeof (*p));
 }
 mutex_exit(&arc_prune_mtx);
@@ -8225,7 +8237,7 @@ l2arc_write_done(zio_t *zio)
 ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));

 bytes_dropped += arc_hdr_size(hdr);
-(void) refcount_remove_many(&dev->l2ad_alloc,
+(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
 arc_hdr_size(hdr), hdr);
 }

@@ -8924,7 +8936,7 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
 list_insert_head(&dev->l2ad_buflist, hdr);
 mutex_exit(&dev->l2ad_mtx);

-(void) refcount_add_many(&dev->l2ad_alloc,
+(void) zfs_refcount_add_many(&dev->l2ad_alloc,
 arc_hdr_size(hdr), hdr);

 wzio = zio_write_phys(pio, dev->l2ad_vdev,
@@ -9133,7 +9145,7 @@ l2arc_add_vdev(spa_t *spa, vdev_t *vd)
 offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));

 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
-refcount_create(&adddev->l2ad_alloc);
+zfs_refcount_create(&adddev->l2ad_alloc);

 /*
 * Add device to global list
@@ -9179,7 +9191,7 @@ l2arc_remove_vdev(vdev_t *vd)
 l2arc_evict(remdev, 0, B_TRUE);
 list_destroy(&remdev->l2ad_buflist);
 mutex_destroy(&remdev->l2ad_mtx);
-refcount_destroy(&remdev->l2ad_alloc);
+zfs_refcount_destroy(&remdev->l2ad_alloc);
 kmem_free(remdev, sizeof (l2arc_dev_t));
 }

@@ -289,7 +289,7 @@ dbuf_cons(void *vdb, void *unused, int kmflag)
 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
 multilist_link_init(&db->db_cache_link);
-refcount_create(&db->db_holds);
+zfs_refcount_create(&db->db_holds);

 return (0);
 }
@@ -302,7 +302,7 @@ dbuf_dest(void *vdb, void *unused)
 mutex_destroy(&db->db_mtx);
 cv_destroy(&db->db_changed);
 ASSERT(!multilist_link_active(&db->db_cache_link));
-refcount_destroy(&db->db_holds);
+zfs_refcount_destroy(&db->db_holds);
 }

 /*
@@ -445,7 +445,8 @@ dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
 * Sanity check for small-memory systems: don't allocate too
 * much memory for this purpose.
 */
-if (refcount_count(&dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
+if (zfs_refcount_count(
+&dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
 dbuf_metadata_cache_max_bytes) {
 DBUF_STAT_BUMP(metadata_cache_overflow);
 return (B_FALSE);
@@ -475,7 +476,7 @@ dbuf_hash_remove(dmu_buf_impl_t *db)
 * We mustn't hold db_mtx to maintain lock ordering:
 * DBUF_HASH_MUTEX > db_mtx.
 */
-ASSERT(refcount_is_zero(&db->db_holds));
+ASSERT(zfs_refcount_is_zero(&db->db_holds));
 ASSERT(db->db_state == DB_EVICTING);
 ASSERT(!MUTEX_HELD(&db->db_mtx));

@@ -515,7 +516,7 @@ dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
 ASSERT(db->db.db_data != NULL);
 ASSERT3U(db->db_state, ==, DB_CACHED);

-holds = refcount_count(&db->db_holds);
+holds = zfs_refcount_count(&db->db_holds);
 if (verify_type == DBVU_EVICTING) {
 /*
 * Immediate eviction occurs when holds == dirtycnt.
@@ -650,14 +651,14 @@ dbuf_cache_lowater_bytes(void)
 static inline boolean_t
 dbuf_cache_above_hiwater(void)
 {
-return (refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
+return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
 dbuf_cache_hiwater_bytes());
 }

 static inline boolean_t
 dbuf_cache_above_lowater(void)
 {
-return (refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
+return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
 dbuf_cache_lowater_bytes());
 }

@@ -684,8 +685,8 @@ dbuf_evict_one(void)
 if (db != NULL) {
 multilist_sublist_remove(mls, db);
 multilist_sublist_unlock(mls);
-(void) refcount_remove_many(&dbuf_caches[DB_DBUF_CACHE].size,
-db->db.db_size, db);
+(void) zfs_refcount_remove_many(
+&dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
 DBUF_STAT_BUMPDOWN(cache_count);
 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
@@ -694,7 +695,7 @@ dbuf_evict_one(void)
 db->db_caching_status = DB_NO_CACHE;
 dbuf_destroy(db);
 DBUF_STAT_MAX(cache_size_bytes_max,
-refcount_count(&dbuf_caches[DB_DBUF_CACHE].size));
+zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size));
 DBUF_STAT_BUMP(cache_total_evicts);
 } else {
 multilist_sublist_unlock(mls);
@@ -757,7 +758,7 @@ dbuf_evict_notify(void)
 * because it's OK to occasionally make the wrong decision here,
 * and grabbing the lock results in massive lock contention.
 */
-if (refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
+if (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
 dbuf_cache_target_bytes()) {
 if (dbuf_cache_above_hiwater())
 dbuf_evict_one();
@@ -773,10 +774,10 @@ dbuf_kstat_update(kstat_t *ksp, int rw)
 if (rw == KSTAT_WRITE) {
 return (SET_ERROR(EACCES));
 } else {
-ds->metadata_cache_size_bytes.value.ui64 =
-refcount_count(&dbuf_caches[DB_DBUF_METADATA_CACHE].size);
+ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
+&dbuf_caches[DB_DBUF_METADATA_CACHE].size);
 ds->cache_size_bytes.value.ui64 =
-refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
+zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
 ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
 ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
 ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
@@ -857,7 +858,7 @@ dbuf_init(void)
 multilist_create(sizeof (dmu_buf_impl_t),
 offsetof(dmu_buf_impl_t, db_cache_link),
 dbuf_cache_multilist_index_func);
-refcount_create(&dbuf_caches[dcs].size);
+zfs_refcount_create(&dbuf_caches[dcs].size);
 }

 dbuf_evict_thread_exit = B_FALSE;
@@ -921,7 +922,7 @@ dbuf_fini(void)
 cv_destroy(&dbuf_evict_cv);

 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
-refcount_destroy(&dbuf_caches[dcs].size);
+zfs_refcount_destroy(&dbuf_caches[dcs].size);
 multilist_destroy(dbuf_caches[dcs].cache);
 }

@@ -1113,7 +1114,7 @@ dbuf_loan_arcbuf(dmu_buf_impl_t *db)

 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
 mutex_enter(&db->db_mtx);
-if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
+if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
 int blksz = db->db.db_size;
 spa_t *spa = db->db_objset->os_spa;

@@ -1187,7 +1188,7 @@ dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
 /*
 * All reads are synchronous, so we must have a hold on the dbuf
 */
-ASSERT(refcount_count(&db->db_holds) > 0);
+ASSERT(zfs_refcount_count(&db->db_holds) > 0);
 ASSERT(db->db_buf == NULL);
 ASSERT(db->db.db_data == NULL);
 if (buf == NULL) {
@@ -1282,7 +1283,7 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)

 DB_DNODE_ENTER(db);
 dn = DB_DNODE(db);
-ASSERT(!refcount_is_zero(&db->db_holds));
+ASSERT(!zfs_refcount_is_zero(&db->db_holds));
 /* We need the struct_rwlock to prevent db_blkptr from changing. */
 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
 ASSERT(MUTEX_HELD(&db->db_mtx));
@@ -1447,7 +1448,7 @@ dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
 dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
 arc_space_consume(bonuslen, ARC_SPACE_BONUS);
 bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
-} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
+} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
 dnode_t *dn = DB_DNODE(db);
 int size = arc_buf_size(db->db_buf);
 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
@@ -1492,7 +1493,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
 * We don't have to hold the mutex to check db_state because it
 * can't be freed while we have a hold on the buffer.
 */
-ASSERT(!refcount_is_zero(&db->db_holds));
+ASSERT(!zfs_refcount_is_zero(&db->db_holds));

 if (db->db_state == DB_NOFILL)
 return (SET_ERROR(EIO));
@@ -1606,7 +1607,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
 static void
 dbuf_noread(dmu_buf_impl_t *db)
 {
-ASSERT(!refcount_is_zero(&db->db_holds));
+ASSERT(!zfs_refcount_is_zero(&db->db_holds));
 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
 mutex_enter(&db->db_mtx);
 while (db->db_state == DB_READ || db->db_state == DB_FILL)
@@ -1727,7 +1728,7 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
 mutex_exit(&db->db_mtx);
 continue;
 }
-if (refcount_count(&db->db_holds) == 0) {
+if (zfs_refcount_count(&db->db_holds) == 0) {
 ASSERT(db->db_buf);
 dbuf_destroy(db);
 continue;
@@ -1874,7 +1875,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 int txgoff = tx->tx_txg & TXG_MASK;

 ASSERT(tx->tx_txg != 0);
-ASSERT(!refcount_is_zero(&db->db_holds));
+ASSERT(!zfs_refcount_is_zero(&db->db_holds));
 DMU_TX_DIRTY_BUF(tx, db);

 DB_DNODE_ENTER(db);
@@ -2244,7 +2245,7 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 ASSERT(db->db_dirtycnt > 0);
 db->db_dirtycnt -= 1;

-if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
+if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
 ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
 dbuf_destroy(db);
 return (B_TRUE);
@@ -2259,7 +2260,7 @@ dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

 ASSERT(tx->tx_txg != 0);
-ASSERT(!refcount_is_zero(&db->db_holds));
+ASSERT(!zfs_refcount_is_zero(&db->db_holds));

 /*
 * Quick check for dirtyness. For already dirty blocks, this
@@ -2319,7 +2320,7 @@ dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
 ASSERT(tx->tx_txg != 0);
 ASSERT(db->db_level == 0);
-ASSERT(!refcount_is_zero(&db->db_holds));
+ASSERT(!zfs_refcount_is_zero(&db->db_holds));

 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
 dmu_tx_private_ok(tx));
@@ -2433,7 +2434,7 @@ dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
 void
 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
 {
-ASSERT(!refcount_is_zero(&db->db_holds));
+ASSERT(!zfs_refcount_is_zero(&db->db_holds));
 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
 ASSERT(db->db_level == 0);
 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
@@ -2452,7 +2453,7 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);

 if (db->db_state == DB_CACHED &&
-refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
+zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
 /*
 * In practice, we will never have a case where we have an
 * encrypted arc buffer while additional holds exist on the
@@ -2505,7 +2506,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
 dmu_buf_impl_t *dndb;

 ASSERT(MUTEX_HELD(&db->db_mtx));
-ASSERT(refcount_is_zero(&db->db_holds));
+ASSERT(zfs_refcount_is_zero(&db->db_holds));

 if (db->db_buf != NULL) {
|
||||
arc_buf_destroy(db->db_buf, db);
|
||||
@ -2529,7 +2530,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
|
||||
db->db_caching_status == DB_DBUF_METADATA_CACHE);
|
||||
|
||||
multilist_remove(dbuf_caches[db->db_caching_status].cache, db);
|
||||
(void) refcount_remove_many(
|
||||
(void) zfs_refcount_remove_many(
|
||||
&dbuf_caches[db->db_caching_status].size,
|
||||
db->db.db_size, db);
|
||||
|
||||
@ -2587,7 +2588,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
|
||||
DB_DNODE_EXIT(db);
|
||||
}
|
||||
|
||||
ASSERT(refcount_is_zero(&db->db_holds));
|
||||
ASSERT(zfs_refcount_is_zero(&db->db_holds));
|
||||
|
||||
db->db_parent = NULL;
|
||||
|
||||
@ -2783,7 +2784,7 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
|
||||
dbuf_add_ref(parent, db);
|
||||
|
||||
ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
|
||||
refcount_count(&dn->dn_holds) > 0);
|
||||
zfs_refcount_count(&dn->dn_holds) > 0);
|
||||
(void) zfs_refcount_add(&dn->dn_holds, db);
|
||||
atomic_inc_32(&dn->dn_dbufs_count);
|
||||
|
||||
@ -3162,14 +3163,14 @@ dbuf_hold_impl_arg(struct dbuf_hold_arg *dh)
|
||||
}
|
||||
|
||||
if (multilist_link_active(&dh->dh_db->db_cache_link)) {
|
||||
ASSERT(refcount_is_zero(&dh->dh_db->db_holds));
|
||||
ASSERT(zfs_refcount_is_zero(&dh->dh_db->db_holds));
|
||||
ASSERT(dh->dh_db->db_caching_status == DB_DBUF_CACHE ||
|
||||
dh->dh_db->db_caching_status == DB_DBUF_METADATA_CACHE);
|
||||
|
||||
multilist_remove(
|
||||
dbuf_caches[dh->dh_db->db_caching_status].cache,
|
||||
dh->dh_db);
|
||||
(void) refcount_remove_many(
|
||||
(void) zfs_refcount_remove_many(
|
||||
&dbuf_caches[dh->dh_db->db_caching_status].size,
|
||||
dh->dh_db->db.db_size, dh->dh_db);
|
||||
|
||||
@ -3382,7 +3383,7 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
|
||||
* dnode so we can guarantee in dnode_move() that a referenced bonus
|
||||
* buffer has a corresponding dnode hold.
|
||||
*/
|
||||
holds = refcount_remove(&db->db_holds, tag);
|
||||
holds = zfs_refcount_remove(&db->db_holds, tag);
|
||||
ASSERT(holds >= 0);
|
||||
|
||||
/*
|
||||
@ -3469,14 +3470,15 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
|
||||
db->db_caching_status = dcs;
|
||||
|
||||
multilist_insert(dbuf_caches[dcs].cache, db);
|
||||
(void) refcount_add_many(&dbuf_caches[dcs].size,
|
||||
(void) zfs_refcount_add_many(
|
||||
&dbuf_caches[dcs].size,
|
||||
db->db.db_size, db);
|
||||
|
||||
if (dcs == DB_DBUF_METADATA_CACHE) {
|
||||
DBUF_STAT_BUMP(metadata_cache_count);
|
||||
DBUF_STAT_MAX(
|
||||
metadata_cache_size_bytes_max,
|
||||
refcount_count(
|
||||
zfs_refcount_count(
|
||||
&dbuf_caches[dcs].size));
|
||||
} else {
|
||||
DBUF_STAT_BUMP(
|
||||
@ -3486,7 +3488,7 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
|
||||
cache_levels_bytes[db->db_level],
|
||||
db->db.db_size);
|
||||
DBUF_STAT_MAX(cache_size_bytes_max,
|
||||
refcount_count(
|
||||
zfs_refcount_count(
|
||||
&dbuf_caches[dcs].size));
|
||||
}
|
||||
mutex_exit(&db->db_mtx);
|
||||
@ -3510,7 +3512,7 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
|
||||
uint64_t
|
||||
dbuf_refcount(dmu_buf_impl_t *db)
|
||||
{
|
||||
return (refcount_count(&db->db_holds));
|
||||
return (zfs_refcount_count(&db->db_holds));
|
||||
}
|
||||
|
||||
uint64_t
|
||||
@ -3520,8 +3522,8 @@ dmu_buf_user_refcount(dmu_buf_t *db_fake)
|
||||
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
|
||||
|
||||
mutex_enter(&db->db_mtx);
|
||||
ASSERT3U(refcount_count(&db->db_holds), >=, db->db_dirtycnt);
|
||||
holds = refcount_count(&db->db_holds) - db->db_dirtycnt;
|
||||
ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
|
||||
holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
|
||||
mutex_exit(&db->db_mtx);
|
||||
|
||||
return (holds);
|
||||
@ -3878,7 +3880,7 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
|
||||
|
||||
if (db->db_state != DB_NOFILL &&
|
||||
dn->dn_object != DMU_META_DNODE_OBJECT &&
|
||||
refcount_count(&db->db_holds) > 1 &&
|
||||
zfs_refcount_count(&db->db_holds) > 1 &&
|
||||
dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
|
||||
*datap == db->db_buf) {
|
||||
/*
|
||||
|
@ -89,7 +89,7 @@ __dbuf_stats_hash_table_data(char *buf, size_t size, dmu_buf_impl_t *db)
(u_longlong_t)db->db.db_size,
!!dbuf_is_metadata(db),
db->db_state,
(ulong_t)refcount_count(&db->db_holds),
(ulong_t)zfs_refcount_count(&db->db_holds),
multilist_link_active(&db->db_cache_link),
/* arc_buf_info_t */
abi.abi_state_type,
@ -114,7 +114,7 @@ __dbuf_stats_hash_table_data(char *buf, size_t size, dmu_buf_impl_t *db)
(ulong_t)doi.doi_metadata_block_size,
(u_longlong_t)doi.doi_bonus_size,
(ulong_t)doi.doi_indirection,
(ulong_t)refcount_count(&dn->dn_holds),
(ulong_t)zfs_refcount_count(&dn->dn_holds),
(u_longlong_t)doi.doi_fill_count,
(u_longlong_t)doi.doi_max_offset);

@ -132,8 +132,8 @@ dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
txh->txh_tx = tx;
txh->txh_dnode = dn;
refcount_create(&txh->txh_space_towrite);
refcount_create(&txh->txh_memory_tohold);
zfs_refcount_create(&txh->txh_space_towrite);
zfs_refcount_create(&txh->txh_memory_tohold);
txh->txh_type = type;
txh->txh_arg1 = arg1;
txh->txh_arg2 = arg2;
@ -228,9 +228,9 @@ dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
if (len == 0)
return;

(void) refcount_add_many(&txh->txh_space_towrite, len, FTAG);
(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

if (refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
if (zfs_refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
err = SET_ERROR(EFBIG);

if (dn == NULL)
@ -295,7 +295,8 @@ dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
(void) refcount_add_many(&txh->txh_space_towrite, DNODE_MIN_SIZE, FTAG);
(void) zfs_refcount_add_many(&txh->txh_space_towrite,
DNODE_MIN_SIZE, FTAG);
}

void
@ -327,7 +328,7 @@ dmu_tx_hold_remap_l1indirect(dmu_tx_t *tx, uint64_t object)
return;

dnode_t *dn = txh->txh_dnode;
(void) refcount_add_many(&txh->txh_space_towrite,
(void) zfs_refcount_add_many(&txh->txh_space_towrite,
1ULL << dn->dn_indblkshift, FTAG);
dmu_tx_count_dnode(txh);
}
@ -434,7 +435,7 @@ dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
return;
}

(void) refcount_add_many(&txh->txh_memory_tohold,
(void) zfs_refcount_add_many(&txh->txh_memory_tohold,
1 << dn->dn_indblkshift, FTAG);

err = dmu_tx_check_ioerr(zio, dn, 1, i);
@ -493,7 +494,7 @@ dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
* - 2 blocks for possibly split leaves,
* - 2 grown ptrtbl blocks
*/
(void) refcount_add_many(&txh->txh_space_towrite,
(void) zfs_refcount_add_many(&txh->txh_space_towrite,
MZAP_MAX_BLKSZ, FTAG);

if (dn == NULL)
@ -583,8 +584,10 @@ dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)

txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
DMU_NEW_OBJECT, THT_SPACE, space, 0);
if (txh)
(void) refcount_add_many(&txh->txh_space_towrite, space, FTAG);
if (txh) {
(void) zfs_refcount_add_many(
&txh->txh_space_towrite, space, FTAG);
}
}

#ifdef ZFS_DEBUG
@ -935,8 +938,8 @@ dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
mutex_exit(&dn->dn_mtx);
}
towrite += refcount_count(&txh->txh_space_towrite);
tohold += refcount_count(&txh->txh_memory_tohold);
towrite += zfs_refcount_count(&txh->txh_space_towrite);
tohold += zfs_refcount_count(&txh->txh_memory_tohold);
}

/* needed allocation: worst-case estimate of write space */
@ -978,7 +981,7 @@ dmu_tx_unassign(dmu_tx_t *tx)
mutex_enter(&dn->dn_mtx);
ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
dn->dn_assigned_txg = 0;
cv_broadcast(&dn->dn_notxholds);
}
@ -1117,10 +1120,10 @@ dmu_tx_destroy(dmu_tx_t *tx)
dnode_t *dn = txh->txh_dnode;

list_remove(&tx->tx_holds, txh);
refcount_destroy_many(&txh->txh_space_towrite,
refcount_count(&txh->txh_space_towrite));
refcount_destroy_many(&txh->txh_memory_tohold,
refcount_count(&txh->txh_memory_tohold));
zfs_refcount_destroy_many(&txh->txh_space_towrite,
zfs_refcount_count(&txh->txh_space_towrite));
zfs_refcount_destroy_many(&txh->txh_memory_tohold,
zfs_refcount_count(&txh->txh_memory_tohold));
kmem_free(txh, sizeof (dmu_tx_hold_t));
if (dn != NULL)
dnode_rele(dn, tx);
@ -1150,7 +1153,7 @@ dmu_tx_commit(dmu_tx_t *tx)
mutex_enter(&dn->dn_mtx);
ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
dn->dn_assigned_txg = 0;
cv_broadcast(&dn->dn_notxholds);
}
@ -1265,7 +1268,7 @@ dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
THT_SPILL, 0, 0);
if (txh != NULL)
(void) refcount_add_many(&txh->txh_space_towrite,
(void) zfs_refcount_add_many(&txh->txh_space_towrite,
SPA_OLD_MAXBLOCKSIZE, FTAG);
}

@ -125,8 +125,8 @@ dnode_cons(void *arg, void *unused, int kmflag)
* Every dbuf has a reference, and dropping a tracked reference is
* O(number of references), so don't track dn_holds.
*/
refcount_create_untracked(&dn->dn_holds);
refcount_create(&dn->dn_tx_holds);
zfs_refcount_create_untracked(&dn->dn_holds);
zfs_refcount_create(&dn->dn_tx_holds);
list_link_init(&dn->dn_link);

bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
@ -184,8 +184,8 @@ dnode_dest(void *arg, void *unused)
mutex_destroy(&dn->dn_mtx);
mutex_destroy(&dn->dn_dbufs_mtx);
cv_destroy(&dn->dn_notxholds);
refcount_destroy(&dn->dn_holds);
refcount_destroy(&dn->dn_tx_holds);
zfs_refcount_destroy(&dn->dn_holds);
zfs_refcount_destroy(&dn->dn_tx_holds);
ASSERT(!list_link_active(&dn->dn_link));

for (i = 0; i < TXG_SIZE; i++) {
@ -384,7 +384,7 @@ dnode_buf_byteswap(void *vbuf, size_t size)
void
dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
{
ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);

dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
@ -401,7 +401,7 @@ dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
void
dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
{
ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dn->dn_bonustype = newtype;
@ -412,7 +412,7 @@ dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
void
dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
dnode_setdirty(dn, tx);
dn->dn_rm_spillblk[tx->tx_txg&TXG_MASK] = DN_KILL_SPILLBLK;
@ -605,8 +605,8 @@ dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_assigned_txg);
ASSERT0(dn->dn_dirty_txg);
ASSERT(refcount_is_zero(&dn->dn_tx_holds));
ASSERT3U(refcount_count(&dn->dn_holds), <=, 1);
ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
ASSERT3U(zfs_refcount_count(&dn->dn_holds), <=, 1);
ASSERT(avl_is_empty(&dn->dn_dbufs));

for (i = 0; i < TXG_SIZE; i++) {
@ -800,8 +800,8 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
ndn->dn_dirty_txg = odn->dn_dirty_txg;
ndn->dn_dirtyctx = odn->dn_dirtyctx;
ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
ASSERT(refcount_count(&odn->dn_tx_holds) == 0);
refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
ASSERT(avl_is_empty(&ndn->dn_dbufs));
avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
ndn->dn_dbufs_count = odn->dn_dbufs_count;
@ -993,7 +993,7 @@ dnode_move(void *buf, void *newbuf, size_t size, void *arg)
* hold before the dbuf is removed, the hold is discounted, and the
* removal is blocked until the move completes.
*/
refcount = refcount_count(&odn->dn_holds);
refcount = zfs_refcount_count(&odn->dn_holds);
ASSERT(refcount >= 0);
dbufs = odn->dn_dbufs_count;

@ -1021,7 +1021,7 @@ dnode_move(void *buf, void *newbuf, size_t size, void *arg)

list_link_replace(&odn->dn_link, &ndn->dn_link);
/* If the dnode was safe to move, the refcount cannot have changed. */
ASSERT(refcount == refcount_count(&ndn->dn_holds));
ASSERT(refcount == zfs_refcount_count(&ndn->dn_holds));
ASSERT(dbufs == ndn->dn_dbufs_count);
zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
mutex_exit(&os->os_lock);
@ -1170,7 +1170,7 @@ dnode_special_close(dnode_handle_t *dnh)
* has a hold on this dnode while we are trying to evict this
* dnode.
*/
while (refcount_count(&dn->dn_holds) > 0)
while (zfs_refcount_count(&dn->dn_holds) > 0)
delay(1);
ASSERT(dn->dn_dbuf == NULL ||
dmu_buf_get_user(&dn->dn_dbuf->db) == NULL);
@ -1225,8 +1225,8 @@ dnode_buf_evict_async(void *dbu)
* it wouldn't be eligible for eviction and this function
* would not have been called.
*/
ASSERT(refcount_is_zero(&dn->dn_holds));
ASSERT(refcount_is_zero(&dn->dn_tx_holds));
ASSERT(zfs_refcount_is_zero(&dn->dn_holds));
ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));

dnode_destroy(dn); /* implicit zrl_remove() for first slot */
zrl_destroy(&dnh->dnh_zrlock);
@ -1502,7 +1502,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
}

mutex_enter(&dn->dn_mtx);
if (!refcount_is_zero(&dn->dn_holds)) {
if (!zfs_refcount_is_zero(&dn->dn_holds)) {
DNODE_STAT_BUMP(dnode_hold_free_refcount);
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
@ -1563,7 +1563,7 @@ boolean_t
dnode_add_ref(dnode_t *dn, void *tag)
{
mutex_enter(&dn->dn_mtx);
if (refcount_is_zero(&dn->dn_holds)) {
if (zfs_refcount_is_zero(&dn->dn_holds)) {
mutex_exit(&dn->dn_mtx);
return (FALSE);
}
@ -1587,7 +1587,7 @@ dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting)
dmu_buf_impl_t *db = dn->dn_dbuf;
dnode_handle_t *dnh = dn->dn_handle;

refs = refcount_remove(&dn->dn_holds, tag);
refs = zfs_refcount_remove(&dn->dn_holds, tag);
mutex_exit(&dn->dn_mtx);

/*
@ -1652,7 +1652,7 @@ dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
return;
}

ASSERT(!refcount_is_zero(&dn->dn_holds) ||
ASSERT(!zfs_refcount_is_zero(&dn->dn_holds) ||
!avl_is_empty(&dn->dn_dbufs));
ASSERT(dn->dn_datablksz != 0);
ASSERT0(dn->dn_next_bonuslen[txg&TXG_MASK]);

@ -441,7 +441,7 @@ dnode_evict_dbufs(dnode_t *dn)

mutex_enter(&db->db_mtx);
if (db->db_state != DB_EVICTING &&
refcount_is_zero(&db->db_holds)) {
zfs_refcount_is_zero(&db->db_holds)) {
db_marker->db_level = db->db_level;
db_marker->db_blkid = db->db_blkid;
db_marker->db_state = DB_SEARCH;
@ -483,7 +483,7 @@ dnode_evict_bonus(dnode_t *dn)
{
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
if (dn->dn_bonus != NULL) {
if (refcount_is_zero(&dn->dn_bonus->db_holds)) {
if (zfs_refcount_is_zero(&dn->dn_bonus->db_holds)) {
mutex_enter(&dn->dn_bonus->db_mtx);
dbuf_destroy(dn->dn_bonus);
dn->dn_bonus = NULL;
@ -549,7 +549,7 @@ dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
* zfs_obj_to_path() also depends on this being
* commented out.
*
* ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
* ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 1);
*/

/* Undirty next bits */

@ -80,13 +80,13 @@ dsl_wrapping_key_hold(dsl_wrapping_key_t *wkey, void *tag)
static void
dsl_wrapping_key_rele(dsl_wrapping_key_t *wkey, void *tag)
{
(void) refcount_remove(&wkey->wk_refcnt, tag);
(void) zfs_refcount_remove(&wkey->wk_refcnt, tag);
}

static void
dsl_wrapping_key_free(dsl_wrapping_key_t *wkey)
{
ASSERT0(refcount_count(&wkey->wk_refcnt));
ASSERT0(zfs_refcount_count(&wkey->wk_refcnt));

if (wkey->wk_key.ck_data) {
bzero(wkey->wk_key.ck_data,
@ -95,7 +95,7 @@ dsl_wrapping_key_free(dsl_wrapping_key_t *wkey)
CRYPTO_BITS2BYTES(wkey->wk_key.ck_length));
}

refcount_destroy(&wkey->wk_refcnt);
zfs_refcount_destroy(&wkey->wk_refcnt);
kmem_free(wkey, sizeof (dsl_wrapping_key_t));
}

@ -123,7 +123,7 @@ dsl_wrapping_key_create(uint8_t *wkeydata, zfs_keyformat_t keyformat,
bcopy(wkeydata, wkey->wk_key.ck_data, WRAPPING_KEY_LEN);

/* initialize the rest of the struct */
refcount_create(&wkey->wk_refcnt);
zfs_refcount_create(&wkey->wk_refcnt);
wkey->wk_keyformat = keyformat;
wkey->wk_salt = salt;
wkey->wk_iters = iters;
@ -518,13 +518,13 @@ dsl_crypto_can_set_keylocation(const char *dsname, const char *keylocation)
static void
dsl_crypto_key_free(dsl_crypto_key_t *dck)
{
ASSERT(refcount_count(&dck->dck_holds) == 0);
ASSERT(zfs_refcount_count(&dck->dck_holds) == 0);

/* destroy the zio_crypt_key_t */
zio_crypt_key_destroy(&dck->dck_key);

/* free the refcount, wrapping key, and lock */
refcount_destroy(&dck->dck_holds);
zfs_refcount_destroy(&dck->dck_holds);
if (dck->dck_wkey)
dsl_wrapping_key_rele(dck->dck_wkey, dck);

@ -535,7 +535,7 @@ dsl_crypto_key_free(dsl_crypto_key_t *dck)
static void
dsl_crypto_key_rele(dsl_crypto_key_t *dck, void *tag)
{
if (refcount_remove(&dck->dck_holds, tag) == 0)
if (zfs_refcount_remove(&dck->dck_holds, tag) == 0)
dsl_crypto_key_free(dck);
}

@ -601,7 +601,7 @@ dsl_crypto_key_open(objset_t *mos, dsl_wrapping_key_t *wkey,
}

/* finish initializing the dsl_crypto_key_t */
refcount_create(&dck->dck_holds);
zfs_refcount_create(&dck->dck_holds);
dsl_wrapping_key_hold(wkey, dck);
dck->dck_wkey = wkey;
dck->dck_obj = dckobj;
@ -714,7 +714,7 @@ spa_keystore_dsl_key_rele(spa_t *spa, dsl_crypto_key_t *dck, void *tag)
{
rw_enter(&spa->spa_keystore.sk_dk_lock, RW_WRITER);

if (refcount_remove(&dck->dck_holds, tag) == 0) {
if (zfs_refcount_remove(&dck->dck_holds, tag) == 0) {
avl_remove(&spa->spa_keystore.sk_dsl_keys, dck);
dsl_crypto_key_free(dck);
}
@ -872,7 +872,7 @@ spa_keystore_unload_wkey_impl(spa_t *spa, uint64_t ddobj)
if (!found_wkey) {
ret = SET_ERROR(EACCES);
goto error_unlock;
} else if (refcount_count(&found_wkey->wk_refcnt) != 0) {
} else if (zfs_refcount_count(&found_wkey->wk_refcnt) != 0) {
ret = SET_ERROR(EBUSY);
goto error_unlock;
}
@ -946,11 +946,11 @@ spa_keystore_create_mapping_impl(spa_t *spa, uint64_t dsobj,

/* Allocate and initialize the mapping */
km = kmem_zalloc(sizeof (dsl_key_mapping_t), KM_SLEEP);
refcount_create(&km->km_refcnt);
zfs_refcount_create(&km->km_refcnt);

ret = spa_keystore_dsl_key_hold_dd(spa, dd, km, &km->km_key);
if (ret != 0) {
refcount_destroy(&km->km_refcnt);
zfs_refcount_destroy(&km->km_refcnt);
kmem_free(km, sizeof (dsl_key_mapping_t));
return (ret);
}
@ -980,7 +980,7 @@ spa_keystore_create_mapping_impl(spa_t *spa, uint64_t dsobj,

if (should_free) {
spa_keystore_dsl_key_rele(spa, km->km_key, km);
refcount_destroy(&km->km_refcnt);
zfs_refcount_destroy(&km->km_refcnt);
kmem_free(km, sizeof (dsl_key_mapping_t));
}

@ -1020,7 +1020,7 @@ spa_keystore_remove_mapping(spa_t *spa, uint64_t dsobj, void *tag)
* it is zero. Try to minimize time spent in this lock by deferring
* cleanup work.
*/
if (refcount_remove(&found_km->km_refcnt, tag) == 0) {
if (zfs_refcount_remove(&found_km->km_refcnt, tag) == 0) {
should_free = B_TRUE;
avl_remove(&spa->spa_keystore.sk_key_mappings, found_km);
}
@ -1506,7 +1506,7 @@ spa_keystore_change_key_sync(void *arg, dmu_tx_t *tx)
wkey_search.wk_ddobj = ds->ds_dir->dd_object;
found_wkey = avl_find(&spa->spa_keystore.sk_wkeys, &wkey_search, NULL);
if (found_wkey != NULL) {
ASSERT0(refcount_count(&found_wkey->wk_refcnt));
ASSERT0(zfs_refcount_count(&found_wkey->wk_refcnt));
avl_remove(&spa->spa_keystore.sk_wkeys, found_wkey);
dsl_wrapping_key_free(found_wkey);
}

@ -339,7 +339,7 @@ dsl_dataset_evict_async(void *dbu)
mutex_destroy(&ds->ds_opening_lock);
mutex_destroy(&ds->ds_sendstream_lock);
mutex_destroy(&ds->ds_remap_deadlist_lock);
refcount_destroy(&ds->ds_longholds);
zfs_refcount_destroy(&ds->ds_longholds);
rrw_destroy(&ds->ds_bp_rwlock);

kmem_free(ds, sizeof (dsl_dataset_t));
@ -484,7 +484,7 @@ dsl_dataset_hold_obj_flags(dsl_pool_t *dp, uint64_t dsobj,
mutex_init(&ds->ds_remap_deadlist_lock,
NULL, MUTEX_DEFAULT, NULL);
rrw_init(&ds->ds_bp_rwlock, B_FALSE);
refcount_create(&ds->ds_longholds);
zfs_refcount_create(&ds->ds_longholds);

bplist_create(&ds->ds_pending_deadlist);

@ -577,7 +577,7 @@ dsl_dataset_hold_obj_flags(dsl_pool_t *dp, uint64_t dsobj,
mutex_destroy(&ds->ds_lock);
mutex_destroy(&ds->ds_opening_lock);
mutex_destroy(&ds->ds_sendstream_lock);
refcount_destroy(&ds->ds_longholds);
zfs_refcount_destroy(&ds->ds_longholds);
kmem_free(ds, sizeof (dsl_dataset_t));
if (err != 0) {
dmu_buf_rele(dbuf, tag);
@ -733,14 +733,14 @@ dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag)
void
dsl_dataset_long_rele(dsl_dataset_t *ds, void *tag)
{
(void) refcount_remove(&ds->ds_longholds, tag);
(void) zfs_refcount_remove(&ds->ds_longholds, tag);
}

/* Return B_TRUE if there are any long holds on this dataset. */
boolean_t
dsl_dataset_long_held(dsl_dataset_t *ds)
{
return (!refcount_is_zero(&ds->ds_longholds));
return (!zfs_refcount_is_zero(&ds->ds_longholds));
}

void

@ -293,7 +293,7 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
ASSERT(refcount_is_zero(&ds->ds_longholds));
ASSERT(zfs_refcount_is_zero(&ds->ds_longholds));

if (defer &&
(ds->ds_userrefs > 0 ||
@ -729,7 +729,7 @@ dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
if (ds->ds_is_snapshot)
return (SET_ERROR(EINVAL));

if (refcount_count(&ds->ds_longholds) != expected_holds)
if (zfs_refcount_count(&ds->ds_longholds) != expected_holds)
return (SET_ERROR(EBUSY));

mos = ds->ds_dir->dd_pool->dp_meta_objset;
@ -757,7 +757,7 @@ dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
ds->ds_prev->ds_userrefs == 0) {
/* We need to remove the origin snapshot as well. */
if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
if (!zfs_refcount_is_zero(&ds->ds_prev->ds_longholds))
return (SET_ERROR(EBUSY));
}
return (0);

@ -1314,8 +1314,8 @@ scan_prefetch_queue_compare(const void *a, const void *b)
static void
scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, void *tag)
{
if (refcount_remove(&spc->spc_refcnt, tag) == 0) {
refcount_destroy(&spc->spc_refcnt);
if (zfs_refcount_remove(&spc->spc_refcnt, tag) == 0) {
zfs_refcount_destroy(&spc->spc_refcnt);
kmem_free(spc, sizeof (scan_prefetch_ctx_t));
}
}
@ -1326,7 +1326,7 @@ scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag)
scan_prefetch_ctx_t *spc;

spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
refcount_create(&spc->spc_refcnt);
zfs_refcount_create(&spc->spc_refcnt);
zfs_refcount_add(&spc->spc_refcnt, tag);
spc->spc_scn = scn;
if (dnp != NULL) {

@ -251,7 +251,7 @@ metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
sizeof (uint64_t), KM_SLEEP);
for (int i = 0; i < spa->spa_alloc_count; i++)
refcount_create_tracked(&mc->mc_alloc_slots[i]);
zfs_refcount_create_tracked(&mc->mc_alloc_slots[i]);

return (mc);
}
@ -266,7 +266,7 @@ metaslab_class_destroy(metaslab_class_t *mc)
ASSERT(mc->mc_dspace == 0);

for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
refcount_destroy(&mc->mc_alloc_slots[i]);
zfs_refcount_destroy(&mc->mc_alloc_slots[i]);
kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
sizeof (zfs_refcount_t));
kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
@ -653,7 +653,7 @@ metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
sizeof (uint64_t), KM_SLEEP);
for (int i = 0; i < allocators; i++) {
refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
zfs_refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
mg->mg_cur_max_alloc_queue_depth[i] = 0;
}

@ -683,7 +683,7 @@ metaslab_group_destroy(metaslab_group_t *mg)
mutex_destroy(&mg->mg_lock);

for (int i = 0; i < mg->mg_allocators; i++) {
refcount_destroy(&mg->mg_alloc_queue_depth[i]);
zfs_refcount_destroy(&mg->mg_alloc_queue_depth[i]);
mg->mg_cur_max_alloc_queue_depth[i] = 0;
}
kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
@ -1048,7 +1048,8 @@ metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
*/
qmax = qmax * (4 + d) / 4;

qdepth = refcount_count(&mg->mg_alloc_queue_depth[allocator]);
qdepth = zfs_refcount_count(
&mg->mg_alloc_queue_depth[allocator]);

/*
* If this metaslab group is below its qmax or it's
@ -1069,7 +1070,7 @@ metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
qmax = mgp->mg_cur_max_alloc_queue_depth[allocator];
qmax = qmax * (4 + d) / 4;
qdepth = refcount_count(
qdepth = zfs_refcount_count(
&mgp->mg_alloc_queue_depth[allocator]);

/*
@ -2936,7 +2937,7 @@ metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;

(void) refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
(void) zfs_refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
if (io_complete)
metaslab_group_increment_qdepth(mg, allocator);
}
@ -2952,8 +2953,8 @@ metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
for (int d = 0; d < ndvas; d++) {
uint64_t vdev = DVA_GET_VDEV(&dva[d]);
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth[allocator],
tag));
VERIFY(zfs_refcount_not_held(
&mg->mg_alloc_queue_depth[allocator], tag));
}
#endif
}
@ -3840,7 +3841,7 @@ metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
mutex_enter(&mc->mc_lock);

uint64_t reserved_slots =
refcount_count(&mc->mc_alloc_slots[allocator]);
zfs_refcount_count(&mc->mc_alloc_slots[allocator]);
if (reserved_slots < max)
available_slots = max - reserved_slots;

@ -3870,7 +3871,7 @@ metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
ASSERT(mc->mc_alloc_throttle_enabled);
mutex_enter(&mc->mc_lock);
for (int d = 0; d < slots; d++) {
(void) refcount_remove(&mc->mc_alloc_slots[allocator],
(void) zfs_refcount_remove(&mc->mc_alloc_slots[allocator],
zio);
}
mutex_exit(&mc->mc_lock);

@ -38,7 +38,7 @@ static kmem_cache_t *reference_cache;
static kmem_cache_t *reference_history_cache;

void
refcount_init(void)
zfs_refcount_init(void)
{
reference_cache = kmem_cache_create("reference_cache",
sizeof (reference_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
@ -48,14 +48,14 @@ refcount_init(void)
}

void
refcount_fini(void)
zfs_refcount_fini(void)
{
kmem_cache_destroy(reference_cache);
kmem_cache_destroy(reference_history_cache);
}

void
refcount_create(zfs_refcount_t *rc)
zfs_refcount_create(zfs_refcount_t *rc)
{
mutex_init(&rc->rc_mtx, NULL, MUTEX_DEFAULT, NULL);
list_create(&rc->rc_list, sizeof (reference_t),
@ -68,21 +68,21 @@ refcount_create(zfs_refcount_t *rc)
}

void
refcount_create_tracked(zfs_refcount_t *rc)
zfs_refcount_create_tracked(zfs_refcount_t *rc)
{
refcount_create(rc);
zfs_refcount_create(rc);
rc->rc_tracked = B_TRUE;
}

void
refcount_create_untracked(zfs_refcount_t *rc)
zfs_refcount_create_untracked(zfs_refcount_t *rc)
{
refcount_create(rc);
zfs_refcount_create(rc);
rc->rc_tracked = B_FALSE;
}

void
refcount_destroy_many(zfs_refcount_t *rc, uint64_t number)
zfs_refcount_destroy_many(zfs_refcount_t *rc, uint64_t number)
{
reference_t *ref;

@ -103,25 +103,25 @@ refcount_destroy_many(zfs_refcount_t *rc, uint64_t number)
}

void
refcount_destroy(zfs_refcount_t *rc)
zfs_refcount_destroy(zfs_refcount_t *rc)
{
refcount_destroy_many(rc, 0);
zfs_refcount_destroy_many(rc, 0);
}

int
refcount_is_zero(zfs_refcount_t *rc)
zfs_refcount_is_zero(zfs_refcount_t *rc)
{
return (rc->rc_count == 0);
}

int64_t
refcount_count(zfs_refcount_t *rc)
zfs_refcount_count(zfs_refcount_t *rc)
{
return (rc->rc_count);
}

int64_t
refcount_add_many(zfs_refcount_t *rc, uint64_t number, void *holder)
zfs_refcount_add_many(zfs_refcount_t *rc, uint64_t number, void *holder)
{
reference_t *ref = NULL;
int64_t count;
@ -145,11 +145,11 @@ refcount_add_many(zfs_refcount_t *rc, uint64_t number, void *holder)
int64_t
zfs_refcount_add(zfs_refcount_t *rc, void *holder)
{
return (refcount_add_many(rc, 1, holder));
return (zfs_refcount_add_many(rc, 1, holder));
}

int64_t
refcount_remove_many(zfs_refcount_t *rc, uint64_t number, void *holder)
zfs_refcount_remove_many(zfs_refcount_t *rc, uint64_t number, void *holder)
{
reference_t *ref;
int64_t count;
@ -197,13 +197,13 @@ refcount_remove_many(zfs_refcount_t *rc, uint64_t number, void *holder)
}

int64_t
refcount_remove(zfs_refcount_t *rc, void *holder)
zfs_refcount_remove(zfs_refcount_t *rc, void *holder)
{
return (refcount_remove_many(rc, 1, holder));
return (zfs_refcount_remove_many(rc, 1, holder));
}

void
refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
zfs_refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
{
int64_t count, removed_count;
list_t list, removed;
@ -234,7 +234,7 @@ refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
}

void
refcount_transfer_ownership(zfs_refcount_t *rc, void *current_holder,
zfs_refcount_transfer_ownership(zfs_refcount_t *rc, void *current_holder,
void *new_holder)
{
reference_t *ref;
@ -264,7 +264,7 @@ refcount_transfer_ownership(zfs_refcount_t *rc, void *current_holder,
* might be held.
*/
boolean_t
refcount_held(zfs_refcount_t *rc, void *holder)
zfs_refcount_held(zfs_refcount_t *rc, void *holder)
{
reference_t *ref;

@ -292,7 +292,7 @@ refcount_held(zfs_refcount_t *rc, void *holder)
* since the reference might not be held.
*/
boolean_t
refcount_not_held(zfs_refcount_t *rc, void *holder)
zfs_refcount_not_held(zfs_refcount_t *rc, void *holder)
{
reference_t *ref;

@ -85,7 +85,7 @@ rrn_find(rrwlock_t *rrl)
{
rrw_node_t *rn;

if (refcount_count(&rrl->rr_linked_rcount) == 0)
if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
return (NULL);

for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
@ -120,7 +120,7 @@ rrn_find_and_remove(rrwlock_t *rrl, void *tag)
rrw_node_t *rn;
rrw_node_t *prev = NULL;

if (refcount_count(&rrl->rr_linked_rcount) == 0)
if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
return (B_FALSE);

for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
@ -143,8 +143,8 @@ rrw_init(rrwlock_t *rrl, boolean_t track_all)
mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL);
rrl->rr_writer = NULL;
refcount_create(&rrl->rr_anon_rcount);
refcount_create(&rrl->rr_linked_rcount);
zfs_refcount_create(&rrl->rr_anon_rcount);
zfs_refcount_create(&rrl->rr_linked_rcount);
rrl->rr_writer_wanted = B_FALSE;
rrl->rr_track_all = track_all;
}
@ -155,8 +155,8 @@ rrw_destroy(rrwlock_t *rrl)
mutex_destroy(&rrl->rr_lock);
cv_destroy(&rrl->rr_cv);
ASSERT(rrl->rr_writer == NULL);
refcount_destroy(&rrl->rr_anon_rcount);
refcount_destroy(&rrl->rr_linked_rcount);
zfs_refcount_destroy(&rrl->rr_anon_rcount);
zfs_refcount_destroy(&rrl->rr_linked_rcount);
}

static void
@ -173,10 +173,10 @@ rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
#endif
ASSERT(rrl->rr_writer != curthread);
ASSERT(refcount_count(&rrl->rr_anon_rcount) >= 0);
ASSERT(zfs_refcount_count(&rrl->rr_anon_rcount) >= 0);

while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
zfs_refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
rrn_find(rrl) == NULL))
cv_wait(&rrl->rr_cv, &rrl->rr_lock);

@ -216,8 +216,8 @@ rrw_enter_write(rrwlock_t *rrl)
mutex_enter(&rrl->rr_lock);
ASSERT(rrl->rr_writer != curthread);

while (refcount_count(&rrl->rr_anon_rcount) > 0 ||
refcount_count(&rrl->rr_linked_rcount) > 0 ||
while (zfs_refcount_count(&rrl->rr_anon_rcount) > 0 ||
zfs_refcount_count(&rrl->rr_linked_rcount) > 0 ||
rrl->rr_writer != NULL) {
rrl->rr_writer_wanted = B_TRUE;
cv_wait(&rrl->rr_cv, &rrl->rr_lock);
@ -250,24 +250,25 @@ rrw_exit(rrwlock_t *rrl, void *tag)
}
DTRACE_PROBE(zfs__rrwfastpath__exitmiss);
#endif
ASSERT(!refcount_is_zero(&rrl->rr_anon_rcount) ||
!refcount_is_zero(&rrl->rr_linked_rcount) ||
ASSERT(!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
!zfs_refcount_is_zero(&rrl->rr_linked_rcount) ||
rrl->rr_writer != NULL);

if (rrl->rr_writer == NULL) {
int64_t count;
if (rrn_find_and_remove(rrl, tag)) {
count = refcount_remove(&rrl->rr_linked_rcount, tag);
count = zfs_refcount_remove(
&rrl->rr_linked_rcount, tag);
} else {
ASSERT(!rrl->rr_track_all);
count = refcount_remove(&rrl->rr_anon_rcount, tag);
count = zfs_refcount_remove(&rrl->rr_anon_rcount, tag);
}
if (count == 0)
cv_broadcast(&rrl->rr_cv);
} else {
ASSERT(rrl->rr_writer == curthread);
ASSERT(refcount_is_zero(&rrl->rr_anon_rcount) &&
refcount_is_zero(&rrl->rr_linked_rcount));
ASSERT(zfs_refcount_is_zero(&rrl->rr_anon_rcount) &&
zfs_refcount_is_zero(&rrl->rr_linked_rcount));
rrl->rr_writer = NULL;
cv_broadcast(&rrl->rr_cv);
}
@ -288,7 +289,7 @@ rrw_held(rrwlock_t *rrl, krw_t rw)
if (rw == RW_WRITER) {
held = (rrl->rr_writer == curthread);
} else {
held = (!refcount_is_zero(&rrl->rr_anon_rcount) ||
held = (!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
rrn_find(rrl) != NULL);
}
mutex_exit(&rrl->rr_lock);

@ -1136,7 +1136,7 @@ sa_tear_down(objset_t *os)
avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie))) {
sa_idx_tab_t *tab;
while ((tab = list_head(&layout->lot_idx_tab))) {
ASSERT(refcount_count(&tab->sa_refcount));
ASSERT(zfs_refcount_count(&tab->sa_refcount));
sa_idx_tab_rele(os, tab);
}
}
@ -1327,13 +1327,13 @@ sa_idx_tab_rele(objset_t *os, void *arg)
return;

mutex_enter(&sa->sa_lock);
if (refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
if (zfs_refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
list_remove(&idx_tab->sa_layout->lot_idx_tab, idx_tab);
if (idx_tab->sa_variable_lengths)
kmem_free(idx_tab->sa_variable_lengths,
sizeof (uint16_t) *
idx_tab->sa_layout->lot_var_sizes);
refcount_destroy(&idx_tab->sa_refcount);
zfs_refcount_destroy(&idx_tab->sa_refcount);
kmem_free(idx_tab->sa_idx_tab,
sizeof (uint32_t) * sa->sa_num_attrs);
kmem_free(idx_tab, sizeof (sa_idx_tab_t));
@ -1748,7 +1748,7 @@ sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, sa_hdr_phys_t *hdr)
idx_tab->sa_idx_tab =
kmem_zalloc(sizeof (uint32_t) * sa->sa_num_attrs, KM_SLEEP);
idx_tab->sa_layout = tb;
refcount_create(&idx_tab->sa_refcount);
zfs_refcount_create(&idx_tab->sa_refcount);
if (tb->lot_var_sizes)
idx_tab->sa_variable_lengths = kmem_alloc(sizeof (uint16_t) *
tb->lot_var_sizes, KM_SLEEP);

@ -2371,7 +2371,7 @@ spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = refcount_count(&spa->spa_refcount);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
if (error) {
if (error != EEXIST) {
spa->spa_loaded_ts.tv_sec = 0;
@ -5278,7 +5278,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = refcount_count(&spa->spa_refcount);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
spa->spa_load_state = SPA_LOAD_NONE;

mutex_exit(&spa_namespace_lock);
@ -7769,7 +7769,8 @@ spa_sync(spa_t *spa, uint64_t txg)
* allocations all happen from spa_sync().
*/
for (int i = 0; i < spa->spa_alloc_count; i++)
ASSERT0(refcount_count(&(mg->mg_alloc_queue_depth[i])));
ASSERT0(zfs_refcount_count(
&(mg->mg_alloc_queue_depth[i])));
mg->mg_max_alloc_queue_depth = max_queue_depth;

for (int i = 0; i < spa->spa_alloc_count; i++) {
@ -7780,9 +7781,9 @@ spa_sync(spa_t *spa, uint64_t txg)
}

for (int i = 0; i < spa->spa_alloc_count; i++) {
ASSERT0(refcount_count(&normal->mc_alloc_slots[i]));
ASSERT0(refcount_count(&special->mc_alloc_slots[i]));
ASSERT0(refcount_count(&dedup->mc_alloc_slots[i]));
ASSERT0(zfs_refcount_count(&normal->mc_alloc_slots[i]));
ASSERT0(zfs_refcount_count(&special->mc_alloc_slots[i]));
ASSERT0(zfs_refcount_count(&dedup->mc_alloc_slots[i]));
normal->mc_alloc_max_slots[i] = slots_per_allocator;
special->mc_alloc_max_slots[i] = slots_per_allocator;
dedup->mc_alloc_max_slots[i] = slots_per_allocator;

@ -434,7 +434,7 @@ spa_config_lock_init(spa_t *spa)
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
refcount_create_untracked(&scl->scl_count);
zfs_refcount_create_untracked(&scl->scl_count);
scl->scl_writer = NULL;
scl->scl_write_wanted = 0;
}
@ -447,7 +447,7 @@ spa_config_lock_destroy(spa_t *spa)
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_destroy(&scl->scl_lock);
cv_destroy(&scl->scl_cv);
refcount_destroy(&scl->scl_count);
zfs_refcount_destroy(&scl->scl_count);
ASSERT(scl->scl_writer == NULL);
ASSERT(scl->scl_write_wanted == 0);
}
@ -470,7 +470,7 @@ spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
}
} else {
ASSERT(scl->scl_writer != curthread);
if (!refcount_is_zero(&scl->scl_count)) {
if (!zfs_refcount_is_zero(&scl->scl_count)) {
mutex_exit(&scl->scl_lock);
spa_config_exit(spa, locks & ((1 << i) - 1),
tag);
@ -504,7 +504,7 @@ spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
}
} else {
ASSERT(scl->scl_writer != curthread);
while (!refcount_is_zero(&scl->scl_count)) {
while (!zfs_refcount_is_zero(&scl->scl_count)) {
scl->scl_write_wanted++;
cv_wait(&scl->scl_cv, &scl->scl_lock);
scl->scl_write_wanted--;
@ -525,8 +525,8 @@ spa_config_exit(spa_t *spa, int locks, void *tag)
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
ASSERT(!refcount_is_zero(&scl->scl_count));
if (refcount_remove(&scl->scl_count, tag) == 0) {
ASSERT(!zfs_refcount_is_zero(&scl->scl_count));
if (zfs_refcount_remove(&scl->scl_count, tag) == 0) {
ASSERT(scl->scl_writer == NULL ||
scl->scl_writer == curthread);
scl->scl_writer = NULL; /* OK in either case */
@ -545,7 +545,8 @@ spa_config_held(spa_t *spa, int locks, krw_t rw)
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
if ((rw == RW_READER &&
!zfs_refcount_is_zero(&scl->scl_count)) ||
(rw == RW_WRITER && scl->scl_writer == curthread))
locks_held |= 1 << i;
}
@ -663,7 +664,7 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
spa_set_deadman_failmode(spa, zfs_deadman_failmode);

refcount_create(&spa->spa_refcount);
zfs_refcount_create(&spa->spa_refcount);
spa_config_lock_init(spa);
spa_stats_init(spa);

@ -746,7 +747,7 @@ spa_remove(spa_t *spa)

ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);
ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);

nvlist_free(spa->spa_config_splitting);

@ -779,7 +780,7 @@ spa_remove(spa_t *spa)
nvlist_free(spa->spa_feat_stats);
spa_config_set(spa, NULL);

refcount_destroy(&spa->spa_refcount);
zfs_refcount_destroy(&spa->spa_refcount);

spa_stats_destroy(spa);
spa_config_lock_destroy(spa);
@ -839,7 +840,7 @@ spa_next(spa_t *prev)
void
spa_open_ref(spa_t *spa, void *tag)
{
ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) zfs_refcount_add(&spa->spa_refcount, tag);
}
@ -851,9 +852,9 @@ spa_open_ref(spa_t *spa, void *tag)
void
spa_close(spa_t *spa, void *tag)
{
ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) refcount_remove(&spa->spa_refcount, tag);
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
@ -867,7 +868,7 @@ spa_close(spa_t *spa, void *tag)
void
spa_async_close(spa_t *spa, void *tag)
{
(void) refcount_remove(&spa->spa_refcount, tag);
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
@ -880,7 +881,7 @@ spa_refcount_zero(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));

return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
@ -2099,7 +2100,7 @@ spa_init(int mode)
#endif

fm_init();
refcount_init();
zfs_refcount_init();
unique_init();
range_tree_init();
metaslab_alloc_trace_init();
@ -2138,7 +2139,7 @@ spa_fini(void)
metaslab_alloc_trace_fini();
range_tree_fini();
unique_fini();
refcount_fini();
zfs_refcount_fini();
fm_fini();
scan_fini();
qat_fini();

@ -141,7 +141,7 @@ zfsctl_snapshot_alloc(char *full_name, char *full_path, spa_t *spa,
se->se_root_dentry = root_dentry;
se->se_taskqid = TASKQID_INVALID;

refcount_create(&se->se_refcount);
zfs_refcount_create(&se->se_refcount);

return (se);
}
@ -153,7 +153,7 @@ zfsctl_snapshot_alloc(char *full_name, char *full_path, spa_t *spa,
static void
zfsctl_snapshot_free(zfs_snapentry_t *se)
{
refcount_destroy(&se->se_refcount);
zfs_refcount_destroy(&se->se_refcount);
strfree(se->se_name);
strfree(se->se_path);

@ -176,7 +176,7 @@ zfsctl_snapshot_hold(zfs_snapentry_t *se)
static void
zfsctl_snapshot_rele(zfs_snapentry_t *se)
{
if (refcount_remove(&se->se_refcount, NULL) == 0)
if (zfs_refcount_remove(&se->se_refcount, NULL) == 0)
zfsctl_snapshot_free(se);
}

@ -141,7 +141,7 @@ zfs_znode_hold_cache_constructor(void *buf, void *arg, int kmflags)
znode_hold_t *zh = buf;

mutex_init(&zh->zh_lock, NULL, MUTEX_DEFAULT, NULL);
refcount_create(&zh->zh_refcount);
zfs_refcount_create(&zh->zh_refcount);
zh->zh_obj = ZFS_NO_OBJECT;

return (0);
@ -153,7 +153,7 @@ zfs_znode_hold_cache_destructor(void *buf, void *arg)
znode_hold_t *zh = buf;

mutex_destroy(&zh->zh_lock);
refcount_destroy(&zh->zh_refcount);
zfs_refcount_destroy(&zh->zh_refcount);
}

void
@ -271,7 +271,7 @@ zfs_znode_hold_enter(zfsvfs_t *zfsvfs, uint64_t obj)
kmem_cache_free(znode_hold_cache, zh_new);

ASSERT(MUTEX_NOT_HELD(&zh->zh_lock));
ASSERT3S(refcount_count(&zh->zh_refcount), >, 0);
ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
mutex_enter(&zh->zh_lock);

return (zh);
@ -284,11 +284,11 @@ zfs_znode_hold_exit(zfsvfs_t *zfsvfs, znode_hold_t *zh)
boolean_t remove = B_FALSE;

ASSERT(zfs_znode_held(zfsvfs, zh->zh_obj));
ASSERT3S(refcount_count(&zh->zh_refcount), >, 0);
ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
mutex_exit(&zh->zh_lock);

mutex_enter(&zfsvfs->z_hold_locks[i]);
if (refcount_remove(&zh->zh_refcount, NULL) == 0) {
if (zfs_refcount_remove(&zh->zh_refcount, NULL) == 0) {
avl_remove(&zfsvfs->z_hold_trees[i], zh);
remove = B_TRUE;
}

@ -2639,7 +2639,7 @@ zio_write_gang_block(zio_t *pio)
ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA));

flags |= METASLAB_ASYNC_ALLOC;
VERIFY(refcount_held(&mc->mc_alloc_slots[pio->io_allocator],
VERIFY(zfs_refcount_held(&mc->mc_alloc_slots[pio->io_allocator],
pio));

/*
@ -4351,7 +4351,7 @@ zio_done(zio_t *zio)

metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
zio->io_allocator);
VERIFY(refcount_not_held(
VERIFY(zfs_refcount_not_held(
&zio->io_metaslab_class->mc_alloc_slots[zio->io_allocator],
zio));
}