10343 ZoL: Prefix all refcount functions with zfs_
illumos/illumos-gate@e914ace2e9
https://www.illumos.org/issues/10343
On the openzfs feature/porting matrix, this is listed as:
prefix to refcount funcs/types
Having these changes will make it easier to share other work across the
different ZFS operating systems.
PR 7963 424fd7c3e Prefix all refcount functions with zfs_
PR 7885 & 7932 c13060e47 Linux 4.19-rc3+ compat: Remove refcount_t compat
PR 5823 & 5842 4859fe796 Linux 4.11 compat: avoid refcount_t name conflict
Author: Tim Schumacher <timschumi@gmx.de>
parent a4a9c977b9
commit 1dd2b15521
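The rename is mechanical: every refcount_* function and the refcount_t type gain a zfs_ prefix, with call sites otherwise unchanged. As an editorial illustration only (the example_t struct and its callers below are hypothetical and not part of this diff; the zfs_refcount_* calls are the ones renamed here), the prefixed API is used like this:

	typedef struct example {
		zfs_refcount_t ex_holds;	/* formerly refcount_t */
	} example_t;

	static void
	example_init(example_t *ex)
	{
		zfs_refcount_create(&ex->ex_holds);	/* formerly refcount_create() */
	}

	static void
	example_hold(example_t *ex, void *tag)
	{
		/* formerly refcount_add(); the updated count is returned */
		(void) zfs_refcount_add(&ex->ex_holds, tag);
	}

	static void
	example_rele(example_t *ex, void *tag)
	{
		(void) zfs_refcount_remove(&ex->ex_holds, tag);
	}

	static void
	example_fini(example_t *ex)
	{
		ASSERT(zfs_refcount_is_zero(&ex->ex_holds));
		zfs_refcount_destroy(&ex->ex_holds);
	}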
@@ -291,7 +291,7 @@ abd_alloc(size_t size, boolean_t is_metadata)
 	}
 	abd->abd_size = size;
 	abd->abd_parent = NULL;
-	refcount_create(&abd->abd_children);
+	zfs_refcount_create(&abd->abd_children);

 	abd->abd_u.abd_scatter.abd_offset = 0;
 	abd->abd_u.abd_scatter.abd_chunk_size = zfs_abd_chunk_size;
@@ -318,7 +318,7 @@ abd_free_scatter(abd_t *abd)
 		abd_free_chunk(abd->abd_u.abd_scatter.abd_chunks[i]);
 	}

-	refcount_destroy(&abd->abd_children);
+	zfs_refcount_destroy(&abd->abd_children);
 	ABDSTAT_BUMPDOWN(abdstat_scatter_cnt);
 	ABDSTAT_INCR(abdstat_scatter_data_size, -(int)abd->abd_size);
 	ABDSTAT_INCR(abdstat_scatter_chunk_waste,
@@ -345,7 +345,7 @@ abd_alloc_linear(size_t size, boolean_t is_metadata)
 	}
 	abd->abd_size = size;
 	abd->abd_parent = NULL;
-	refcount_create(&abd->abd_children);
+	zfs_refcount_create(&abd->abd_children);

 	if (is_metadata) {
 		abd->abd_u.abd_linear.abd_buf = zio_buf_alloc(size);
@@ -368,7 +368,7 @@ abd_free_linear(abd_t *abd)
 		zio_data_buf_free(abd->abd_u.abd_linear.abd_buf, abd->abd_size);
 	}

-	refcount_destroy(&abd->abd_children);
+	zfs_refcount_destroy(&abd->abd_children);
 	ABDSTAT_BUMPDOWN(abdstat_linear_cnt);
 	ABDSTAT_INCR(abdstat_linear_data_size, -(int)abd->abd_size);

@@ -474,8 +474,8 @@ abd_get_offset(abd_t *sabd, size_t off)

 	abd->abd_size = sabd->abd_size - off;
 	abd->abd_parent = sabd;
-	refcount_create(&abd->abd_children);
-	(void) refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
+	zfs_refcount_create(&abd->abd_children);
+	(void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);

 	return (abd);
 }
@@ -499,7 +499,7 @@ abd_get_from_buf(void *buf, size_t size)
 	abd->abd_flags = ABD_FLAG_LINEAR;
 	abd->abd_size = size;
 	abd->abd_parent = NULL;
-	refcount_create(&abd->abd_children);
+	zfs_refcount_create(&abd->abd_children);

 	abd->abd_u.abd_linear.abd_buf = buf;

@@ -517,11 +517,11 @@ abd_put(abd_t *abd)
 	ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));

 	if (abd->abd_parent != NULL) {
-		(void) refcount_remove_many(&abd->abd_parent->abd_children,
+		(void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
 		    abd->abd_size, abd);
 	}

-	refcount_destroy(&abd->abd_children);
+	zfs_refcount_destroy(&abd->abd_children);
 	abd_free_struct(abd);
 }

@@ -553,7 +553,7 @@ abd_borrow_buf(abd_t *abd, size_t n)
 	} else {
 		buf = zio_buf_alloc(n);
 	}
-	(void) refcount_add_many(&abd->abd_children, n, buf);
+	(void) zfs_refcount_add_many(&abd->abd_children, n, buf);

 	return (buf);
 }
@@ -585,7 +585,7 @@ abd_return_buf(abd_t *abd, void *buf, size_t n)
 		ASSERT0(abd_cmp_buf(abd, buf, n));
 		zio_buf_free(buf, n);
 	}
-	(void) refcount_remove_many(&abd->abd_children, n, buf);
+	(void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
 }

 void
@@ -428,12 +428,12 @@ typedef struct arc_state {
 	/*
	 * total amount of evictable data in this state
	 */
-	refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
+	zfs_refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
 	/*
	 * total amount of data in this state; this includes: evictable,
	 * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
	 */
-	refcount_t arcs_size;
+	zfs_refcount_t arcs_size;
 } arc_state_t;

 /* The 6 states: */
@@ -947,7 +947,7 @@ typedef struct l1arc_buf_hdr {
 	clock_t b_arc_access;

 	/* self protecting */
-	refcount_t b_refcnt;
+	zfs_refcount_t b_refcnt;

 	arc_callback_t *b_acb;
 	abd_t *b_pabd;
@@ -1124,7 +1124,7 @@ struct l2arc_dev {
 	kmutex_t l2ad_mtx;		/* lock for buffer list */
 	list_t l2ad_buflist;		/* buffer list */
 	list_node_t l2ad_node;		/* device list node */
-	refcount_t l2ad_alloc;		/* allocated bytes */
+	zfs_refcount_t l2ad_alloc;	/* allocated bytes */
 };

 static list_t L2ARC_dev_list;		/* device list */
@@ -1341,7 +1341,7 @@ hdr_full_cons(void *vbuf, void *unused, int kmflag)

 	bzero(hdr, HDR_FULL_SIZE);
 	cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
-	refcount_create(&hdr->b_l1hdr.b_refcnt);
+	zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
 	mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
 	multilist_link_init(&hdr->b_l1hdr.b_arc_node);
 	arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
@@ -1386,7 +1386,7 @@ hdr_full_dest(void *vbuf, void *unused)

 	ASSERT(HDR_EMPTY(hdr));
 	cv_destroy(&hdr->b_l1hdr.b_cv);
-	refcount_destroy(&hdr->b_l1hdr.b_refcnt);
+	zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt);
 	mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
 	ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
 	arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
@@ -2065,21 +2065,21 @@ arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
 		ASSERT0(hdr->b_l1hdr.b_bufcnt);
 		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
 		ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
-		(void) refcount_add_many(&state->arcs_esize[type],
+		(void) zfs_refcount_add_many(&state->arcs_esize[type],
 		    HDR_GET_LSIZE(hdr), hdr);
 		return;
 	}

 	ASSERT(!GHOST_STATE(state));
 	if (hdr->b_l1hdr.b_pabd != NULL) {
-		(void) refcount_add_many(&state->arcs_esize[type],
+		(void) zfs_refcount_add_many(&state->arcs_esize[type],
 		    arc_hdr_size(hdr), hdr);
 	}
 	for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
 	    buf = buf->b_next) {
 		if (arc_buf_is_shared(buf))
 			continue;
-		(void) refcount_add_many(&state->arcs_esize[type],
+		(void) zfs_refcount_add_many(&state->arcs_esize[type],
 		    arc_buf_size(buf), buf);
 	}
 }
@@ -2100,21 +2100,21 @@ arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
 		ASSERT0(hdr->b_l1hdr.b_bufcnt);
 		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
 		ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
-		(void) refcount_remove_many(&state->arcs_esize[type],
+		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 		    HDR_GET_LSIZE(hdr), hdr);
 		return;
 	}

 	ASSERT(!GHOST_STATE(state));
 	if (hdr->b_l1hdr.b_pabd != NULL) {
-		(void) refcount_remove_many(&state->arcs_esize[type],
+		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 		    arc_hdr_size(hdr), hdr);
 	}
 	for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
 	    buf = buf->b_next) {
 		if (arc_buf_is_shared(buf))
 			continue;
-		(void) refcount_remove_many(&state->arcs_esize[type],
+		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 		    arc_buf_size(buf), buf);
 	}
 }
@@ -2131,13 +2131,13 @@ add_reference(arc_buf_hdr_t *hdr, void *tag)
 	ASSERT(HDR_HAS_L1HDR(hdr));
 	if (!MUTEX_HELD(HDR_LOCK(hdr))) {
 		ASSERT(hdr->b_l1hdr.b_state == arc_anon);
-		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
 	}

 	arc_state_t *state = hdr->b_l1hdr.b_state;

-	if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
+	if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
 	    (state != arc_anon)) {
 		/* We don't use the L2-only state list. */
 		if (state != arc_l2c_only) {
@@ -2169,7 +2169,7 @@ remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
 	 * arc_l2c_only counts as a ghost state so we don't need to explicitly
 	 * check to prevent usage of the arc_l2c_only list.
 	 */
-	if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
+	if (((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
 	    (state != arc_anon)) {
 		multilist_insert(state->arcs_list[arc_buf_type(hdr)], hdr);
 		ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
@@ -2201,7 +2201,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
 	 */
 	if (HDR_HAS_L1HDR(hdr)) {
 		old_state = hdr->b_l1hdr.b_state;
-		refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
+		refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
 		bufcnt = hdr->b_l1hdr.b_bufcnt;
 		update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL);
 	} else {
@@ -2271,7 +2271,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
 			 * the reference. As a result, we use the arc
 			 * header pointer for the reference.
 			 */
-			(void) refcount_add_many(&new_state->arcs_size,
+			(void) zfs_refcount_add_many(&new_state->arcs_size,
 			    HDR_GET_LSIZE(hdr), hdr);
 			ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
 		} else {
@@ -2297,13 +2297,15 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
 				if (arc_buf_is_shared(buf))
 					continue;

-				(void) refcount_add_many(&new_state->arcs_size,
+				(void) zfs_refcount_add_many(
+				    &new_state->arcs_size,
 				    arc_buf_size(buf), buf);
 			}
 			ASSERT3U(bufcnt, ==, buffers);

 			if (hdr->b_l1hdr.b_pabd != NULL) {
-				(void) refcount_add_many(&new_state->arcs_size,
+				(void) zfs_refcount_add_many(
+				    &new_state->arcs_size,
 				    arc_hdr_size(hdr), hdr);
 			} else {
 				ASSERT(GHOST_STATE(old_state));
@@ -2325,7 +2327,7 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
 			 * header on the ghost state.
 			 */

-			(void) refcount_remove_many(&old_state->arcs_size,
+			(void) zfs_refcount_remove_many(&old_state->arcs_size,
 			    HDR_GET_LSIZE(hdr), hdr);
 		} else {
 			uint32_t buffers = 0;
@@ -2350,13 +2352,13 @@ arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
 				if (arc_buf_is_shared(buf))
 					continue;

-				(void) refcount_remove_many(
+				(void) zfs_refcount_remove_many(
 				    &old_state->arcs_size, arc_buf_size(buf),
 				    buf);
 			}
 			ASSERT3U(bufcnt, ==, buffers);
 			ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
-			(void) refcount_remove_many(
+			(void) zfs_refcount_remove_many(
 			    &old_state->arcs_size, arc_hdr_size(hdr), hdr);
 		}
 	}
@@ -2610,8 +2612,8 @@ arc_return_buf(arc_buf_t *buf, void *tag)

 	ASSERT3P(buf->b_data, !=, NULL);
 	ASSERT(HDR_HAS_L1HDR(hdr));
-	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
-	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
+	(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
+	(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);

 	arc_loaned_bytes_update(-arc_buf_size(buf));
 }
@@ -2624,8 +2626,8 @@ arc_loan_inuse_buf(arc_buf_t *buf, void *tag)

 	ASSERT3P(buf->b_data, !=, NULL);
 	ASSERT(HDR_HAS_L1HDR(hdr));
-	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
-	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
+	(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
+	(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);

 	arc_loaned_bytes_update(arc_buf_size(buf));
 }
@@ -2652,13 +2654,13 @@ arc_hdr_free_on_write(arc_buf_hdr_t *hdr)

 	/* protected by hash lock, if in the hash table */
 	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
-		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 		ASSERT(state != arc_anon && state != arc_l2c_only);

-		(void) refcount_remove_many(&state->arcs_esize[type],
+		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 		    size, hdr);
 	}
-	(void) refcount_remove_many(&state->arcs_size, size, hdr);
+	(void) zfs_refcount_remove_many(&state->arcs_size, size, hdr);
 	if (type == ARC_BUFC_METADATA) {
 		arc_space_return(size, ARC_SPACE_META);
 	} else {
@@ -2688,7 +2690,7 @@ arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
 	 * refcount ownership to the hdr since it always owns
 	 * the refcount whenever an arc_buf_t is shared.
 	 */
-	refcount_transfer_ownership(&state->arcs_size, buf, hdr);
+	zfs_refcount_transfer_ownership(&state->arcs_size, buf, hdr);
 	hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
 	abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
 	    HDR_ISTYPE_METADATA(hdr));
@@ -2718,7 +2720,7 @@ arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
 	 * We are no longer sharing this buffer so we need
 	 * to transfer its ownership to the rightful owner.
 	 */
-	refcount_transfer_ownership(&state->arcs_size, hdr, buf);
+	zfs_refcount_transfer_ownership(&state->arcs_size, hdr, buf);
 	arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
 	abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
 	abd_put(hdr->b_l1hdr.b_pabd);
@@ -2943,7 +2945,7 @@ arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
 	 * it references and compressed arc enablement.
 	 */
 	arc_hdr_alloc_pabd(hdr);
-	ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+	ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));

 	return (hdr);
 }
@@ -3045,8 +3047,10 @@ arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
 	 * the wrong pointer address when calling arc_hdr_destroy() later.
 	 */

-	(void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
-	(void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr);
+	(void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr),
+	    hdr);
+	(void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr),
+	    nhdr);

 	buf_discard_identity(hdr);
 	kmem_cache_free(old, hdr);
@@ -3126,7 +3130,7 @@ arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)

 	vdev_space_update(dev->l2ad_vdev, -psize, 0, 0);

-	(void) refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
+	(void) zfs_refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
 	arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
 }

@@ -3136,7 +3140,7 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
 	if (HDR_HAS_L1HDR(hdr)) {
 		ASSERT(hdr->b_l1hdr.b_buf == NULL ||
 		    hdr->b_l1hdr.b_bufcnt > 0);
-		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 		ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
 	}
 	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
@@ -3297,7 +3301,7 @@ arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
 		return (bytes_evicted);
 	}

-	ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
+	ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
 	while (hdr->b_l1hdr.b_buf) {
 		arc_buf_t *buf = hdr->b_l1hdr.b_buf;
 		if (!mutex_tryenter(&buf->b_evict_lock)) {
@@ -3599,7 +3603,7 @@ arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
 {
 	uint64_t evicted = 0;

-	while (refcount_count(&state->arcs_esize[type]) != 0) {
+	while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
 		evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);

 		if (!retry)
@@ -3623,8 +3627,9 @@ arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
 {
 	int64_t delta;

-	if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) {
-		delta = MIN(refcount_count(&state->arcs_esize[type]), bytes);
+	if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
+		delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
+		    bytes);
 		return (arc_evict_state(state, spa, delta, type));
 	}

@@ -3649,8 +3654,8 @@ arc_adjust_meta(uint64_t meta_used)
 	 * evict some from the MRU here, and some from the MFU below.
 	 */
 	target = MIN((int64_t)(meta_used - arc_meta_limit),
-	    (int64_t)(refcount_count(&arc_anon->arcs_size) +
-	    refcount_count(&arc_mru->arcs_size) - arc_p));
+	    (int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
+	    zfs_refcount_count(&arc_mru->arcs_size) - arc_p));

 	total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);

@@ -3660,7 +3665,7 @@ arc_adjust_meta(uint64_t meta_used)
 	 * space allotted to the MFU (which is defined as arc_c - arc_p).
 	 */
 	target = MIN((int64_t)(meta_used - arc_meta_limit),
-	    (int64_t)(refcount_count(&arc_mfu->arcs_size) -
+	    (int64_t)(zfs_refcount_count(&arc_mfu->arcs_size) -
 	    (arc_c - arc_p)));

 	total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
@@ -3772,8 +3777,8 @@ arc_adjust(void)
 	 * arc_p here, and then evict more from the MFU below.
 	 */
 	target = MIN((int64_t)(asize - arc_c),
-	    (int64_t)(refcount_count(&arc_anon->arcs_size) +
-	    refcount_count(&arc_mru->arcs_size) + ameta - arc_p));
+	    (int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
+	    zfs_refcount_count(&arc_mru->arcs_size) + ameta - arc_p));

 	/*
 	 * If we're below arc_meta_min, always prefer to evict data.
@@ -3857,8 +3862,8 @@ arc_adjust(void)
 	 * cache. The following logic enforces these limits on the ghost
 	 * caches, and evicts from them as needed.
 	 */
-	target = refcount_count(&arc_mru->arcs_size) +
-	    refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
+	target = zfs_refcount_count(&arc_mru->arcs_size) +
+	    zfs_refcount_count(&arc_mru_ghost->arcs_size) - arc_c;

 	bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
 	total_evicted += bytes;
@@ -3876,8 +3881,8 @@ arc_adjust(void)
 	 * mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
 	 * mru ghost + mfu ghost <= arc_c
 	 */
-	target = refcount_count(&arc_mru_ghost->arcs_size) +
-	    refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
+	target = zfs_refcount_count(&arc_mru_ghost->arcs_size) +
+	    zfs_refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;

 	bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
 	total_evicted += bytes;
@@ -4315,8 +4320,8 @@ arc_adapt(int bytes, arc_state_t *state)
 {
 	int mult;
 	uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
-	int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size);
-	int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size);
+	int64_t mrug_size = zfs_refcount_count(&arc_mru_ghost->arcs_size);
+	int64_t mfug_size = zfs_refcount_count(&arc_mfu_ghost->arcs_size);

 	if (state == arc_l2c_only)
 		return;
@@ -4493,7 +4498,7 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
 	 */
 	if (!GHOST_STATE(state)) {

-		(void) refcount_add_many(&state->arcs_size, size, tag);
+		(void) zfs_refcount_add_many(&state->arcs_size, size, tag);

 		/*
 		 * If this is reached via arc_read, the link is
@@ -4505,8 +4510,8 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
 		 * trying to [add|remove]_reference it.
 		 */
 		if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
-			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
-			(void) refcount_add_many(&state->arcs_esize[type],
+			ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+			(void) zfs_refcount_add_many(&state->arcs_esize[type],
 			    size, tag);
 		}

@@ -4516,8 +4521,8 @@ arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
 		 */
 		if (aggsum_compare(&arc_size, arc_c) < 0 &&
 		    hdr->b_l1hdr.b_state == arc_anon &&
-		    (refcount_count(&arc_anon->arcs_size) +
-		    refcount_count(&arc_mru->arcs_size) > arc_p))
+		    (zfs_refcount_count(&arc_anon->arcs_size) +
+		    zfs_refcount_count(&arc_mru->arcs_size) > arc_p))
 			arc_p = MIN(arc_c, arc_p + size);
 	}
 }
@@ -4554,13 +4559,13 @@ arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)

 	/* protected by hash lock, if in the hash table */
 	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
-		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+		ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 		ASSERT(state != arc_anon && state != arc_l2c_only);

-		(void) refcount_remove_many(&state->arcs_esize[type],
+		(void) zfs_refcount_remove_many(&state->arcs_esize[type],
 		    size, tag);
 	}
-	(void) refcount_remove_many(&state->arcs_size, size, tag);
+	(void) zfs_refcount_remove_many(&state->arcs_size, size, tag);

 	VERIFY3U(hdr->b_type, ==, type);
 	if (type == ARC_BUFC_METADATA) {
@@ -4607,7 +4612,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
 		 * another prefetch (to make it less likely to be evicted).
 		 */
 		if (HDR_PREFETCH(hdr)) {
-			if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
+			if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
 				/* link protected by hash lock */
 				ASSERT(multilist_link_active(
 				    &hdr->b_l1hdr.b_arc_node));
@@ -4645,7 +4650,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)

 		if (HDR_PREFETCH(hdr)) {
 			new_state = arc_mru;
-			if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
+			if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
 				arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
 			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
 		} else {
@@ -4668,7 +4673,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
 		 * the head of the list now.
 		 */
 		if ((HDR_PREFETCH(hdr)) != 0) {
-			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+			ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 			/* link protected by hash_lock */
 			ASSERT(multilist_link_active(&hdr->b_l1hdr.b_arc_node));
 		}
@@ -4687,7 +4692,7 @@ arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
 			 * This is a prefetch access...
 			 * move this block back to the MRU state.
 			 */
-			ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
+			ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
 			new_state = arc_mru;
 		}

@@ -4869,7 +4874,7 @@ arc_read_done(zio_t *zio)
 		ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
 	}

-	ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
+	ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
 	    callback_list != NULL);

 	if (no_zio_error) {
@@ -4880,7 +4885,7 @@ arc_read_done(zio_t *zio)
 		arc_change_state(arc_anon, hdr, hash_lock);
 		if (HDR_IN_HASH_TABLE(hdr))
 			buf_hash_remove(hdr);
-		freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
+		freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
 	}

 	/*
@@ -4900,7 +4905,7 @@ arc_read_done(zio_t *zio)
 		 * in the cache).
 		 */
 		ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
-		freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
+		freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
 	}

 	/* execute each callback and free its structure */
@@ -5062,7 +5067,7 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
 			VERIFY0(arc_buf_alloc_impl(hdr, private,
 			    compressed_read, B_TRUE, &buf));
 		} else if (*arc_flags & ARC_FLAG_PREFETCH &&
-		    refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
+		    zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
 			arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
 		}
 		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
@@ -5118,7 +5123,7 @@ arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
 			ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
 			ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state));
 			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
-			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+			ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 			ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
 			ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);

@@ -5339,7 +5344,7 @@ arc_freed(spa_t *spa, const blkptr_t *bp)
 	 * this hdr, then we don't destroy the hdr.
 	 */
 	if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
-	    refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
+	    zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
 		arc_change_state(arc_anon, hdr, hash_lock);
 		arc_hdr_destroy(hdr);
 		mutex_exit(hash_lock);
@@ -5383,7 +5388,7 @@ arc_release(arc_buf_t *buf, void *tag)
 		ASSERT(HDR_EMPTY(hdr));

 		ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
-		ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
+		ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
 		ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));

 		hdr->b_l1hdr.b_arc_access = 0;
@@ -5411,7 +5416,7 @@ arc_release(arc_buf_t *buf, void *tag)
 	ASSERT3P(state, !=, arc_anon);

 	/* this buffer is not on any list */
-	ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
+	ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);

 	if (HDR_HAS_L2HDR(hdr)) {
 		mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
@@ -5501,12 +5506,13 @@ arc_release(arc_buf_t *buf, void *tag)
 		ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
 		ASSERT3P(state, !=, arc_l2c_only);

-		(void) refcount_remove_many(&state->arcs_size,
+		(void) zfs_refcount_remove_many(&state->arcs_size,
 		    arc_buf_size(buf), buf);

-		if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
+		if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
 			ASSERT3P(state, !=, arc_l2c_only);
-			(void) refcount_remove_many(&state->arcs_esize[type],
+			(void) zfs_refcount_remove_many(
+			    &state->arcs_esize[type],
 			    arc_buf_size(buf), buf);
 		}

@@ -5523,21 +5529,21 @@ arc_release(arc_buf_t *buf, void *tag)
 		nhdr = arc_hdr_alloc(spa, psize, lsize, compress, type);
 		ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
 		ASSERT0(nhdr->b_l1hdr.b_bufcnt);
-		ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt));
+		ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
 		VERIFY3U(nhdr->b_type, ==, type);
 		ASSERT(!HDR_SHARED_DATA(nhdr));

 		nhdr->b_l1hdr.b_buf = buf;
 		nhdr->b_l1hdr.b_bufcnt = 1;
-		(void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
+		(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
 		buf->b_hdr = nhdr;

 		mutex_exit(&buf->b_evict_lock);
-		(void) refcount_add_many(&arc_anon->arcs_size,
+		(void) zfs_refcount_add_many(&arc_anon->arcs_size,
 		    arc_buf_size(buf), buf);
 	} else {
 		mutex_exit(&buf->b_evict_lock);
-		ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
+		ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
 		/* protected by hash lock, or hdr is on arc_anon */
 		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
 		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
@@ -5569,7 +5575,7 @@ arc_referenced(arc_buf_t *buf)
 	int referenced;

 	mutex_enter(&buf->b_evict_lock);
-	referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
+	referenced = (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
 	mutex_exit(&buf->b_evict_lock);
 	return (referenced);
 }
@@ -5584,7 +5590,7 @@ arc_write_ready(zio_t *zio)
 	uint64_t psize = BP_IS_HOLE(zio->io_bp) ? 0 : BP_GET_PSIZE(zio->io_bp);

 	ASSERT(HDR_HAS_L1HDR(hdr));
-	ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
+	ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
 	ASSERT(hdr->b_l1hdr.b_bufcnt > 0);

 	/*
@@ -5737,7 +5743,7 @@ arc_write_done(zio_t *zio)
 			if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
 				panic("bad overwrite, hdr=%p exists=%p",
 				    (void *)hdr, (void *)exists);
-			ASSERT(refcount_is_zero(
+			ASSERT(zfs_refcount_is_zero(
 			    &exists->b_l1hdr.b_refcnt));
 			arc_change_state(arc_anon, exists, hash_lock);
 			mutex_exit(hash_lock);
@@ -5767,7 +5773,7 @@ arc_write_done(zio_t *zio)
 		arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
 	}

-	ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
+	ASSERT(!zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
 	callback->awcb_done(zio, buf, callback->awcb_private);

 	abd_put(zio->io_abd);
@@ -5912,7 +5918,7 @@ arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg)
 	/* assert that it has not wrapped around */
 	ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);

-	anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
+	anon_size = MAX((int64_t)(zfs_refcount_count(&arc_anon->arcs_size) -
 	    arc_loaned_bytes), 0);

 	/*
@@ -5947,9 +5953,10 @@ arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg)
 	    anon_size > arc_c * zfs_arc_anon_limit_percent / 100 &&
 	    spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) {
 		uint64_t meta_esize =
-		    refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+		    zfs_refcount_count(
+		    &arc_anon->arcs_esize[ARC_BUFC_METADATA]);
 		uint64_t data_esize =
-		    refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+		    zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
 		dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
 		    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
 		    arc_tempreserve >> 10, meta_esize >> 10,
@@ -5964,11 +5971,11 @@ static void
 arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
     kstat_named_t *evict_data, kstat_named_t *evict_metadata)
 {
-	size->value.ui64 = refcount_count(&state->arcs_size);
+	size->value.ui64 = zfs_refcount_count(&state->arcs_size);
 	evict_data->value.ui64 =
-	    refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
+	    zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
 	evict_metadata->value.ui64 =
-	    refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
+	    zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
 }

 static int
@@ -6099,25 +6106,25 @@ arc_state_init(void)
 	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
 	    arc_state_multilist_index_func);

-	refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
-	refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
-	refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
-	refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
-	refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
-	refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);

-	refcount_create(&arc_anon->arcs_size);
-	refcount_create(&arc_mru->arcs_size);
-	refcount_create(&arc_mru_ghost->arcs_size);
-	refcount_create(&arc_mfu->arcs_size);
-	refcount_create(&arc_mfu_ghost->arcs_size);
-	refcount_create(&arc_l2c_only->arcs_size);
+	zfs_refcount_create(&arc_anon->arcs_size);
+	zfs_refcount_create(&arc_mru->arcs_size);
+	zfs_refcount_create(&arc_mru_ghost->arcs_size);
+	zfs_refcount_create(&arc_mfu->arcs_size);
+	zfs_refcount_create(&arc_mfu_ghost->arcs_size);
+	zfs_refcount_create(&arc_l2c_only->arcs_size);

 	aggsum_init(&arc_meta_used, 0);
 	aggsum_init(&arc_size, 0);
@@ -6131,25 +6138,25 @@ arc_state_init(void)
 static void
 arc_state_fini(void)
 {
-	refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
-	refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
-	refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
-	refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
-	refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
-	refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
-	refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
+	zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
+	zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);

-	refcount_destroy(&arc_anon->arcs_size);
-	refcount_destroy(&arc_mru->arcs_size);
-	refcount_destroy(&arc_mru_ghost->arcs_size);
-	refcount_destroy(&arc_mfu->arcs_size);
-	refcount_destroy(&arc_mfu_ghost->arcs_size);
-	refcount_destroy(&arc_l2c_only->arcs_size);
+	zfs_refcount_destroy(&arc_anon->arcs_size);
+	zfs_refcount_destroy(&arc_mru->arcs_size);
+	zfs_refcount_destroy(&arc_mru_ghost->arcs_size);
+	zfs_refcount_destroy(&arc_mfu->arcs_size);
+	zfs_refcount_destroy(&arc_mfu_ghost->arcs_size);
+	zfs_refcount_destroy(&arc_l2c_only->arcs_size);

 	multilist_destroy(arc_mru->arcs_list[ARC_BUFC_METADATA]);
 	multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
@@ -6729,7 +6736,7 @@ l2arc_write_done(zio_t *zio)
 			ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));

 			bytes_dropped += arc_hdr_size(hdr);
-			(void) refcount_remove_many(&dev->l2ad_alloc,
+			(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
 			    arc_hdr_size(hdr), hdr);
 		}

@@ -7135,7 +7142,8 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
 			list_insert_head(&dev->l2ad_buflist, hdr);
 			mutex_exit(&dev->l2ad_mtx);

-			(void) refcount_add_many(&dev->l2ad_alloc, psize, hdr);
+			(void) zfs_refcount_add_many(&dev->l2ad_alloc, psize,
+			    hdr);

 			/*
 			 * Normally the L2ARC can use the hdr's data, but if
@@ -7367,7 +7375,7 @@ l2arc_add_vdev(spa_t *spa, vdev_t *vd)
 	    offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));

 	vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
-	refcount_create(&adddev->l2ad_alloc);
+	zfs_refcount_create(&adddev->l2ad_alloc);

 	/*
 	 * Add device to global list
@@ -7413,7 +7421,7 @@ l2arc_remove_vdev(vdev_t *vd)
 	l2arc_evict(remdev, 0, B_TRUE);
 	list_destroy(&remdev->l2ad_buflist);
 	mutex_destroy(&remdev->l2ad_mtx);
-	refcount_destroy(&remdev->l2ad_alloc);
+	zfs_refcount_destroy(&remdev->l2ad_alloc);
 	kmem_free(remdev, sizeof (l2arc_dev_t));
 }
@@ -105,7 +105,7 @@ static boolean_t dbuf_evict_thread_exit;
  */
 typedef struct dbuf_cache {
 	multilist_t *cache;
-	refcount_t size;
+	zfs_refcount_t size;
 } dbuf_cache_t;
 dbuf_cache_t dbuf_caches[DB_CACHE_MAX];

@@ -178,7 +178,7 @@ dbuf_cons(void *vdb, void *unused, int kmflag)
 	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
 	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
 	multilist_link_init(&db->db_cache_link);
-	refcount_create(&db->db_holds);
+	zfs_refcount_create(&db->db_holds);

 	return (0);
 }
@@ -191,7 +191,7 @@ dbuf_dest(void *vdb, void *unused)
 	mutex_destroy(&db->db_mtx);
 	cv_destroy(&db->db_changed);
 	ASSERT(!multilist_link_active(&db->db_cache_link));
-	refcount_destroy(&db->db_holds);
+	zfs_refcount_destroy(&db->db_holds);
 }

 /*
@@ -313,7 +313,7 @@ dbuf_hash_remove(dmu_buf_impl_t *db)
 	 * We musn't hold db_mtx to maintain lock ordering:
 	 * DBUF_HASH_MUTEX > db_mtx.
 	 */
-	ASSERT(refcount_is_zero(&db->db_holds));
+	ASSERT(zfs_refcount_is_zero(&db->db_holds));
 	ASSERT(db->db_state == DB_EVICTING);
 	ASSERT(!MUTEX_HELD(&db->db_mtx));

@@ -350,7 +350,7 @@ dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
 	ASSERT(db->db.db_data != NULL);
 	ASSERT3U(db->db_state, ==, DB_CACHED);

-	holds = refcount_count(&db->db_holds);
+	holds = zfs_refcount_count(&db->db_holds);
 	if (verify_type == DBVU_EVICTING) {
 		/*
 		 * Immediate eviction occurs when holds == dirtycnt.
@@ -447,7 +447,8 @@ dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
 		 * Sanity check for small-memory systems: don't allocate too
 		 * much memory for this purpose.
 		 */
-		if (refcount_count(&dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
+		if (zfs_refcount_count(
+		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
 		    dbuf_metadata_cache_max_bytes) {
 			dbuf_metadata_cache_overflow++;
 			DTRACE_PROBE1(dbuf__metadata__cache__overflow,
@@ -496,7 +497,7 @@ dbuf_cache_above_hiwater(void)
 	uint64_t dbuf_cache_hiwater_bytes =
 	    (dbuf_cache_max_bytes * dbuf_cache_hiwater_pct) / 100;

-	return (refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
+	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
 	    dbuf_cache_max_bytes + dbuf_cache_hiwater_bytes);
 }

@@ -506,7 +507,7 @@ dbuf_cache_above_lowater(void)
 	uint64_t dbuf_cache_lowater_bytes =
 	    (dbuf_cache_max_bytes * dbuf_cache_lowater_pct) / 100;

-	return (refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
+	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
 	    dbuf_cache_max_bytes - dbuf_cache_lowater_bytes);
 }
@@ -533,7 +534,8 @@ dbuf_evict_one(void)
 	if (db != NULL) {
 		multilist_sublist_remove(mls, db);
 		multilist_sublist_unlock(mls);
-		(void) refcount_remove_many(&dbuf_caches[DB_DBUF_CACHE].size,
+		(void) zfs_refcount_remove_many(
+		    &dbuf_caches[DB_DBUF_CACHE].size,
 		    db->db.db_size, db);
 		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
 		db->db_caching_status = DB_NO_CACHE;
@@ -599,7 +601,7 @@ dbuf_evict_notify(void)
 	 * because it's OK to occasionally make the wrong decision here,
 	 * and grabbing the lock results in massive lock contention.
 	 */
-	if (refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
+	if (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
 	    dbuf_cache_max_bytes) {
 		if (dbuf_cache_above_hiwater())
 			dbuf_evict_one();
@@ -667,7 +669,7 @@ dbuf_init(void)
 		multilist_create(sizeof (dmu_buf_impl_t),
 		    offsetof(dmu_buf_impl_t, db_cache_link),
 		    dbuf_cache_multilist_index_func);
-		refcount_create(&dbuf_caches[dcs].size);
+		zfs_refcount_create(&dbuf_caches[dcs].size);
 	}

 	dbuf_evict_thread_exit = B_FALSE;
@@ -701,7 +703,7 @@ dbuf_fini(void)
 	cv_destroy(&dbuf_evict_cv);

 	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
-		refcount_destroy(&dbuf_caches[dcs].size);
+		zfs_refcount_destroy(&dbuf_caches[dcs].size);
 		multilist_destroy(dbuf_caches[dcs].cache);
 	}
 }
@@ -887,7 +889,7 @@ dbuf_loan_arcbuf(dmu_buf_impl_t *db)

 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
 	mutex_enter(&db->db_mtx);
-	if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
+	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
 		int blksz = db->db.db_size;
 		spa_t *spa = db->db_objset->os_spa;

@@ -949,7 +951,7 @@ dbuf_read_done(zio_t *zio, arc_buf_t *buf, void *vdb)
 	/*
 	 * All reads are synchronous, so we must have a hold on the dbuf
 	 */
-	ASSERT(refcount_count(&db->db_holds) > 0);
+	ASSERT(zfs_refcount_count(&db->db_holds) > 0);
 	ASSERT(db->db_buf == NULL);
 	ASSERT(db->db.db_data == NULL);
 	if (buf == NULL) {
@@ -986,7 +988,7 @@ dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)

 	DB_DNODE_ENTER(db);
 	dn = DB_DNODE(db);
-	ASSERT(!refcount_is_zero(&db->db_holds));
+	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
 	/* We need the struct_rwlock to prevent db_blkptr from changing. */
 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
 	ASSERT(MUTEX_HELD(&db->db_mtx));
@@ -1117,7 +1119,7 @@ dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
 		dr->dt.dl.dr_data = zio_buf_alloc(bonuslen);
 		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
 		bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
-	} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
+	} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
 		int size = arc_buf_size(db->db_buf);
 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
 		spa_t *spa = db->db_objset->os_spa;
@@ -1149,7 +1151,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
 	 * We don't have to hold the mutex to check db_state because it
 	 * can't be freed while we have a hold on the buffer.
 	 */
-	ASSERT(!refcount_is_zero(&db->db_holds));
+	ASSERT(!zfs_refcount_is_zero(&db->db_holds));

 	if (db->db_state == DB_NOFILL)
 		return (SET_ERROR(EIO));
@@ -1244,7 +1246,7 @@ dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
 static void
 dbuf_noread(dmu_buf_impl_t *db)
 {
-	ASSERT(!refcount_is_zero(&db->db_holds));
+	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
 	mutex_enter(&db->db_mtx);
 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
@@ -1363,7 +1365,7 @@ dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
 			mutex_exit(&db->db_mtx);
 			continue;
 		}
-		if (refcount_count(&db->db_holds) == 0) {
+		if (zfs_refcount_count(&db->db_holds) == 0) {
 			ASSERT(db->db_buf);
 			dbuf_destroy(db);
 			continue;
@@ -1508,7 +1510,7 @@ dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 	int txgoff = tx->tx_txg & TXG_MASK;

 	ASSERT(tx->tx_txg != 0);
-	ASSERT(!refcount_is_zero(&db->db_holds));
+	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
 	DMU_TX_DIRTY_BUF(tx, db);

 	DB_DNODE_ENTER(db);
@@ -1875,7 +1877,7 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
 	ASSERT(db->db_dirtycnt > 0);
 	db->db_dirtycnt -= 1;

-	if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
+	if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
 		ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
 		dbuf_destroy(db);
 		return (B_TRUE);
@@ -1891,7 +1893,7 @@ dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
 	int rf = DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH;

 	ASSERT(tx->tx_txg != 0);
-	ASSERT(!refcount_is_zero(&db->db_holds));
+	ASSERT(!zfs_refcount_is_zero(&db->db_holds));

 	/*
 	 * Quick check for dirtyness. For already dirty blocks, this
@@ -1943,7 +1945,7 @@ dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
 	ASSERT(tx->tx_txg != 0);
 	ASSERT(db->db_level == 0);
-	ASSERT(!refcount_is_zero(&db->db_holds));
+	ASSERT(!zfs_refcount_is_zero(&db->db_holds));

 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
 	    dmu_tx_private_ok(tx));
@@ -2018,7 +2020,7 @@ dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
 void
 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
 {
-	ASSERT(!refcount_is_zero(&db->db_holds));
+	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
 	ASSERT(db->db_level == 0);
 	ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
@@ -2037,7 +2039,7 @@ dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
 	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);

 	if (db->db_state == DB_CACHED &&
-	    refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
+	    zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
 		mutex_exit(&db->db_mtx);
 		(void) dbuf_dirty(db, tx);
 		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
@@ -2082,7 +2084,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
 	dmu_buf_impl_t *dndb;

 	ASSERT(MUTEX_HELD(&db->db_mtx));
-	ASSERT(refcount_is_zero(&db->db_holds));
+	ASSERT(zfs_refcount_is_zero(&db->db_holds));

 	if (db->db_buf != NULL) {
 		arc_buf_destroy(db->db_buf, db);
@@ -2106,7 +2108,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);

 		multilist_remove(dbuf_caches[db->db_caching_status].cache, db);
-		(void) refcount_remove_many(
+		(void) zfs_refcount_remove_many(
 		    &dbuf_caches[db->db_caching_status].size,
 		    db->db.db_size, db);

@@ -2156,7 +2158,7 @@ dbuf_destroy(dmu_buf_impl_t *db)
 		DB_DNODE_EXIT(db);
 	}

-	ASSERT(refcount_is_zero(&db->db_holds));
+	ASSERT(zfs_refcount_is_zero(&db->db_holds));

 	db->db_parent = NULL;

@@ -2347,8 +2349,8 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
 		dbuf_add_ref(parent, db);

 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
-	    refcount_count(&dn->dn_holds) > 0);
-	(void) refcount_add(&dn->dn_holds, db);
+	    zfs_refcount_count(&dn->dn_holds) > 0);
+	(void) zfs_refcount_add(&dn->dn_holds, db);
 	atomic_inc_32(&dn->dn_dbufs_count);

 	dprintf_dbuf(db, "db=%p\n", db);
@@ -2680,18 +2682,18 @@ dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
 	}

 	if (multilist_link_active(&db->db_cache_link)) {
-		ASSERT(refcount_is_zero(&db->db_holds));
+		ASSERT(zfs_refcount_is_zero(&db->db_holds));
 		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);

 		multilist_remove(dbuf_caches[db->db_caching_status].cache, db);
-		(void) refcount_remove_many(
+		(void) zfs_refcount_remove_many(
 		    &dbuf_caches[db->db_caching_status].size,
 		    db->db.db_size, db);

 		db->db_caching_status = DB_NO_CACHE;
 	}
-	(void) refcount_add(&db->db_holds, tag);
+	(void) zfs_refcount_add(&db->db_holds, tag);
 	DBUF_VERIFY(db);
 	mutex_exit(&db->db_mtx);

@@ -2763,7 +2765,7 @@ dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
 void
 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
 {
-	int64_t holds = refcount_add(&db->db_holds, tag);
+	int64_t holds = zfs_refcount_add(&db->db_holds, tag);
 	ASSERT3S(holds, >, 1);
 }

@@ -2783,7 +2785,7 @@ dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,

 	if (found_db != NULL) {
 		if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
-			(void) refcount_add(&db->db_holds, tag);
+			(void) zfs_refcount_add(&db->db_holds, tag);
 			result = B_TRUE;
 		}
 		mutex_exit(&db->db_mtx);
@@ -2837,7 +2839,7 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
 	 * dnode so we can guarantee in dnode_move() that a referenced bonus
 	 * buffer has a corresponding dnode hold.
 	 */
-	holds = refcount_remove(&db->db_holds, tag);
+	holds = zfs_refcount_remove(&db->db_holds, tag);
 	ASSERT(holds >= 0);

 	/*
@@ -2924,8 +2926,8 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
 			db->db_caching_status = dcs;

 			multilist_insert(dbuf_caches[dcs].cache, db);
-			(void) refcount_add_many(&dbuf_caches[dcs].size,
-			    db->db.db_size, db);
+			(void) zfs_refcount_add_many(
+			    &dbuf_caches[dcs].size, db->db.db_size, db);
 			mutex_exit(&db->db_mtx);

 			if (db->db_caching_status == DB_DBUF_CACHE &&
@@ -2947,7 +2949,7 @@ dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
 uint64_t
 dbuf_refcount(dmu_buf_impl_t *db)
 {
-	return (refcount_count(&db->db_holds));
+	return (zfs_refcount_count(&db->db_holds));
 }

 void *
@@ -3222,7 +3224,7 @@ dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)

 	if (db->db_state != DB_NOFILL &&
 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
-	    refcount_count(&db->db_holds) > 1 &&
+	    zfs_refcount_count(&db->db_holds) > 1 &&
 	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
 	    *datap == db->db_buf) {
 		/*
@@ -357,7 +357,7 @@ dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
 	db = dn->dn_bonus;

 	/* as long as the bonus buf is held, the dnode will be held */
-	if (refcount_add(&db->db_holds, tag) == 1) {
+	if (zfs_refcount_add(&db->db_holds, tag) == 1) {
 		VERIFY(dnode_add_ref(dn, db));
 		atomic_inc_32(&dn->dn_dbufs_count);
 	}
@@ -99,7 +99,7 @@ dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
 	dmu_tx_hold_t *txh;

 	if (dn != NULL) {
-		(void) refcount_add(&dn->dn_holds, tx);
+		(void) zfs_refcount_add(&dn->dn_holds, tx);
 		if (tx->tx_txg != 0) {
 			mutex_enter(&dn->dn_mtx);
 			/*
@@ -109,7 +109,7 @@ dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
 			 */
 			ASSERT(dn->dn_assigned_txg == 0);
 			dn->dn_assigned_txg = tx->tx_txg;
-			(void) refcount_add(&dn->dn_tx_holds, tx);
+			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
 			mutex_exit(&dn->dn_mtx);
 		}
 	}
@@ -117,8 +117,8 @@ dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
 	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
 	txh->txh_tx = tx;
 	txh->txh_dnode = dn;
-	refcount_create(&txh->txh_space_towrite);
-	refcount_create(&txh->txh_memory_tohold);
+	zfs_refcount_create(&txh->txh_space_towrite);
+	zfs_refcount_create(&txh->txh_memory_tohold);
 	txh->txh_type = type;
 	txh->txh_arg1 = arg1;
 	txh->txh_arg2 = arg2;
@@ -213,9 +213,9 @@ dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
 	if (len == 0)
 		return;

-	(void) refcount_add_many(&txh->txh_space_towrite, len, FTAG);
+	(void) zfs_refcount_add_many(&txh->txh_space_towrite, len, FTAG);

-	if (refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
+	if (zfs_refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
 		err = SET_ERROR(EFBIG);

 	if (dn == NULL)
@@ -280,7 +280,8 @@ dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
 static void
 dmu_tx_count_dnode(dmu_tx_hold_t *txh)
 {
-	(void) refcount_add_many(&txh->txh_space_towrite, DNODE_MIN_SIZE, FTAG);
+	(void) zfs_refcount_add_many(&txh->txh_space_towrite, DNODE_MIN_SIZE,
+	    FTAG);
 }

 void
@@ -312,7 +313,7 @@ dmu_tx_hold_remap_l1indirect(dmu_tx_t *tx, uint64_t object)
 		return;

 	dnode_t *dn = txh->txh_dnode;
-	(void) refcount_add_many(&txh->txh_space_towrite,
+	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
 	    1ULL << dn->dn_indblkshift, FTAG);
 	dmu_tx_count_dnode(txh);
 }
@@ -419,7 +420,7 @@ dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
 				return;
 			}

-			(void) refcount_add_many(&txh->txh_memory_tohold,
+			(void) zfs_refcount_add_many(&txh->txh_memory_tohold,
 			    1 << dn->dn_indblkshift, FTAG);

 			err = dmu_tx_check_ioerr(zio, dn, 1, i);
@@ -480,7 +481,7 @@ dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
 	 * - 2 blocks for possibly split leaves,
 	 * - 2 grown ptrtbl blocks
 	 */
-	(void) refcount_add_many(&txh->txh_space_towrite,
+	(void) zfs_refcount_add_many(&txh->txh_space_towrite,
 	    MZAP_MAX_BLKSZ, FTAG);

 	if (dn == NULL)
@@ -570,7 +571,7 @@ dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
 	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
 	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

-	(void) refcount_add_many(&txh->txh_space_towrite, space, FTAG);
+	(void) zfs_refcount_add_many(&txh->txh_space_towrite, space, FTAG);
 }

 #ifdef ZFS_DEBUG
||||
@ -925,11 +926,11 @@ dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
|
||||
if (dn->dn_assigned_txg == 0)
|
||||
dn->dn_assigned_txg = tx->tx_txg;
|
||||
ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
|
||||
(void) refcount_add(&dn->dn_tx_holds, tx);
|
||||
(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
|
||||
mutex_exit(&dn->dn_mtx);
|
||||
}
|
||||
towrite += refcount_count(&txh->txh_space_towrite);
|
||||
tohold += refcount_count(&txh->txh_memory_tohold);
|
||||
towrite += zfs_refcount_count(&txh->txh_space_towrite);
|
||||
tohold += zfs_refcount_count(&txh->txh_memory_tohold);
|
||||
}
|
||||
|
||||
/* needed allocation: worst-case estimate of write space */
|
||||
@ -969,7 +970,7 @@ dmu_tx_unassign(dmu_tx_t *tx)
|
||||
mutex_enter(&dn->dn_mtx);
|
||||
ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
|
||||
|
||||
if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
|
||||
if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
|
||||
dn->dn_assigned_txg = 0;
|
||||
cv_broadcast(&dn->dn_notxholds);
|
||||
}
|
||||
@ -1103,10 +1104,10 @@ dmu_tx_destroy(dmu_tx_t *tx)
|
||||
dnode_t *dn = txh->txh_dnode;
|
||||
|
||||
list_remove(&tx->tx_holds, txh);
|
||||
refcount_destroy_many(&txh->txh_space_towrite,
|
||||
refcount_count(&txh->txh_space_towrite));
|
||||
refcount_destroy_many(&txh->txh_memory_tohold,
|
||||
refcount_count(&txh->txh_memory_tohold));
|
||||
zfs_refcount_destroy_many(&txh->txh_space_towrite,
|
||||
zfs_refcount_count(&txh->txh_space_towrite));
|
||||
zfs_refcount_destroy_many(&txh->txh_memory_tohold,
|
||||
zfs_refcount_count(&txh->txh_memory_tohold));
|
||||
kmem_free(txh, sizeof (dmu_tx_hold_t));
|
||||
if (dn != NULL)
|
||||
dnode_rele(dn, tx);
|
||||
@ -1136,7 +1137,7 @@ dmu_tx_commit(dmu_tx_t *tx)
|
||||
mutex_enter(&dn->dn_mtx);
|
||||
ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
|
||||
|
||||
if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
|
||||
if (zfs_refcount_remove(&dn->dn_tx_holds, tx) == 0) {
|
||||
dn->dn_assigned_txg = 0;
|
||||
cv_broadcast(&dn->dn_notxholds);
|
||||
}
|
||||
@ -1251,7 +1252,7 @@ dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
|
||||
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
|
||||
THT_SPILL, 0, 0);
|
||||
if (txh != NULL)
|
||||
(void) refcount_add_many(&txh->txh_space_towrite,
|
||||
(void) zfs_refcount_add_many(&txh->txh_space_towrite,
|
||||
SPA_OLD_MAXBLOCKSIZE, FTAG);
|
||||
}
|
||||
|
||||
|
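Every dmu_tx hunk above follows the same accounting pattern: a hold charges a byte count against its refcount under an FTAG holder tag, and the running total gates the transaction size. A minimal sketch of that pattern against the renamed API (the function name is hypothetical, not part of this commit):

static int
example_count_write(zfs_refcount_t *towrite, uint64_t len)
{
	/* Charge len bytes against this hold; FTAG names the holder. */
	(void) zfs_refcount_add_many(towrite, len, FTAG);

	/* Reject oversized transactions, as dmu_tx_count_write() does. */
	if (zfs_refcount_count(towrite) > 2 * DMU_MAX_ACCESS)
		return (SET_ERROR(EFBIG));
	return (0);
}
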
@ -137,8 +137,8 @@ dnode_cons(void *arg, void *unused, int kmflag)
* Every dbuf has a reference, and dropping a tracked reference is
* O(number of references), so don't track dn_holds.
*/
refcount_create_untracked(&dn->dn_holds);
refcount_create(&dn->dn_tx_holds);
zfs_refcount_create_untracked(&dn->dn_holds);
zfs_refcount_create(&dn->dn_tx_holds);
list_link_init(&dn->dn_link);

bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
@ -193,8 +193,8 @@ dnode_dest(void *arg, void *unused)
mutex_destroy(&dn->dn_mtx);
mutex_destroy(&dn->dn_dbufs_mtx);
cv_destroy(&dn->dn_notxholds);
refcount_destroy(&dn->dn_holds);
refcount_destroy(&dn->dn_tx_holds);
zfs_refcount_destroy(&dn->dn_holds);
zfs_refcount_destroy(&dn->dn_tx_holds);
ASSERT(!list_link_active(&dn->dn_link));

for (i = 0; i < TXG_SIZE; i++) {
@ -395,7 +395,7 @@ dnode_buf_byteswap(void *vbuf, size_t size)
void
dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
{
ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);

dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
@ -412,7 +412,7 @@ dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
void
dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
{
ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dn->dn_bonustype = newtype;
@ -423,7 +423,7 @@ dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
void
dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
ASSERT3U(zfs_refcount_count(&dn->dn_holds), >=, 1);
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
dnode_setdirty(dn, tx);
dn->dn_rm_spillblk[tx->tx_txg&TXG_MASK] = DN_KILL_SPILLBLK;
@ -617,8 +617,8 @@ dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_dirty_txg);
ASSERT0(dn->dn_assigned_txg);
ASSERT(refcount_is_zero(&dn->dn_tx_holds));
ASSERT3U(refcount_count(&dn->dn_holds), <=, 1);
ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));
ASSERT3U(zfs_refcount_count(&dn->dn_holds), <=, 1);
ASSERT(avl_is_empty(&dn->dn_dbufs));

for (i = 0; i < TXG_SIZE; i++) {
@ -809,8 +809,8 @@ dnode_move_impl(dnode_t *odn, dnode_t *ndn)
ndn->dn_dirty_txg = odn->dn_dirty_txg;
ndn->dn_dirtyctx = odn->dn_dirtyctx;
ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
ASSERT(refcount_count(&odn->dn_tx_holds) == 0);
refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
ASSERT(zfs_refcount_count(&odn->dn_tx_holds) == 0);
zfs_refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
ASSERT(avl_is_empty(&ndn->dn_dbufs));
avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
ndn->dn_dbufs_count = odn->dn_dbufs_count;
@ -998,7 +998,7 @@ dnode_move(void *buf, void *newbuf, size_t size, void *arg)
* hold before the dbuf is removed, the hold is discounted, and the
* removal is blocked until the move completes.
*/
refcount = refcount_count(&odn->dn_holds);
refcount = zfs_refcount_count(&odn->dn_holds);
ASSERT(refcount >= 0);
dbufs = odn->dn_dbufs_count;

@ -1026,7 +1026,7 @@ dnode_move(void *buf, void *newbuf, size_t size, void *arg)

list_link_replace(&odn->dn_link, &ndn->dn_link);
/* If the dnode was safe to move, the refcount cannot have changed. */
ASSERT(refcount == refcount_count(&ndn->dn_holds));
ASSERT(refcount == zfs_refcount_count(&ndn->dn_holds));
ASSERT(dbufs == ndn->dn_dbufs_count);
zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
mutex_exit(&os->os_lock);
@ -1176,7 +1176,7 @@ dnode_special_close(dnode_handle_t *dnh)
* has a hold on this dnode while we are trying to evict this
* dnode.
*/
while (refcount_count(&dn->dn_holds) > 0)
while (zfs_refcount_count(&dn->dn_holds) > 0)
delay(1);
ASSERT(dn->dn_dbuf == NULL ||
dmu_buf_get_user(&dn->dn_dbuf->db) == NULL);
@ -1231,8 +1231,8 @@ dnode_buf_evict_async(void *dbu)
* it wouldn't be eligible for eviction and this function
* would not have been called.
*/
ASSERT(refcount_is_zero(&dn->dn_holds));
ASSERT(refcount_is_zero(&dn->dn_tx_holds));
ASSERT(zfs_refcount_is_zero(&dn->dn_holds));
ASSERT(zfs_refcount_is_zero(&dn->dn_tx_holds));

dnode_destroy(dn); /* implicit zrl_remove() for first slot */
zrl_destroy(&dnh->dnh_zrlock);
@ -1306,7 +1306,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
return (SET_ERROR(EEXIST));
DNODE_VERIFY(dn);
(void) refcount_add(&dn->dn_holds, tag);
(void) zfs_refcount_add(&dn->dn_holds, tag);
*dnp = dn;
return (0);
}
@ -1499,7 +1499,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
}

mutex_enter(&dn->dn_mtx);
if (!refcount_is_zero(&dn->dn_holds) || dn->dn_free_txg) {
if (!zfs_refcount_is_zero(&dn->dn_holds) || dn->dn_free_txg) {
DNODE_STAT_BUMP(dnode_hold_free_refcount);
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
@ -1524,7 +1524,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
ENOENT : EEXIST));
}

if (refcount_add(&dn->dn_holds, tag) == 1)
if (zfs_refcount_add(&dn->dn_holds, tag) == 1)
dbuf_add_ref(db, dnh);

mutex_exit(&dn->dn_mtx);
@ -1560,11 +1560,11 @@ boolean_t
dnode_add_ref(dnode_t *dn, void *tag)
{
mutex_enter(&dn->dn_mtx);
if (refcount_is_zero(&dn->dn_holds)) {
if (zfs_refcount_is_zero(&dn->dn_holds)) {
mutex_exit(&dn->dn_mtx);
return (FALSE);
}
VERIFY(1 < refcount_add(&dn->dn_holds, tag));
VERIFY(1 < zfs_refcount_add(&dn->dn_holds, tag));
mutex_exit(&dn->dn_mtx);
return (TRUE);
}
@ -1584,7 +1584,7 @@ dnode_rele_and_unlock(dnode_t *dn, void *tag, boolean_t evicting)
dmu_buf_impl_t *db = dn->dn_dbuf;
dnode_handle_t *dnh = dn->dn_handle;

refs = refcount_remove(&dn->dn_holds, tag);
refs = zfs_refcount_remove(&dn->dn_holds, tag);
mutex_exit(&dn->dn_mtx);

/*
@ -1649,7 +1649,7 @@ dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
return;
}

ASSERT(!refcount_is_zero(&dn->dn_holds) ||
ASSERT(!zfs_refcount_is_zero(&dn->dn_holds) ||
!avl_is_empty(&dn->dn_dbufs));
ASSERT(dn->dn_datablksz != 0);
ASSERT0(dn->dn_next_bonuslen[txg&TXG_MASK]);

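The dnode hunks rely on the add/remove return values being the updated count, so "first hold" and "last release" are detected without a separate read. A self-contained sketch of that idiom (hypothetical helper names, not from this commit):

static boolean_t
example_first_hold(zfs_refcount_t *holds, void *tag)
{
	/* dnode_hold_impl() pins the backing dbuf when this returns B_TRUE. */
	return (zfs_refcount_add(holds, tag) == 1);
}

static boolean_t
example_last_release(zfs_refcount_t *holds, void *tag)
{
	/* dnode_rele_and_unlock() can begin teardown when this returns B_TRUE. */
	return (zfs_refcount_remove(holds, tag) == 0);
}
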
@ -432,7 +432,7 @@ dnode_evict_dbufs(dnode_t *dn)

mutex_enter(&db->db_mtx);
if (db->db_state != DB_EVICTING &&
refcount_is_zero(&db->db_holds)) {
zfs_refcount_is_zero(&db->db_holds)) {
db_marker.db_level = db->db_level;
db_marker.db_blkid = db->db_blkid;
db_marker.db_state = DB_SEARCH;
@ -472,7 +472,7 @@ dnode_evict_bonus(dnode_t *dn)
{
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
if (dn->dn_bonus != NULL) {
if (refcount_is_zero(&dn->dn_bonus->db_holds)) {
if (zfs_refcount_is_zero(&dn->dn_bonus->db_holds)) {
mutex_enter(&dn->dn_bonus->db_mtx);
dbuf_destroy(dn->dn_bonus);
dn->dn_bonus = NULL;
@ -538,7 +538,7 @@ dnode_sync_free(dnode_t *dn, dmu_tx_t *tx)
* zfs_obj_to_path() also depends on this being
* commented out.
*
* ASSERT3U(refcount_count(&dn->dn_holds), ==, 1);
* ASSERT3U(zfs_refcount_count(&dn->dn_holds), ==, 1);
*/

/* Undirty next bits */

@ -336,7 +336,7 @@ dsl_dataset_evict_async(void *dbu)
mutex_destroy(&ds->ds_opening_lock);
mutex_destroy(&ds->ds_sendstream_lock);
mutex_destroy(&ds->ds_remap_deadlist_lock);
refcount_destroy(&ds->ds_longholds);
zfs_refcount_destroy(&ds->ds_longholds);
rrw_destroy(&ds->ds_bp_rwlock);

kmem_free(ds, sizeof (dsl_dataset_t));
@ -474,7 +474,7 @@ dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
mutex_init(&ds->ds_remap_deadlist_lock,
NULL, MUTEX_DEFAULT, NULL);
rrw_init(&ds->ds_bp_rwlock, B_FALSE);
refcount_create(&ds->ds_longholds);
zfs_refcount_create(&ds->ds_longholds);

bplist_create(&ds->ds_pending_deadlist);

@ -565,7 +565,7 @@ dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
mutex_destroy(&ds->ds_lock);
mutex_destroy(&ds->ds_opening_lock);
mutex_destroy(&ds->ds_sendstream_lock);
refcount_destroy(&ds->ds_longholds);
zfs_refcount_destroy(&ds->ds_longholds);
kmem_free(ds, sizeof (dsl_dataset_t));
if (err != 0) {
dmu_buf_rele(dbuf, tag);
@ -690,20 +690,20 @@ void
dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag)
{
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
(void) refcount_add(&ds->ds_longholds, tag);
(void) zfs_refcount_add(&ds->ds_longholds, tag);
}

void
dsl_dataset_long_rele(dsl_dataset_t *ds, void *tag)
{
(void) refcount_remove(&ds->ds_longholds, tag);
(void) zfs_refcount_remove(&ds->ds_longholds, tag);
}

/* Return B_TRUE if there are any long holds on this dataset. */
boolean_t
dsl_dataset_long_held(dsl_dataset_t *ds)
{
return (!refcount_is_zero(&ds->ds_longholds));
return (!zfs_refcount_is_zero(&ds->ds_longholds));
}

void

@ -263,7 +263,7 @@ dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
ASSERT(refcount_is_zero(&ds->ds_longholds));
ASSERT(zfs_refcount_is_zero(&ds->ds_longholds));

if (defer &&
(ds->ds_userrefs > 0 ||
@ -697,7 +697,7 @@ dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
if (ds->ds_is_snapshot)
return (SET_ERROR(EINVAL));

if (refcount_count(&ds->ds_longholds) != expected_holds)
if (zfs_refcount_count(&ds->ds_longholds) != expected_holds)
return (SET_ERROR(EBUSY));

mos = ds->ds_dir->dd_pool->dp_meta_objset;
@ -725,7 +725,7 @@ dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
ds->ds_prev->ds_userrefs == 0) {
/* We need to remove the origin snapshot as well. */
if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
if (!zfs_refcount_is_zero(&ds->ds_prev->ds_longholds))
return (SET_ERROR(EBUSY));
}
return (0);

@ -234,11 +234,11 @@ metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
mc->mc_ops = ops;
mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
mc->mc_alloc_slots = kmem_zalloc(spa->spa_alloc_count *
sizeof (refcount_t), KM_SLEEP);
sizeof (zfs_refcount_t), KM_SLEEP);
mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
sizeof (uint64_t), KM_SLEEP);
for (int i = 0; i < spa->spa_alloc_count; i++)
refcount_create_tracked(&mc->mc_alloc_slots[i]);
zfs_refcount_create_tracked(&mc->mc_alloc_slots[i]);

return (mc);
}
@ -253,9 +253,9 @@ metaslab_class_destroy(metaslab_class_t *mc)
ASSERT(mc->mc_dspace == 0);

for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
refcount_destroy(&mc->mc_alloc_slots[i]);
zfs_refcount_destroy(&mc->mc_alloc_slots[i]);
kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
sizeof (refcount_t));
sizeof (zfs_refcount_t));
kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
sizeof (uint64_t));
mutex_destroy(&mc->mc_lock);
@ -650,12 +650,12 @@ metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
mg->mg_no_free_space = B_TRUE;
mg->mg_allocators = allocators;

mg->mg_alloc_queue_depth = kmem_zalloc(allocators * sizeof (refcount_t),
KM_SLEEP);
mg->mg_alloc_queue_depth = kmem_zalloc(allocators *
sizeof (zfs_refcount_t), KM_SLEEP);
mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
sizeof (uint64_t), KM_SLEEP);
for (int i = 0; i < allocators; i++) {
refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
zfs_refcount_create_tracked(&mg->mg_alloc_queue_depth[i]);
mg->mg_cur_max_alloc_queue_depth[i] = 0;
}

@ -687,11 +687,11 @@ metaslab_group_destroy(metaslab_group_t *mg)
cv_destroy(&mg->mg_ms_initialize_cv);

for (int i = 0; i < mg->mg_allocators; i++) {
refcount_destroy(&mg->mg_alloc_queue_depth[i]);
zfs_refcount_destroy(&mg->mg_alloc_queue_depth[i]);
mg->mg_cur_max_alloc_queue_depth[i] = 0;
}
kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
sizeof (refcount_t));
sizeof (zfs_refcount_t));
kmem_free(mg->mg_cur_max_alloc_queue_depth, mg->mg_allocators *
sizeof (uint64_t));

@ -1039,7 +1039,8 @@ metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
if (mg->mg_no_free_space)
return (B_FALSE);

qdepth = refcount_count(&mg->mg_alloc_queue_depth[allocator]);
qdepth = zfs_refcount_count(
&mg->mg_alloc_queue_depth[allocator]);

/*
* If this metaslab group is below its qmax or it's
@ -1060,7 +1061,7 @@ metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
qmax = mgp->mg_cur_max_alloc_queue_depth[allocator];

qdepth = refcount_count(
qdepth = zfs_refcount_count(
&mgp->mg_alloc_queue_depth[allocator]);

/*
@ -2911,7 +2912,7 @@ metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;

(void) refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
(void) zfs_refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
}

static void
@ -2942,7 +2943,7 @@ metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags,
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;

(void) refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
(void) zfs_refcount_remove(&mg->mg_alloc_queue_depth[allocator], tag);
if (io_complete)
metaslab_group_increment_qdepth(mg, allocator);
}
@ -2958,8 +2959,8 @@ metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag,
for (int d = 0; d < ndvas; d++) {
uint64_t vdev = DVA_GET_VDEV(&dva[d]);
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth[allocator],
tag));
VERIFY(zfs_refcount_not_held(
&mg->mg_alloc_queue_depth[allocator], tag));
}
#endif
}
@ -3033,10 +3034,10 @@ find_valid_metaslab(metaslab_group_t *mg, uint64_t activation_weight,
}

/*
* If the selected metaslab is condensing or being
* initialized, skip it.
* If the selected metaslab is condensing or being
* initialized, skip it.
*/
if (msp->ms_condensing || msp->ms_initializing > 0)
if (msp->ms_condensing || msp->ms_initializing > 0)
continue;

*was_active = msp->ms_allocator != -1;
@ -3842,7 +3843,7 @@ metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
mutex_enter(&mc->mc_lock);

uint64_t reserved_slots =
refcount_count(&mc->mc_alloc_slots[allocator]);
zfs_refcount_count(&mc->mc_alloc_slots[allocator]);
if (reserved_slots < max)
available_slots = max - reserved_slots;

@ -3853,7 +3854,7 @@ metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
*/
for (int d = 0; d < slots; d++) {
reserved_slots =
refcount_add(&mc->mc_alloc_slots[allocator],
zfs_refcount_add(&mc->mc_alloc_slots[allocator],
zio);
}
zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
@ -3871,7 +3872,7 @@ metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots,
ASSERT(mc->mc_alloc_throttle_enabled);
mutex_enter(&mc->mc_lock);
for (int d = 0; d < slots; d++) {
(void) refcount_remove(&mc->mc_alloc_slots[allocator],
(void) zfs_refcount_remove(&mc->mc_alloc_slots[allocator],
zio);
}
mutex_exit(&mc->mc_lock);

@ -39,7 +39,7 @@ static kmem_cache_t *reference_cache;
static kmem_cache_t *reference_history_cache;

void
refcount_init(void)
zfs_refcount_init(void)
{
reference_cache = kmem_cache_create("reference_cache",
sizeof (reference_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
@ -49,14 +49,14 @@ refcount_init(void)
}

void
refcount_fini(void)
zfs_refcount_fini(void)
{
kmem_cache_destroy(reference_cache);
kmem_cache_destroy(reference_history_cache);
}

void
refcount_create(refcount_t *rc)
zfs_refcount_create(zfs_refcount_t *rc)
{
mutex_init(&rc->rc_mtx, NULL, MUTEX_DEFAULT, NULL);
list_create(&rc->rc_list, sizeof (reference_t),
@ -69,21 +69,21 @@ refcount_create(refcount_t *rc)
}

void
refcount_create_tracked(refcount_t *rc)
zfs_refcount_create_tracked(zfs_refcount_t *rc)
{
refcount_create(rc);
zfs_refcount_create(rc);
rc->rc_tracked = B_TRUE;
}

void
refcount_create_untracked(refcount_t *rc)
zfs_refcount_create_untracked(zfs_refcount_t *rc)
{
refcount_create(rc);
zfs_refcount_create(rc);
rc->rc_tracked = B_FALSE;
}

void
refcount_destroy_many(refcount_t *rc, uint64_t number)
zfs_refcount_destroy_many(zfs_refcount_t *rc, uint64_t number)
{
reference_t *ref;

@ -104,25 +104,25 @@ refcount_destroy_many(refcount_t *rc, uint64_t number)
}

void
refcount_destroy(refcount_t *rc)
zfs_refcount_destroy(zfs_refcount_t *rc)
{
refcount_destroy_many(rc, 0);
zfs_refcount_destroy_many(rc, 0);
}

int
refcount_is_zero(refcount_t *rc)
zfs_refcount_is_zero(zfs_refcount_t *rc)
{
return (rc->rc_count == 0);
}

int64_t
refcount_count(refcount_t *rc)
zfs_refcount_count(zfs_refcount_t *rc)
{
return (rc->rc_count);
}

int64_t
refcount_add_many(refcount_t *rc, uint64_t number, void *holder)
zfs_refcount_add_many(zfs_refcount_t *rc, uint64_t number, void *holder)
{
reference_t *ref = NULL;
int64_t count;
@ -144,13 +144,13 @@ refcount_add_many(refcount_t *rc, uint64_t number, void *holder)
}

int64_t
refcount_add(refcount_t *rc, void *holder)
zfs_refcount_add(zfs_refcount_t *rc, void *holder)
{
return (refcount_add_many(rc, 1, holder));
return (zfs_refcount_add_many(rc, 1, holder));
}

int64_t
refcount_remove_many(refcount_t *rc, uint64_t number, void *holder)
zfs_refcount_remove_many(zfs_refcount_t *rc, uint64_t number, void *holder)
{
reference_t *ref;
int64_t count;
@ -198,13 +198,13 @@ refcount_remove_many(refcount_t *rc, uint64_t number, void *holder)
}

int64_t
refcount_remove(refcount_t *rc, void *holder)
zfs_refcount_remove(zfs_refcount_t *rc, void *holder)
{
return (refcount_remove_many(rc, 1, holder));
return (zfs_refcount_remove_many(rc, 1, holder));
}

void
refcount_transfer(refcount_t *dst, refcount_t *src)
zfs_refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
{
int64_t count, removed_count;
list_t list, removed;
@ -235,7 +235,7 @@ refcount_transfer(refcount_t *dst, refcount_t *src)
}

void
refcount_transfer_ownership(refcount_t *rc, void *current_holder,
zfs_refcount_transfer_ownership(zfs_refcount_t *rc, void *current_holder,
void *new_holder)
{
reference_t *ref;
@ -265,7 +265,7 @@ refcount_transfer_ownership(refcount_t *rc, void *current_holder,
* might be held.
*/
boolean_t
refcount_held(refcount_t *rc, void *holder)
zfs_refcount_held(zfs_refcount_t *rc, void *holder)
{
reference_t *ref;

@ -293,7 +293,7 @@ refcount_held(refcount_t *rc, void *holder)
* since the reference might not be held.
*/
boolean_t
refcount_not_held(refcount_t *rc, void *holder)
zfs_refcount_not_held(zfs_refcount_t *rc, void *holder)
{
reference_t *ref;

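A short usage sketch of the renamed primitives (illustrative, not part of this commit): a tracked refcount records a reference_t per holder tag, so an add and its matching remove must pass the same tag, while an untracked one maintains only rc_count.

zfs_refcount_t rc;

zfs_refcount_create_tracked(&rc);	/* record individual holders */
(void) zfs_refcount_add(&rc, FTAG);	/* holder tag is remembered */
ASSERT(zfs_refcount_held(&rc, FTAG));
(void) zfs_refcount_remove(&rc, FTAG);	/* must match the add's tag */
ASSERT(zfs_refcount_is_zero(&rc));
zfs_refcount_destroy(&rc);
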
@ -85,7 +85,7 @@ rrn_find(rrwlock_t *rrl)
{
rrw_node_t *rn;

if (refcount_count(&rrl->rr_linked_rcount) == 0)
if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
return (NULL);

for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
@ -120,7 +120,7 @@ rrn_find_and_remove(rrwlock_t *rrl, void *tag)
rrw_node_t *rn;
rrw_node_t *prev = NULL;

if (refcount_count(&rrl->rr_linked_rcount) == 0)
if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
return (B_FALSE);

for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
@ -143,8 +143,8 @@ rrw_init(rrwlock_t *rrl, boolean_t track_all)
mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL);
rrl->rr_writer = NULL;
refcount_create(&rrl->rr_anon_rcount);
refcount_create(&rrl->rr_linked_rcount);
zfs_refcount_create(&rrl->rr_anon_rcount);
zfs_refcount_create(&rrl->rr_linked_rcount);
rrl->rr_writer_wanted = B_FALSE;
rrl->rr_track_all = track_all;
}
@ -155,8 +155,8 @@ rrw_destroy(rrwlock_t *rrl)
mutex_destroy(&rrl->rr_lock);
cv_destroy(&rrl->rr_cv);
ASSERT(rrl->rr_writer == NULL);
refcount_destroy(&rrl->rr_anon_rcount);
refcount_destroy(&rrl->rr_linked_rcount);
zfs_refcount_destroy(&rrl->rr_anon_rcount);
zfs_refcount_destroy(&rrl->rr_linked_rcount);
}

static void
@ -173,19 +173,19 @@ rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
#endif
ASSERT(rrl->rr_writer != curthread);
ASSERT(refcount_count(&rrl->rr_anon_rcount) >= 0);
ASSERT(zfs_refcount_count(&rrl->rr_anon_rcount) >= 0);

while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
zfs_refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
rrn_find(rrl) == NULL))
cv_wait(&rrl->rr_cv, &rrl->rr_lock);

if (rrl->rr_writer_wanted || rrl->rr_track_all) {
/* may or may not be a re-entrant enter */
rrn_add(rrl, tag);
(void) refcount_add(&rrl->rr_linked_rcount, tag);
(void) zfs_refcount_add(&rrl->rr_linked_rcount, tag);
} else {
(void) refcount_add(&rrl->rr_anon_rcount, tag);
(void) zfs_refcount_add(&rrl->rr_anon_rcount, tag);
}
ASSERT(rrl->rr_writer == NULL);
mutex_exit(&rrl->rr_lock);
@ -216,8 +216,8 @@ rrw_enter_write(rrwlock_t *rrl)
mutex_enter(&rrl->rr_lock);
ASSERT(rrl->rr_writer != curthread);

while (refcount_count(&rrl->rr_anon_rcount) > 0 ||
refcount_count(&rrl->rr_linked_rcount) > 0 ||
while (zfs_refcount_count(&rrl->rr_anon_rcount) > 0 ||
zfs_refcount_count(&rrl->rr_linked_rcount) > 0 ||
rrl->rr_writer != NULL) {
rrl->rr_writer_wanted = B_TRUE;
cv_wait(&rrl->rr_cv, &rrl->rr_lock);
@ -250,24 +250,25 @@ rrw_exit(rrwlock_t *rrl, void *tag)
}
DTRACE_PROBE(zfs__rrwfastpath__exitmiss);
#endif
ASSERT(!refcount_is_zero(&rrl->rr_anon_rcount) ||
!refcount_is_zero(&rrl->rr_linked_rcount) ||
ASSERT(!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
!zfs_refcount_is_zero(&rrl->rr_linked_rcount) ||
rrl->rr_writer != NULL);

if (rrl->rr_writer == NULL) {
int64_t count;
if (rrn_find_and_remove(rrl, tag)) {
count = refcount_remove(&rrl->rr_linked_rcount, tag);
count = zfs_refcount_remove(
&rrl->rr_linked_rcount, tag);
} else {
ASSERT(!rrl->rr_track_all);
count = refcount_remove(&rrl->rr_anon_rcount, tag);
count = zfs_refcount_remove(&rrl->rr_anon_rcount, tag);
}
if (count == 0)
cv_broadcast(&rrl->rr_cv);
} else {
ASSERT(rrl->rr_writer == curthread);
ASSERT(refcount_is_zero(&rrl->rr_anon_rcount) &&
refcount_is_zero(&rrl->rr_linked_rcount));
ASSERT(zfs_refcount_is_zero(&rrl->rr_anon_rcount) &&
zfs_refcount_is_zero(&rrl->rr_linked_rcount));
rrl->rr_writer = NULL;
cv_broadcast(&rrl->rr_cv);
}
@ -288,7 +289,7 @@ rrw_held(rrwlock_t *rrl, krw_t rw)
if (rw == RW_WRITER) {
held = (rrl->rr_writer == curthread);
} else {
held = (!refcount_is_zero(&rrl->rr_anon_rcount) ||
held = (!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
rrn_find(rrl) != NULL);
}
mutex_exit(&rrl->rr_lock);

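rrwlock callers pass the same tag to enter and exit so the per-thread rrw_node list and the linked refcount stay matched; the dsl_destroy hunk earlier in this diff uses exactly this shape. A minimal sketch (the read body is a placeholder):

rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);	/* tag recorded per thread */
/* ... read the dataset block pointer under the lock ... */
rrw_exit(&ds->ds_bp_rwlock, FTAG);		/* same tag releases the hold */
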
@ -1130,7 +1130,7 @@ sa_tear_down(objset_t *os)
while (layout = avl_destroy_nodes(&sa->sa_layout_hash_tree, &cookie)) {
sa_idx_tab_t *tab;
while (tab = list_head(&layout->lot_idx_tab)) {
ASSERT(refcount_count(&tab->sa_refcount));
ASSERT(zfs_refcount_count(&tab->sa_refcount));
sa_idx_tab_rele(os, tab);
}
}
@ -1315,13 +1315,13 @@ sa_idx_tab_rele(objset_t *os, void *arg)
return;

mutex_enter(&sa->sa_lock);
if (refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
if (zfs_refcount_remove(&idx_tab->sa_refcount, NULL) == 0) {
list_remove(&idx_tab->sa_layout->lot_idx_tab, idx_tab);
if (idx_tab->sa_variable_lengths)
kmem_free(idx_tab->sa_variable_lengths,
sizeof (uint16_t) *
idx_tab->sa_layout->lot_var_sizes);
refcount_destroy(&idx_tab->sa_refcount);
zfs_refcount_destroy(&idx_tab->sa_refcount);
kmem_free(idx_tab->sa_idx_tab,
sizeof (uint32_t) * sa->sa_num_attrs);
kmem_free(idx_tab, sizeof (sa_idx_tab_t));
@ -1335,7 +1335,7 @@ sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab)
sa_os_t *sa = os->os_sa;

ASSERT(MUTEX_HELD(&sa->sa_lock));
(void) refcount_add(&idx_tab->sa_refcount, NULL);
(void) zfs_refcount_add(&idx_tab->sa_refcount, NULL);
}

void
@ -1544,7 +1544,7 @@ sa_find_idx_tab(objset_t *os, dmu_object_type_t bonustype, sa_hdr_phys_t *hdr)
idx_tab->sa_idx_tab =
kmem_zalloc(sizeof (uint32_t) * sa->sa_num_attrs, KM_SLEEP);
idx_tab->sa_layout = tb;
refcount_create(&idx_tab->sa_refcount);
zfs_refcount_create(&idx_tab->sa_refcount);
if (tb->lot_var_sizes)
idx_tab->sa_variable_lengths = kmem_alloc(sizeof (uint16_t) *
tb->lot_var_sizes, KM_SLEEP);

@ -2261,7 +2261,7 @@ spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = refcount_count(&spa->spa_refcount);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
if (error) {
if (error != EEXIST) {
spa->spa_loaded_ts.tv_sec = 0;
@ -4836,7 +4836,7 @@ spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = refcount_count(&spa->spa_refcount);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
spa->spa_load_state = SPA_LOAD_NONE;

mutex_exit(&spa_namespace_lock);
@ -7636,7 +7636,8 @@ spa_sync(spa_t *spa, uint64_t txg)
* allocations all happen from spa_sync().
*/
for (int i = 0; i < spa->spa_alloc_count; i++)
ASSERT0(refcount_count(&(mg->mg_alloc_queue_depth[i])));
ASSERT0(zfs_refcount_count(
&(mg->mg_alloc_queue_depth[i])));
mg->mg_max_alloc_queue_depth = max_queue_depth;

for (int i = 0; i < spa->spa_alloc_count; i++) {
@ -7647,7 +7648,7 @@ spa_sync(spa_t *spa, uint64_t txg)
}
metaslab_class_t *mc = spa_normal_class(spa);
for (int i = 0; i < spa->spa_alloc_count; i++) {
ASSERT0(refcount_count(&mc->mc_alloc_slots[i]));
ASSERT0(zfs_refcount_count(&mc->mc_alloc_slots[i]));
mc->mc_alloc_max_slots[i] = slots_per_allocator;
}
mc->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;

@ -78,7 +78,7 @@
* definition they must have an existing reference, and will never need
* to lookup a spa_t by name.
*
* spa_refcount (per-spa refcount_t protected by mutex)
* spa_refcount (per-spa zfs_refcount_t protected by mutex)
*
* This reference count keep track of any active users of the spa_t. The
* spa_t cannot be destroyed or freed while this is non-zero. Internally,
@ -399,7 +399,7 @@ spa_config_lock_init(spa_t *spa)
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
refcount_create_untracked(&scl->scl_count);
zfs_refcount_create_untracked(&scl->scl_count);
scl->scl_writer = NULL;
scl->scl_write_wanted = 0;
}
@ -412,7 +412,7 @@ spa_config_lock_destroy(spa_t *spa)
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_destroy(&scl->scl_lock);
cv_destroy(&scl->scl_cv);
refcount_destroy(&scl->scl_count);
zfs_refcount_destroy(&scl->scl_count);
ASSERT(scl->scl_writer == NULL);
ASSERT(scl->scl_write_wanted == 0);
}
@ -435,7 +435,7 @@ spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
}
} else {
ASSERT(scl->scl_writer != curthread);
if (!refcount_is_zero(&scl->scl_count)) {
if (!zfs_refcount_is_zero(&scl->scl_count)) {
mutex_exit(&scl->scl_lock);
spa_config_exit(spa, locks & ((1 << i) - 1),
tag);
@ -443,7 +443,7 @@ spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
}
scl->scl_writer = curthread;
}
(void) refcount_add(&scl->scl_count, tag);
(void) zfs_refcount_add(&scl->scl_count, tag);
mutex_exit(&scl->scl_lock);
}
return (1);
@ -469,14 +469,14 @@ spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
}
} else {
ASSERT(scl->scl_writer != curthread);
while (!refcount_is_zero(&scl->scl_count)) {
while (!zfs_refcount_is_zero(&scl->scl_count)) {
scl->scl_write_wanted++;
cv_wait(&scl->scl_cv, &scl->scl_lock);
scl->scl_write_wanted--;
}
scl->scl_writer = curthread;
}
(void) refcount_add(&scl->scl_count, tag);
(void) zfs_refcount_add(&scl->scl_count, tag);
mutex_exit(&scl->scl_lock);
}
ASSERT3U(wlocks_held, <=, locks);
@ -490,8 +490,8 @@ spa_config_exit(spa_t *spa, int locks, void *tag)
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
ASSERT(!refcount_is_zero(&scl->scl_count));
if (refcount_remove(&scl->scl_count, tag) == 0) {
ASSERT(!zfs_refcount_is_zero(&scl->scl_count));
if (zfs_refcount_remove(&scl->scl_count, tag) == 0) {
ASSERT(scl->scl_writer == NULL ||
scl->scl_writer == curthread);
scl->scl_writer = NULL; /* OK in either case */
@ -510,7 +510,8 @@ spa_config_held(spa_t *spa, int locks, krw_t rw)
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
if ((rw == RW_READER &&
!zfs_refcount_is_zero(&scl->scl_count)) ||
(rw == RW_WRITER && scl->scl_writer == curthread))
locks_held |= 1 << i;
}
@ -644,7 +645,7 @@ spa_add(const char *name, nvlist_t *config, const char *altroot)
spa->spa_deadman_cycid = cyclic_add(&hdlr, &when);
mutex_exit(&cpu_lock);

refcount_create(&spa->spa_refcount);
zfs_refcount_create(&spa->spa_refcount);
spa_config_lock_init(spa);

avl_add(&spa_namespace_avl, spa);
@ -732,7 +733,7 @@ spa_remove(spa_t *spa)

ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);
ASSERT3U(zfs_refcount_count(&spa->spa_refcount), ==, 0);

nvlist_free(spa->spa_config_splitting);

@ -772,7 +773,7 @@ spa_remove(spa_t *spa)
mutex_exit(&cpu_lock);
spa->spa_deadman_cycid = CYCLIC_NONE;

refcount_destroy(&spa->spa_refcount);
zfs_refcount_destroy(&spa->spa_refcount);

spa_config_lock_destroy(spa);

@ -834,9 +835,9 @@ spa_next(spa_t *prev)
void
spa_open_ref(spa_t *spa, void *tag)
{
ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
ASSERT(zfs_refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) refcount_add(&spa->spa_refcount, tag);
(void) zfs_refcount_add(&spa->spa_refcount, tag);
}

/*
@ -846,9 +847,9 @@ spa_open_ref(spa_t *spa, void *tag)
void
spa_close(spa_t *spa, void *tag)
{
ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
ASSERT(zfs_refcount_count(&spa->spa_refcount) > spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) refcount_remove(&spa->spa_refcount, tag);
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
@ -862,7 +863,7 @@ spa_close(spa_t *spa, void *tag)
void
spa_async_close(spa_t *spa, void *tag)
{
(void) refcount_remove(&spa->spa_refcount, tag);
(void) zfs_refcount_remove(&spa->spa_refcount, tag);
}

/*
@ -875,7 +876,7 @@ spa_refcount_zero(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));

return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
return (zfs_refcount_count(&spa->spa_refcount) == spa->spa_minref);
}

/*
@ -1929,7 +1930,7 @@ spa_init(int mode)
}
#endif

refcount_init();
zfs_refcount_init();
unique_init();
range_tree_init();
metaslab_alloc_trace_init();
@ -1958,7 +1959,7 @@ spa_fini(void)
metaslab_alloc_trace_fini();
range_tree_fini();
unique_fini();
refcount_fini();
zfs_refcount_fini();

avl_destroy(&spa_namespace_avl);
avl_destroy(&spa_spare_avl);

@ -39,7 +39,7 @@ typedef struct abd {
abd_flags_t abd_flags;
uint_t abd_size; /* excludes scattered abd_offset */
struct abd *abd_parent;
refcount_t abd_children;
zfs_refcount_t abd_children;
union {
struct abd_scatter {
uint_t abd_offset;

@ -219,7 +219,7 @@ typedef struct dmu_buf_impl {
* If nonzero, the buffer can't be destroyed.
* Protected by db_mtx.
*/
refcount_t db_holds;
zfs_refcount_t db_holds;

/* buffer holding our data */
arc_buf_t *db_buf;

@ -97,8 +97,8 @@ typedef struct dmu_tx_hold {
dmu_tx_t *txh_tx;
list_node_t txh_node;
struct dnode *txh_dnode;
refcount_t txh_space_towrite;
refcount_t txh_memory_tohold;
zfs_refcount_t txh_space_towrite;
zfs_refcount_t txh_memory_tohold;
enum dmu_tx_hold_type txh_type;
uint64_t txh_arg1;
uint64_t txh_arg2;

@ -302,8 +302,8 @@ struct dnode {
uint8_t *dn_dirtyctx_firstset; /* dbg: contents meaningless */

/* protected by own devices */
refcount_t dn_tx_holds;
refcount_t dn_holds;
zfs_refcount_t dn_tx_holds;
zfs_refcount_t dn_holds;

kmutex_t dn_dbufs_mtx;
/*

@ -210,7 +210,7 @@ typedef struct dsl_dataset {
* Owning counts as a long hold. See the comments above
* dsl_pool_hold() for details.
*/
refcount_t ds_longholds;
zfs_refcount_t ds_longholds;

/* no locking; only for making guesses */
uint64_t ds_trysnap_txg;

@ -185,7 +185,7 @@ struct metaslab_class {
* number of allocations allowed.
*/
uint64_t *mc_alloc_max_slots;
refcount_t *mc_alloc_slots;
zfs_refcount_t *mc_alloc_slots;

uint64_t mc_alloc_groups; /* # of allocatable groups */

@ -257,7 +257,7 @@ struct metaslab_group {
*/
uint64_t mg_max_alloc_queue_depth;
uint64_t *mg_cur_max_alloc_queue_depth;
refcount_t *mg_alloc_queue_depth;
zfs_refcount_t *mg_alloc_queue_depth;
int mg_allocators;
/*
* A metalab group that can no longer allocate the minimum block

@ -56,59 +56,62 @@ typedef struct refcount {
list_t rc_removed;
uint64_t rc_count;
uint64_t rc_removed_count;
} refcount_t;
} zfs_refcount_t;

/* Note: refcount_t must be initialized with refcount_create[_untracked]() */
/*
 * Note: zfs_refcount_t must be initialized with
 * refcount_create[_untracked]()
 */

void refcount_create(refcount_t *rc);
void refcount_create_untracked(refcount_t *rc);
void refcount_create_tracked(refcount_t *rc);
void refcount_destroy(refcount_t *rc);
void refcount_destroy_many(refcount_t *rc, uint64_t number);
int refcount_is_zero(refcount_t *rc);
int64_t refcount_count(refcount_t *rc);
int64_t refcount_add(refcount_t *rc, void *holder_tag);
int64_t refcount_remove(refcount_t *rc, void *holder_tag);
int64_t refcount_add_many(refcount_t *rc, uint64_t number, void *holder_tag);
int64_t refcount_remove_many(refcount_t *rc, uint64_t number, void *holder_tag);
void refcount_transfer(refcount_t *dst, refcount_t *src);
void refcount_transfer_ownership(refcount_t *, void *, void *);
boolean_t refcount_held(refcount_t *, void *);
boolean_t refcount_not_held(refcount_t *, void *);
void zfs_refcount_create(zfs_refcount_t *);
void zfs_refcount_create_untracked(zfs_refcount_t *);
void zfs_refcount_create_tracked(zfs_refcount_t *);
void zfs_refcount_destroy(zfs_refcount_t *);
void zfs_refcount_destroy_many(zfs_refcount_t *, uint64_t);
int zfs_refcount_is_zero(zfs_refcount_t *);
int64_t zfs_refcount_count(zfs_refcount_t *);
int64_t zfs_refcount_add(zfs_refcount_t *, void *);
int64_t zfs_refcount_remove(zfs_refcount_t *, void *);
int64_t zfs_refcount_add_many(zfs_refcount_t *, uint64_t, void *);
int64_t zfs_refcount_remove_many(zfs_refcount_t *, uint64_t, void *);
void zfs_refcount_transfer(zfs_refcount_t *, zfs_refcount_t *);
void zfs_refcount_transfer_ownership(zfs_refcount_t *, void *, void *);
boolean_t zfs_refcount_held(zfs_refcount_t *, void *);
boolean_t zfs_refcount_not_held(zfs_refcount_t *, void *);

void refcount_init(void);
void refcount_fini(void);
void zfs_refcount_init(void);
void zfs_refcount_fini(void);

#else /* ZFS_DEBUG */

typedef struct refcount {
uint64_t rc_count;
} refcount_t;
} zfs_refcount_t;

#define refcount_create(rc) ((rc)->rc_count = 0)
#define refcount_create_untracked(rc) ((rc)->rc_count = 0)
#define refcount_create_tracked(rc) ((rc)->rc_count = 0)
#define refcount_destroy(rc) ((rc)->rc_count = 0)
#define refcount_destroy_many(rc, number) ((rc)->rc_count = 0)
#define refcount_is_zero(rc) ((rc)->rc_count == 0)
#define refcount_count(rc) ((rc)->rc_count)
#define refcount_add(rc, holder) atomic_inc_64_nv(&(rc)->rc_count)
#define refcount_remove(rc, holder) atomic_dec_64_nv(&(rc)->rc_count)
#define refcount_add_many(rc, number, holder) \
#define zfs_refcount_create(rc) ((rc)->rc_count = 0)
#define zfs_refcount_create_untracked(rc) ((rc)->rc_count = 0)
#define zfs_refcount_create_tracked(rc) ((rc)->rc_count = 0)
#define zfs_refcount_destroy(rc) ((rc)->rc_count = 0)
#define zfs_refcount_destroy_many(rc, number) ((rc)->rc_count = 0)
#define zfs_refcount_is_zero(rc) ((rc)->rc_count == 0)
#define zfs_refcount_count(rc) ((rc)->rc_count)
#define zfs_refcount_add(rc, holder) atomic_inc_64_nv(&(rc)->rc_count)
#define zfs_refcount_remove(rc, holder) atomic_dec_64_nv(&(rc)->rc_count)
#define zfs_refcount_add_many(rc, number, holder) \
atomic_add_64_nv(&(rc)->rc_count, number)
#define refcount_remove_many(rc, number, holder) \
#define zfs_refcount_remove_many(rc, number, holder) \
atomic_add_64_nv(&(rc)->rc_count, -number)
#define refcount_transfer(dst, src) { \
#define zfs_refcount_transfer(dst, src) { \
uint64_t __tmp = (src)->rc_count; \
atomic_add_64(&(src)->rc_count, -__tmp); \
atomic_add_64(&(dst)->rc_count, __tmp); \
}
#define refcount_transfer_ownership(rc, current_holder, new_holder) (void)0
#define refcount_held(rc, holder) ((rc)->rc_count > 0)
#define refcount_not_held(rc, holder) (B_TRUE)
#define zfs_refcount_transfer_ownership(rc, current_holder, new_holder) (void)0
#define zfs_refcount_held(rc, holder) ((rc)->rc_count > 0)
#define zfs_refcount_not_held(rc, holder) (B_TRUE)

#define refcount_init()
#define refcount_fini()
#define zfs_refcount_init()
#define zfs_refcount_fini()

#endif /* ZFS_DEBUG */

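Note what the non-debug half of this header implies: with ZFS_DEBUG unset, the renamed entry points are plain atomics and the holder argument is discarded at compile time, so production builds pay nothing for tracking. For example (illustrative only):

zfs_refcount_add(&rc, FTAG);		/* expands to atomic_inc_64_nv(&rc.rc_count) */
VERIFY(zfs_refcount_not_held(&rc, FTAG));	/* trivially B_TRUE in non-debug builds */
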
@ -55,8 +55,8 @@ typedef struct rrwlock {
kmutex_t rr_lock;
kcondvar_t rr_cv;
kthread_t *rr_writer;
refcount_t rr_anon_rcount;
refcount_t rr_linked_rcount;
zfs_refcount_t rr_anon_rcount;
zfs_refcount_t rr_linked_rcount;
boolean_t rr_writer_wanted;
boolean_t rr_track_all;
} rrwlock_t;

@ -110,7 +110,7 @@ typedef struct sa_idx_tab {
list_node_t sa_next;
sa_lot_t *sa_layout;
uint16_t *sa_variable_lengths;
refcount_t sa_refcount;
zfs_refcount_t sa_refcount;
uint32_t *sa_idx_tab; /* array of offsets */
} sa_idx_tab_t;

@ -136,7 +136,7 @@ typedef struct spa_config_lock {
kthread_t *scl_writer;
int scl_write_wanted;
kcondvar_t scl_cv;
refcount_t scl_count;
zfs_refcount_t scl_count;
} spa_config_lock_t;

typedef struct spa_config_dirent {
@ -382,11 +382,12 @@ struct spa {
/*
* spa_refcount & spa_config_lock must be the last elements
* because refcount_t changes size based on compilation options.
* because zfs_refcount_t changes size based on compilation options.
* In order for the MDB module to function correctly, the other
* fields must remain in the same location.
*/
spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */
refcount_t spa_refcount; /* number of opens */
zfs_refcount_t spa_refcount; /* number of opens */
};

extern const char *spa_config_path;

@ -228,7 +228,7 @@ int zap_lookup_norm_by_dnode(dnode_t *dn, const char *name,
boolean_t *ncp);

int zap_count_write_by_dnode(dnode_t *dn, const char *name,
int add, refcount_t *towrite, refcount_t *tooverwrite);
int add, zfs_refcount_t *towrite, zfs_refcount_t *tooverwrite);

/*
* Create an attribute with the given name and value.

@ -2228,7 +2228,7 @@ zio_write_gang_block(zio_t *pio)
ASSERT(has_data);

flags |= METASLAB_ASYNC_ALLOC;
VERIFY(refcount_held(&mc->mc_alloc_slots[pio->io_allocator],
VERIFY(zfs_refcount_held(&mc->mc_alloc_slots[pio->io_allocator],
pio));

/*
@ -3676,8 +3676,8 @@ zio_done(zio_t *zio)
ASSERT(bp != NULL);
metaslab_group_alloc_verify(spa, zio->io_bp, zio,
zio->io_allocator);
VERIFY(refcount_not_held(&mc->mc_alloc_slots[zio->io_allocator],
zio));
VERIFY(zfs_refcount_not_held(
&mc->mc_alloc_slots[zio->io_allocator], zio));
}

for (int c = 0; c < ZIO_CHILD_TYPES; c++)