diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c
index 83d057a74d96..ae25957dc136 100644
--- a/cmd/ztest/ztest.c
+++ b/cmd/ztest/ztest.c
@@ -1323,7 +1323,7 @@ ztest_dmu_objset_own(const char *name, dmu_objset_type_t type,
  */
 typedef struct {
 	list_node_t z_lnode;
-	refcount_t z_refcnt;
+	zfs_refcount_t z_refcnt;
 	uint64_t z_object;
 	zfs_rlock_t z_range_lock;
 } ztest_znode_t;
@@ -1382,13 +1382,13 @@ ztest_znode_get(ztest_ds_t *zd, uint64_t object)
 	for (zp = list_head(&zll->z_list); (zp);
 	    zp = list_next(&zll->z_list, zp)) {
 		if (zp->z_object == object) {
-			refcount_add(&zp->z_refcnt, RL_TAG);
+			zfs_refcount_add(&zp->z_refcnt, RL_TAG);
 			break;
 		}
 	}
 	if (zp == NULL) {
 		zp = ztest_znode_init(object);
-		refcount_add(&zp->z_refcnt, RL_TAG);
+		zfs_refcount_add(&zp->z_refcnt, RL_TAG);
 		list_insert_head(&zll->z_list, zp);
 	}
 	mutex_exit(&zll->z_lock);
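
The hunk above covers the hold side of ztest's find-or-create pattern. For orientation only, a hedged sketch of what the matching release side of such a pattern could look like; this is hypothetical code, not taken from ztest.c. The ztest_zll_t parameter type is inferred from the zll variable above, and example_znode_fini() is a stand-in for whatever destructor pairs with ztest_znode_init():

    /* Hypothetical release path for the hold pattern above. */
    static void
    example_znode_put(ztest_zll_t *zll, ztest_znode_t *zp)
    {
        mutex_enter(&zll->z_lock);
        /* refcount_remove() returns the count remaining after the drop. */
        if (refcount_remove(&zp->z_refcnt, RL_TAG) == 0) {
            list_remove(&zll->z_list, zp);
            example_znode_fini(zp);    /* hypothetical teardown helper */
        }
        mutex_exit(&zll->z_lock);
    }
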
diff --git a/include/linux/vfs_compat.h b/include/linux/vfs_compat.h
index 90b3cca78c08..c01f5850881e 100644
--- a/include/linux/vfs_compat.h
+++ b/include/linux/vfs_compat.h
@@ -297,9 +297,6 @@ lseek_execute(
  * This is several orders of magnitude larger than expected grace period.
  * At 60 seconds the kernel will also begin issuing RCU stall warnings.
  */
-#ifdef refcount_t
-#undef refcount_t
-#endif
 
 #include <linux/posix_acl.h>
 
@@ -430,8 +427,6 @@ typedef mode_t zpl_equivmode_t;
 #define	zpl_posix_acl_valid(ip, acl)  posix_acl_valid(acl)
 #endif
 
-#define	refcount_t	zfs_refcount_t
-
 #endif /* CONFIG_FS_POSIX_ACL */
 
 /*
diff --git a/include/sys/abd.h b/include/sys/abd.h
index 077bb9d17618..3d9fdbf102aa 100644
--- a/include/sys/abd.h
+++ b/include/sys/abd.h
@@ -51,7 +51,7 @@ typedef struct abd {
 	abd_flags_t	abd_flags;
 	uint_t		abd_size;	/* excludes scattered abd_offset */
 	struct abd	*abd_parent;
-	refcount_t	abd_children;
+	zfs_refcount_t	abd_children;
 	union {
 		struct abd_scatter {
 			uint_t		abd_offset;
diff --git a/include/sys/arc.h b/include/sys/arc.h
index a5bdefb56f4b..dc2fd03647f3 100644
--- a/include/sys/arc.h
+++ b/include/sys/arc.h
@@ -87,7 +87,7 @@ struct arc_prune {
 	void			*p_private;
 	uint64_t		p_adjust;
 	list_node_t		p_node;
-	refcount_t		p_refcnt;
+	zfs_refcount_t		p_refcnt;
 };
 
 typedef enum arc_strategy {
diff --git a/include/sys/arc_impl.h b/include/sys/arc_impl.h
index 52863bba4ee5..cd42c0c01a20 100644
--- a/include/sys/arc_impl.h
+++ b/include/sys/arc_impl.h
@@ -75,12 +75,12 @@ typedef struct arc_state {
 	/*
 	 * total amount of evictable data in this state
 	 */
-	refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
+	zfs_refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
 	/*
 	 * total amount of data in this state; this includes: evictable,
 	 * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
 	 */
-	refcount_t arcs_size;
+	zfs_refcount_t arcs_size;
 	/*
 	 * supports the "dbufs" kstat
 	 */
@@ -168,7 +168,7 @@ typedef struct l1arc_buf_hdr {
 	uint32_t		b_l2_hits;
 
 	/* self protecting */
-	refcount_t		b_refcnt;
+	zfs_refcount_t		b_refcnt;
 
 	arc_callback_t		*b_acb;
 	abd_t			*b_pabd;
@@ -215,7 +215,7 @@ typedef struct l2arc_dev {
 	kmutex_t		l2ad_mtx;	/* lock for buffer list */
 	list_t			l2ad_buflist;	/* buffer list */
 	list_node_t		l2ad_node;	/* device list node */
-	refcount_t		l2ad_alloc;	/* allocated bytes */
+	zfs_refcount_t		l2ad_alloc;	/* allocated bytes */
 } l2arc_dev_t;
 
 typedef struct l2arc_buf_hdr {
diff --git a/include/sys/dbuf.h b/include/sys/dbuf.h
index ab0950c83c20..eea9e265b008 100644
--- a/include/sys/dbuf.h
+++ b/include/sys/dbuf.h
@@ -230,7 +230,7 @@ typedef struct dmu_buf_impl {
 	 * If nonzero, the buffer can't be destroyed.
 	 * Protected by db_mtx.
 	 */
-	refcount_t db_holds;
+	zfs_refcount_t db_holds;
 
 	/* buffer holding our data */
 	arc_buf_t *db_buf;
diff --git a/include/sys/dmu_tx.h b/include/sys/dmu_tx.h
index 6a4bd3fac2c0..36d205e95017 100644
--- a/include/sys/dmu_tx.h
+++ b/include/sys/dmu_tx.h
@@ -97,8 +97,8 @@ typedef struct dmu_tx_hold {
 	dmu_tx_t *txh_tx;
 	list_node_t txh_node;
 	struct dnode *txh_dnode;
-	refcount_t txh_space_towrite;
-	refcount_t txh_memory_tohold;
+	zfs_refcount_t txh_space_towrite;
+	zfs_refcount_t txh_memory_tohold;
 	enum dmu_tx_hold_type txh_type;
 	uint64_t txh_arg1;
 	uint64_t txh_arg2;
diff --git a/include/sys/dnode.h b/include/sys/dnode.h
index 0774e663f1b6..48ef927d4ad8 100644
--- a/include/sys/dnode.h
+++ b/include/sys/dnode.h
@@ -335,8 +335,8 @@ struct dnode {
 	uint8_t *dn_dirtyctx_firstset;		/* dbg: contents meaningless */
 
 	/* protected by own devices */
-	refcount_t dn_tx_holds;
-	refcount_t dn_holds;
+	zfs_refcount_t dn_tx_holds;
+	zfs_refcount_t dn_holds;
 
 	kmutex_t dn_dbufs_mtx;
 	/*
diff --git a/include/sys/dsl_crypt.h b/include/sys/dsl_crypt.h
index 8766ce51ea9a..c6d2b0a16ac7 100644
--- a/include/sys/dsl_crypt.h
+++ b/include/sys/dsl_crypt.h
@@ -62,7 +62,7 @@ typedef struct dsl_wrapping_key {
 	crypto_key_t wk_key;
 
 	/* refcount of number of dsl_crypto_key_t's holding this struct */
-	refcount_t wk_refcnt;
+	zfs_refcount_t wk_refcnt;
 
 	/* dsl directory object that owns this wrapping key */
 	uint64_t wk_ddobj;
@@ -112,7 +112,7 @@ typedef struct dsl_crypto_key {
 	avl_node_t dck_avl_link;
 
 	/* refcount of dsl_key_mapping_t's holding this key */
-	refcount_t dck_holds;
+	zfs_refcount_t dck_holds;
 
 	/* master key used to derive encryption keys */
 	zio_crypt_key_t dck_key;
@@ -134,7 +134,7 @@ typedef struct dsl_key_mapping {
 	avl_node_t km_avl_link;
 
 	/* refcount of how many users are depending on this mapping */
-	refcount_t km_refcnt;
+	zfs_refcount_t km_refcnt;
 
 	/* dataset this crypto key belongs to (index) */
 	uint64_t km_dsobj;
diff --git a/include/sys/dsl_dataset.h b/include/sys/dsl_dataset.h
index dbe4cb706a1f..768241483a2f 100644
--- a/include/sys/dsl_dataset.h
+++ b/include/sys/dsl_dataset.h
@@ -211,7 +211,7 @@ typedef struct dsl_dataset {
 	 * Owning counts as a long hold.  See the comments above
 	 * dsl_pool_hold() for details.
 	 */
-	refcount_t ds_longholds;
+	zfs_refcount_t ds_longholds;
 
 	/* no locking; only for making guesses */
 	uint64_t ds_trysnap_txg;
diff --git a/include/sys/metaslab_impl.h b/include/sys/metaslab_impl.h
index cc6e8b796d40..aa1c82a0258e 100644
--- a/include/sys/metaslab_impl.h
+++ b/include/sys/metaslab_impl.h
@@ -184,7 +184,7 @@ struct metaslab_class {
 	 * number of allocations allowed.
 	 */
 	uint64_t		*mc_alloc_max_slots;
-	refcount_t		*mc_alloc_slots;
+	zfs_refcount_t		*mc_alloc_slots;
 
 	uint64_t		mc_alloc_groups; /* # of allocatable groups */
 
@@ -256,7 +256,7 @@ struct metaslab_group {
 	 */
 	uint64_t		mg_max_alloc_queue_depth;
 	uint64_t		*mg_cur_max_alloc_queue_depth;
-	refcount_t		*mg_alloc_queue_depth;
+	zfs_refcount_t		*mg_alloc_queue_depth;
 	int			mg_allocators;
 	/*
 	 * A metaslab group that can no longer allocate the minimum block
diff --git a/include/sys/refcount.h b/include/sys/refcount.h
index 02002ec2ffd8..e20ffbc30f9c 100644
--- a/include/sys/refcount.h
+++ b/include/sys/refcount.h
@@ -41,17 +41,6 @@ extern "C" {
  */
 #define	FTAG ((char *)(uintptr_t)__func__)
 
-/*
- * Starting with 4.11, torvalds/linux@f405df5, the linux kernel defines a
- * refcount_t type of its own.  The macro below effectively changes references
- * in the ZFS code from refcount_t to zfs_refcount_t at compile time, so that
- * existing code need not be altered, reducing conflicts when landing openZFS
- * patches.
- */
-
-#define	refcount_t	zfs_refcount_t
-#define	refcount_add	zfs_refcount_add
-
 #ifdef	ZFS_DEBUG
 typedef struct reference {
 	list_node_t ref_link;
@@ -69,23 +58,28 @@ typedef struct refcount {
 	uint64_t rc_removed_count;
 } zfs_refcount_t;
 
-/* Note: refcount_t must be initialized with refcount_create[_untracked]() */
+/*
+ * Note: zfs_refcount_t must be initialized with
+ * refcount_create[_untracked]()
+ */
 
-void refcount_create(refcount_t *rc);
-void refcount_create_untracked(refcount_t *rc);
-void refcount_create_tracked(refcount_t *rc);
-void refcount_destroy(refcount_t *rc);
-void refcount_destroy_many(refcount_t *rc, uint64_t number);
-int refcount_is_zero(refcount_t *rc);
-int64_t refcount_count(refcount_t *rc);
-int64_t zfs_refcount_add(refcount_t *rc, void *holder_tag);
-int64_t refcount_remove(refcount_t *rc, void *holder_tag);
-int64_t refcount_add_many(refcount_t *rc, uint64_t number, void *holder_tag);
-int64_t refcount_remove_many(refcount_t *rc, uint64_t number, void *holder_tag);
-void refcount_transfer(refcount_t *dst, refcount_t *src);
-void refcount_transfer_ownership(refcount_t *, void *, void *);
-boolean_t refcount_held(refcount_t *, void *);
-boolean_t refcount_not_held(refcount_t *, void *);
+void refcount_create(zfs_refcount_t *rc);
+void refcount_create_untracked(zfs_refcount_t *rc);
+void refcount_create_tracked(zfs_refcount_t *rc);
+void refcount_destroy(zfs_refcount_t *rc);
+void refcount_destroy_many(zfs_refcount_t *rc, uint64_t number);
+int refcount_is_zero(zfs_refcount_t *rc);
+int64_t refcount_count(zfs_refcount_t *rc);
+int64_t zfs_refcount_add(zfs_refcount_t *rc, void *holder_tag);
+int64_t refcount_remove(zfs_refcount_t *rc, void *holder_tag);
+int64_t refcount_add_many(zfs_refcount_t *rc, uint64_t number,
+    void *holder_tag);
+int64_t refcount_remove_many(zfs_refcount_t *rc, uint64_t number,
+    void *holder_tag);
+void refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src);
+void refcount_transfer_ownership(zfs_refcount_t *, void *, void *);
+boolean_t refcount_held(zfs_refcount_t *, void *);
+boolean_t refcount_not_held(zfs_refcount_t *, void *);
 
 void refcount_init(void);
 void refcount_fini(void);
@@ -94,7 +88,7 @@ void refcount_fini(void);
 
 typedef struct refcount {
 	uint64_t rc_count;
-} refcount_t;
+} zfs_refcount_t;
 
 #define	refcount_create(rc) ((rc)->rc_count = 0)
 #define	refcount_create_untracked(rc) ((rc)->rc_count = 0)
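
With the compatibility macros gone from this header, callers now spell out zfs_refcount_t themselves; apart from zfs_refcount_add(), the functions keep their unprefixed refcount_ names for now. A minimal hedged sketch of the lifecycle, using only declarations from this header plus the usual ZFS ASSERT/VERIFY macros (the function itself is hypothetical):

    #include <sys/refcount.h>

    static void
    example_refcount_lifecycle(void)
    {
        zfs_refcount_t rc;

        refcount_create(&rc);                /* must be created before use */
        VERIFY3S(zfs_refcount_add(&rc, FTAG), ==, 1);  /* returns the new count */
        ASSERT(!refcount_is_zero(&rc));
        VERIFY3S(refcount_remove(&rc, FTAG), ==, 0);   /* returns what remains */
        refcount_destroy(&rc);               /* count is expected to be zero here */
    }
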
diff --git a/include/sys/rrwlock.h b/include/sys/rrwlock.h
index 7a328fd68030..e1c1756cf29a 100644
--- a/include/sys/rrwlock.h
+++ b/include/sys/rrwlock.h
@@ -57,8 +57,8 @@ typedef struct rrwlock {
 	kmutex_t	rr_lock;
 	kcondvar_t	rr_cv;
 	kthread_t	*rr_writer;
-	refcount_t	rr_anon_rcount;
-	refcount_t	rr_linked_rcount;
+	zfs_refcount_t	rr_anon_rcount;
+	zfs_refcount_t	rr_linked_rcount;
 	boolean_t	rr_writer_wanted;
 	boolean_t	rr_track_all;
 } rrwlock_t;
diff --git a/include/sys/sa_impl.h b/include/sys/sa_impl.h
index b68b7610b25e..7eddd8750faa 100644
--- a/include/sys/sa_impl.h
+++ b/include/sys/sa_impl.h
@@ -110,7 +110,7 @@ typedef struct sa_idx_tab {
 	list_node_t	sa_next;
 	sa_lot_t	*sa_layout;
 	uint16_t	*sa_variable_lengths;
-	refcount_t	sa_refcount;
+	zfs_refcount_t	sa_refcount;
 	uint32_t	*sa_idx_tab;	/* array of offsets */
 } sa_idx_tab_t;
 
diff --git a/include/sys/spa_impl.h b/include/sys/spa_impl.h
index 676e8b8a228e..9dbdcfcf5284 100644
--- a/include/sys/spa_impl.h
+++ b/include/sys/spa_impl.h
@@ -139,7 +139,7 @@ typedef struct spa_config_lock {
 	kthread_t	*scl_writer;
 	int		scl_write_wanted;
 	kcondvar_t	scl_cv;
-	refcount_t	scl_count;
+	zfs_refcount_t	scl_count;
 } spa_config_lock_t;
 
 typedef struct spa_config_dirent {
@@ -387,12 +387,12 @@ struct spa {
 
 	/*
 	 * spa_refcount & spa_config_lock must be the last elements
-	 * because refcount_t changes size based on compilation options.
+	 * because zfs_refcount_t changes size based on compilation options.
 	 * In order for the MDB module to function correctly, the other
 	 * fields must remain in the same location.
 	 */
 	spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */
-	refcount_t	spa_refcount;		/* number of opens */
+	zfs_refcount_t	spa_refcount;		/* number of opens */
 
 	taskq_t		*spa_upgrade_taskq;	/* taskq for upgrade jobs */
 };
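
The comment above is why the rename has to keep an eye on layout: in non-debug builds zfs_refcount_t is the bare counter shown at the end of the refcount.h hunk, while under ZFS_DEBUG it also carries a mutex, tracking lists, and extra counters (see the rc_mtx, rc_list, and rc_tracked uses in module/zfs/refcount.c below), so sizeof (zfs_refcount_t) varies with build options. A hedged, userland-only illustration of why such a member is kept last; struct spa_like and its field set are hypothetical, and only the untracked typedef is taken verbatim from the header:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    #ifdef ZFS_DEBUG
    typedef struct refcount {     /* tracked variant: larger */
        uint64_t rc_count;
        uint64_t rc_removed_count;
        /* the real header also embeds a mutex and two reference lists */
    } zfs_refcount_t;
    #else
    typedef struct refcount {     /* untracked variant: a bare counter */
        uint64_t rc_count;
    } zfs_refcount_t;
    #endif

    struct spa_like {                     /* hypothetical stand-in for struct spa */
        uint64_t        fixed_fields[8];
        zfs_refcount_t  spa_refcount;     /* size-varying member kept last */
    };

    int
    main(void)
    {
        /*
         * The offsets of everything placed before spa_refcount are the same
         * in both builds, which is what layout-sensitive consumers such as
         * the MDB module depend on.
         */
        printf("offsetof = %zu, sizeof (zfs_refcount_t) = %zu\n",
            offsetof(struct spa_like, spa_refcount), sizeof (zfs_refcount_t));
        return (0);
    }
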
diff --git a/include/sys/zap.h b/include/sys/zap.h
index 43b7fbd263c2..7acc3becb5a1 100644
--- a/include/sys/zap.h
+++ b/include/sys/zap.h
@@ -226,7 +226,7 @@ int zap_lookup_norm_by_dnode(dnode_t *dn, const char *name,
     boolean_t *ncp);
 
 int zap_count_write_by_dnode(dnode_t *dn, const char *name,
-    int add, refcount_t *towrite, refcount_t *tooverwrite);
+    int add, zfs_refcount_t *towrite, zfs_refcount_t *tooverwrite);
 
 /*
  * Create an attribute with the given name and value.
diff --git a/include/sys/zfs_znode.h b/include/sys/zfs_znode.h
index e82ac9941a2b..01f4328f0405 100644
--- a/include/sys/zfs_znode.h
+++ b/include/sys/zfs_znode.h
@@ -223,7 +223,7 @@ typedef struct znode_hold {
 	uint64_t	zh_obj;		/* object id */
 	kmutex_t	zh_lock;	/* lock serializing object access */
 	avl_node_t	zh_node;	/* avl tree linkage */
-	refcount_t	zh_refcount;	/* active consumer reference count */
+	zfs_refcount_t	zh_refcount;	/* active consumer reference count */
 } znode_hold_t;
 
 static inline uint64_t
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index 5e53f987961a..cd094f39ee71 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -2403,7 +2403,7 @@ add_reference(arc_buf_hdr_t *hdr, void *tag)
 
 	state = hdr->b_l1hdr.b_state;
 
-	if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
+	if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
 	    (state != arc_anon)) {
 		/* We don't use the L2-only state list. */
 		if (state != arc_l2c_only) {
@@ -2997,7 +2997,7 @@ arc_return_buf(arc_buf_t *buf, void *tag)
 
 	ASSERT3P(buf->b_data, !=, NULL);
 	ASSERT(HDR_HAS_L1HDR(hdr));
-	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
+	(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
 	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
 
 	arc_loaned_bytes_update(-arc_buf_size(buf));
@@ -3011,7 +3011,7 @@ arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
 
 	ASSERT3P(buf->b_data, !=, NULL);
 	ASSERT(HDR_HAS_L1HDR(hdr));
-	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
+	(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
 	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
 
 	arc_loaned_bytes_update(arc_buf_size(buf));
@@ -3558,11 +3558,11 @@ arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
 	nhdr->b_l1hdr.b_pabd = hdr->b_l1hdr.b_pabd;
 
 	/*
-	 * This refcount_add() exists only to ensure that the individual
+	 * This zfs_refcount_add() exists only to ensure that the individual
 	 * arc buffers always point to a header that is referenced, avoiding
 	 * a small race condition that could trigger ASSERTs.
 	 */
-	(void) refcount_add(&nhdr->b_l1hdr.b_refcnt, FTAG);
+	(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, FTAG);
 	nhdr->b_l1hdr.b_buf = hdr->b_l1hdr.b_buf;
 	for (buf = nhdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
 		mutex_enter(&buf->b_evict_lock);
@@ -4313,7 +4313,7 @@ arc_prune_async(int64_t adjust)
 		if (refcount_count(&ap->p_refcnt) >= 2)
 			continue;
 
-		refcount_add(&ap->p_refcnt, ap->p_pfunc);
+		zfs_refcount_add(&ap->p_refcnt, ap->p_pfunc);
 		ap->p_adjust = adjust;
 		if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
 		    ap, TQ_SLEEP) == TASKQID_INVALID) {
@@ -6536,7 +6536,7 @@ arc_add_prune_callback(arc_prune_func_t *func, void *private)
 	refcount_create(&p->p_refcnt);
 
 	mutex_enter(&arc_prune_mtx);
-	refcount_add(&p->p_refcnt, &arc_prune_list);
+	zfs_refcount_add(&p->p_refcnt, &arc_prune_list);
 	list_insert_head(&arc_prune_list, p);
 	mutex_exit(&arc_prune_mtx);
 
@@ -6808,7 +6808,7 @@ arc_release(arc_buf_t *buf, void *tag)
 		nhdr->b_l1hdr.b_mfu_hits = 0;
 		nhdr->b_l1hdr.b_mfu_ghost_hits = 0;
 		nhdr->b_l1hdr.b_l2_hits = 0;
-		(void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
+		(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
 		buf->b_hdr = nhdr;
 
 		mutex_exit(&buf->b_evict_lock);
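
arc_return_buf() and arc_loan_inuse_buf() above move an existing hold between tags by adding under the new tag before dropping the old one, so the count never touches zero in between. A hedged sketch of that idiom in isolation (the function name is hypothetical; the calls are the ones declared in include/sys/refcount.h):

    /* Move one hold on rc from old_tag to new_tag without dropping to zero. */
    static void
    example_retag_hold(zfs_refcount_t *rc, void *old_tag, void *new_tag)
    {
        (void) zfs_refcount_add(rc, new_tag);   /* take the new hold first ... */
        (void) refcount_remove(rc, old_tag);    /* ... then release the old one */
    }

For a single tracked reference, the header also declares refcount_transfer_ownership(), which moves an existing reference from one holder tag to another in place.
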
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index f7376875afe9..db7df6023443 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -223,7 +223,7 @@ static boolean_t dbuf_evict_thread_exit;
  */
 typedef struct dbuf_cache {
 	multilist_t *cache;
-	refcount_t size;
+	zfs_refcount_t size;
 } dbuf_cache_t;
 dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
 
@@ -2784,7 +2784,7 @@ dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
 
 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
 	    refcount_count(&dn->dn_holds) > 0);
-	(void) refcount_add(&dn->dn_holds, db);
+	(void) zfs_refcount_add(&dn->dn_holds, db);
 	atomic_inc_32(&dn->dn_dbufs_count);
 
 	dprintf_dbuf(db, "db=%p\n", db);
@@ -3183,7 +3183,7 @@ dbuf_hold_impl_arg(struct dbuf_hold_arg *dh)
 		}
 		dh->dh_db->db_caching_status = DB_NO_CACHE;
 	}
-	(void) refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
+	(void) zfs_refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
 	DBUF_VERIFY(dh->dh_db);
 	mutex_exit(&dh->dh_db->db_mtx);
 
@@ -3308,7 +3308,7 @@ dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
 void
 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
 {
-	int64_t holds = refcount_add(&db->db_holds, tag);
+	int64_t holds = zfs_refcount_add(&db->db_holds, tag);
 	VERIFY3S(holds, >, 1);
 }
 
@@ -3328,7 +3328,7 @@ dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
 
 	if (found_db != NULL) {
 		if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
-			(void) refcount_add(&db->db_holds, tag);
+			(void) zfs_refcount_add(&db->db_holds, tag);
 			result = B_TRUE;
 		}
 		mutex_exit(&found_db->db_mtx);
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 8779eb3586ca..2ff484b63475 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -360,7 +360,7 @@ dmu_bonus_hold_impl(objset_t *os, uint64_t object, void *tag, uint32_t flags,
 	db = dn->dn_bonus;
 
 	/* as long as the bonus buf is held, the dnode will be held */
-	if (refcount_add(&db->db_holds, tag) == 1) {
+	if (zfs_refcount_add(&db->db_holds, tag) == 1) {
 		VERIFY(dnode_add_ref(dn, db));
 		atomic_inc_32(&dn->dn_dbufs_count);
 	}
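
dmu_bonus_hold_impl() above relies on zfs_refcount_add() returning the post-increment count, so a result of 1 identifies the first hold and gates the one-time dnode hold. A minimal hedged sketch of that first-hold pattern with hypothetical names:

    static void
    example_first_hold(zfs_refcount_t *rc, void *tag,
        void (*on_first_hold)(void *), void *arg)
    {
        /* zfs_refcount_add() returns the count after the increment. */
        if (zfs_refcount_add(rc, tag) == 1)
            on_first_hold(arg);    /* one-time setup, e.g. a backing hold */
    }
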
diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index c268f3c40465..3dc3f5958845 100644
--- a/module/zfs/dmu_tx.c
+++ b/module/zfs/dmu_tx.c
@@ -114,7 +114,7 @@ dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
 	dmu_tx_hold_t *txh;
 
 	if (dn != NULL) {
-		(void) refcount_add(&dn->dn_holds, tx);
+		(void) zfs_refcount_add(&dn->dn_holds, tx);
 		if (tx->tx_txg != 0) {
 			mutex_enter(&dn->dn_mtx);
 			/*
@@ -124,7 +124,7 @@ dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
 			 */
 			ASSERT(dn->dn_assigned_txg == 0);
 			dn->dn_assigned_txg = tx->tx_txg;
-			(void) refcount_add(&dn->dn_tx_holds, tx);
+			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
 			mutex_exit(&dn->dn_mtx);
 		}
 	}
@@ -932,7 +932,7 @@ dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
 			if (dn->dn_assigned_txg == 0)
 				dn->dn_assigned_txg = tx->tx_txg;
 			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
-			(void) refcount_add(&dn->dn_tx_holds, tx);
+			(void) zfs_refcount_add(&dn->dn_tx_holds, tx);
 			mutex_exit(&dn->dn_mtx);
 		}
 		towrite += refcount_count(&txh->txh_space_towrite);
diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
index 4e2a733830b1..b0b7ea7102b2 100644
--- a/module/zfs/dnode.c
+++ b/module/zfs/dnode.c
@@ -1304,7 +1304,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
 		if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
 			return (SET_ERROR(EEXIST));
 		DNODE_VERIFY(dn);
-		(void) refcount_add(&dn->dn_holds, tag);
+		(void) zfs_refcount_add(&dn->dn_holds, tag);
 		*dnp = dn;
 		return (0);
 	}
@@ -1527,7 +1527,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
 		    ENOENT : EEXIST));
 	}
 
-	if (refcount_add(&dn->dn_holds, tag) == 1)
+	if (zfs_refcount_add(&dn->dn_holds, tag) == 1)
 		dbuf_add_ref(db, dnh);
 
 	mutex_exit(&dn->dn_mtx);
@@ -1567,7 +1567,7 @@ dnode_add_ref(dnode_t *dn, void *tag)
 		mutex_exit(&dn->dn_mtx);
 		return (FALSE);
 	}
-	VERIFY(1 < refcount_add(&dn->dn_holds, tag));
+	VERIFY(1 < zfs_refcount_add(&dn->dn_holds, tag));
 	mutex_exit(&dn->dn_mtx);
 	return (TRUE);
 }
diff --git a/module/zfs/dsl_crypt.c b/module/zfs/dsl_crypt.c
index f0878c934771..6beb958c1f54 100644
--- a/module/zfs/dsl_crypt.c
+++ b/module/zfs/dsl_crypt.c
@@ -74,7 +74,7 @@
 static void
 dsl_wrapping_key_hold(dsl_wrapping_key_t *wkey, void *tag)
 {
-	(void) refcount_add(&wkey->wk_refcnt, tag);
+	(void) zfs_refcount_add(&wkey->wk_refcnt, tag);
 }
 
 static void
@@ -605,7 +605,7 @@ dsl_crypto_key_open(objset_t *mos, dsl_wrapping_key_t *wkey,
 	dsl_wrapping_key_hold(wkey, dck);
 	dck->dck_wkey = wkey;
 	dck->dck_obj = dckobj;
-	refcount_add(&dck->dck_holds, tag);
+	zfs_refcount_add(&dck->dck_holds, tag);
 
 	*dck_out = dck;
 	return (0);
@@ -641,7 +641,7 @@ spa_keystore_dsl_key_hold_impl(spa_t *spa, uint64_t dckobj, void *tag,
 	}
 
 	/* increment the refcount */
-	refcount_add(&found_dck->dck_holds, tag);
+	zfs_refcount_add(&found_dck->dck_holds, tag);
 
 	*dck_out = found_dck;
 	return (0);
@@ -970,9 +970,9 @@ spa_keystore_create_mapping_impl(spa_t *spa, uint64_t dsobj,
 	found_km = avl_find(&spa->spa_keystore.sk_key_mappings, km, &where);
 	if (found_km != NULL) {
 		should_free = B_TRUE;
-		refcount_add(&found_km->km_refcnt, tag);
+		zfs_refcount_add(&found_km->km_refcnt, tag);
 	} else {
-		refcount_add(&km->km_refcnt, tag);
+		zfs_refcount_add(&km->km_refcnt, tag);
 		avl_insert(&spa->spa_keystore.sk_key_mappings, km, where);
 	}
 
@@ -1072,7 +1072,7 @@ spa_keystore_lookup_key(spa_t *spa, uint64_t dsobj, void *tag,
 	}
 
 	if (found_km && tag)
-		refcount_add(&found_km->km_key->dck_holds, tag);
+		zfs_refcount_add(&found_km->km_key->dck_holds, tag);
 
 	rw_exit(&spa->spa_keystore.sk_km_lock);
 
diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
index b6e3b9a5c7f3..7546a0765c25 100644
--- a/module/zfs/dsl_dataset.c
+++ b/module/zfs/dsl_dataset.c
@@ -727,7 +727,7 @@ void
 dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag)
 {
 	ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
-	(void) refcount_add(&ds->ds_longholds, tag);
+	(void) zfs_refcount_add(&ds->ds_longholds, tag);
 }
 
 void
diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c
index f3c869538ce1..ee12185cdde5 100644
--- a/module/zfs/dsl_scan.c
+++ b/module/zfs/dsl_scan.c
@@ -273,7 +273,7 @@ struct dsl_scan_io_queue {
 
 /* private data for dsl_scan_prefetch_cb() */
 typedef struct scan_prefetch_ctx {
-	refcount_t spc_refcnt;		/* refcount for memory management */
+	zfs_refcount_t spc_refcnt;	/* refcount for memory management */
 	dsl_scan_t *spc_scn;		/* dsl_scan_t for the pool */
 	boolean_t spc_root;		/* is this prefetch for an objset? */
 	uint8_t spc_indblkshift;	/* dn_indblkshift of current dnode */
@@ -1327,7 +1327,7 @@ scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag)
 
 	spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
 	refcount_create(&spc->spc_refcnt);
-	refcount_add(&spc->spc_refcnt, tag);
+	zfs_refcount_add(&spc->spc_refcnt, tag);
 	spc->spc_scn = scn;
 	if (dnp != NULL) {
 		spc->spc_datablkszsec = dnp->dn_datablkszsec;
@@ -1345,7 +1345,7 @@ scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag)
 static void
 scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, void *tag)
 {
-	refcount_add(&spc->spc_refcnt, tag);
+	zfs_refcount_add(&spc->spc_refcnt, tag);
 }
 
 static boolean_t
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index ac361abb67ef..f657128d040c 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -247,7 +247,7 @@ metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
 	mc->mc_ops = ops;
 	mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
 	mc->mc_alloc_slots = kmem_zalloc(spa->spa_alloc_count *
-	    sizeof (refcount_t), KM_SLEEP);
+	    sizeof (zfs_refcount_t), KM_SLEEP);
 	mc->mc_alloc_max_slots = kmem_zalloc(spa->spa_alloc_count *
 	    sizeof (uint64_t), KM_SLEEP);
 	for (int i = 0; i < spa->spa_alloc_count; i++)
@@ -268,7 +268,7 @@ metaslab_class_destroy(metaslab_class_t *mc)
 	for (int i = 0; i < mc->mc_spa->spa_alloc_count; i++)
 		refcount_destroy(&mc->mc_alloc_slots[i]);
 	kmem_free(mc->mc_alloc_slots, mc->mc_spa->spa_alloc_count *
-	    sizeof (refcount_t));
+	    sizeof (zfs_refcount_t));
 	kmem_free(mc->mc_alloc_max_slots, mc->mc_spa->spa_alloc_count *
 	    sizeof (uint64_t));
 	mutex_destroy(&mc->mc_lock);
@@ -648,8 +648,8 @@ metaslab_group_create(metaslab_class_t *mc, vdev_t *vd, int allocators)
 	mg->mg_no_free_space = B_TRUE;
 	mg->mg_allocators = allocators;
 
-	mg->mg_alloc_queue_depth = kmem_zalloc(allocators * sizeof (refcount_t),
-	    KM_SLEEP);
+	mg->mg_alloc_queue_depth = kmem_zalloc(allocators *
+	    sizeof (zfs_refcount_t), KM_SLEEP);
 	mg->mg_cur_max_alloc_queue_depth = kmem_zalloc(allocators *
 	    sizeof (uint64_t), KM_SLEEP);
 	for (int i = 0; i < allocators; i++) {
@@ -687,7 +687,7 @@ metaslab_group_destroy(metaslab_group_t *mg)
 		mg->mg_cur_max_alloc_queue_depth[i] = 0;
 	}
 	kmem_free(mg->mg_alloc_queue_depth, mg->mg_allocators *
-	    sizeof (refcount_t));
+	    sizeof (zfs_refcount_t));
 	kmem_free(mg->mg_cur_max_alloc_queue_depth, mg->mg_allocators *
 	    sizeof (uint64_t));
 
@@ -2905,7 +2905,7 @@ metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags,
 	if (!mg->mg_class->mc_alloc_throttle_enabled)
 		return;
 
-	(void) refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
+	(void) zfs_refcount_add(&mg->mg_alloc_queue_depth[allocator], tag);
 }
 
 static void
@@ -3852,7 +3852,7 @@ metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, int allocator,
 		 */
 		for (int d = 0; d < slots; d++) {
 			reserved_slots =
-			    refcount_add(&mc->mc_alloc_slots[allocator],
+			    zfs_refcount_add(&mc->mc_alloc_slots[allocator],
 			    zio);
 		}
 		zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
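
The metaslab hunks above size the per-allocator arrays with sizeof (zfs_refcount_t) and tear each slot down in a loop. A hedged sketch of that allocate/initialize/teardown pattern; the function names are hypothetical, kmem_zalloc()/kmem_free() appear in the hunks above, and the refcount calls are the ones declared in include/sys/refcount.h:

    static zfs_refcount_t *
    example_alloc_refcount_array(int count)
    {
        zfs_refcount_t *arr = kmem_zalloc(count * sizeof (zfs_refcount_t),
            KM_SLEEP);

        for (int i = 0; i < count; i++)
            refcount_create(&arr[i]);
        return (arr);
    }

    static void
    example_free_refcount_array(zfs_refcount_t *arr, int count)
    {
        for (int i = 0; i < count; i++)
            refcount_destroy(&arr[i]);
        kmem_free(arr, count * sizeof (zfs_refcount_t));
    }
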
diff --git a/module/zfs/refcount.c b/module/zfs/refcount.c
index a151aceaecfb..13f9bb6b76e3 100644
--- a/module/zfs/refcount.c
+++ b/module/zfs/refcount.c
@@ -55,7 +55,7 @@ refcount_fini(void)
 }
 
 void
-refcount_create(refcount_t *rc)
+refcount_create(zfs_refcount_t *rc)
 {
 	mutex_init(&rc->rc_mtx, NULL, MUTEX_DEFAULT, NULL);
 	list_create(&rc->rc_list, sizeof (reference_t),
@@ -68,21 +68,21 @@ refcount_create(refcount_t *rc)
 }
 
 void
-refcount_create_tracked(refcount_t *rc)
+refcount_create_tracked(zfs_refcount_t *rc)
 {
 	refcount_create(rc);
 	rc->rc_tracked = B_TRUE;
 }
 
 void
-refcount_create_untracked(refcount_t *rc)
+refcount_create_untracked(zfs_refcount_t *rc)
 {
 	refcount_create(rc);
 	rc->rc_tracked = B_FALSE;
 }
 
 void
-refcount_destroy_many(refcount_t *rc, uint64_t number)
+refcount_destroy_many(zfs_refcount_t *rc, uint64_t number)
 {
 	reference_t *ref;
 
@@ -103,25 +103,25 @@ refcount_destroy_many(refcount_t *rc, uint64_t number)
 }
 
 void
-refcount_destroy(refcount_t *rc)
+refcount_destroy(zfs_refcount_t *rc)
 {
 	refcount_destroy_many(rc, 0);
 }
 
 int
-refcount_is_zero(refcount_t *rc)
+refcount_is_zero(zfs_refcount_t *rc)
 {
 	return (rc->rc_count == 0);
 }
 
 int64_t
-refcount_count(refcount_t *rc)
+refcount_count(zfs_refcount_t *rc)
 {
 	return (rc->rc_count);
 }
 
 int64_t
-refcount_add_many(refcount_t *rc, uint64_t number, void *holder)
+refcount_add_many(zfs_refcount_t *rc, uint64_t number, void *holder)
 {
 	reference_t *ref = NULL;
 	int64_t count;
@@ -143,13 +143,13 @@ refcount_add_many(refcount_t *rc, uint64_t number, void *holder)
 }
 
 int64_t
-zfs_refcount_add(refcount_t *rc, void *holder)
+zfs_refcount_add(zfs_refcount_t *rc, void *holder)
 {
 	return (refcount_add_many(rc, 1, holder));
 }
 
 int64_t
-refcount_remove_many(refcount_t *rc, uint64_t number, void *holder)
+refcount_remove_many(zfs_refcount_t *rc, uint64_t number, void *holder)
 {
 	reference_t *ref;
 	int64_t count;
@@ -197,13 +197,13 @@ refcount_remove_many(refcount_t *rc, uint64_t number, void *holder)
 }
 
 int64_t
-refcount_remove(refcount_t *rc, void *holder)
+refcount_remove(zfs_refcount_t *rc, void *holder)
 {
 	return (refcount_remove_many(rc, 1, holder));
 }
 
 void
-refcount_transfer(refcount_t *dst, refcount_t *src)
+refcount_transfer(zfs_refcount_t *dst, zfs_refcount_t *src)
 {
 	int64_t count, removed_count;
 	list_t list, removed;
@@ -234,7 +234,7 @@ refcount_transfer(refcount_t *dst, refcount_t *src)
 }
 
 void
-refcount_transfer_ownership(refcount_t *rc, void *current_holder,
+refcount_transfer_ownership(zfs_refcount_t *rc, void *current_holder,
     void *new_holder)
 {
 	reference_t *ref;
@@ -264,7 +264,7 @@ refcount_transfer_ownership(refcount_t *rc, void *current_holder,
  * might be held.
  */
 boolean_t
-refcount_held(refcount_t *rc, void *holder)
+refcount_held(zfs_refcount_t *rc, void *holder)
 {
 	reference_t *ref;
 
@@ -292,7 +292,7 @@ refcount_held(refcount_t *rc, void *holder)
  * since the reference might not be held.
  */
 boolean_t
-refcount_not_held(refcount_t *rc, void *holder)
+refcount_not_held(zfs_refcount_t *rc, void *holder)
 {
 	reference_t *ref;
 
diff --git a/module/zfs/rrwlock.c b/module/zfs/rrwlock.c
index 704f76067bf0..effff3305224 100644
--- a/module/zfs/rrwlock.c
+++ b/module/zfs/rrwlock.c
@@ -183,9 +183,9 @@ rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
 	if (rrl->rr_writer_wanted || rrl->rr_track_all) {
 		/* may or may not be a re-entrant enter */
 		rrn_add(rrl, tag);
-		(void) refcount_add(&rrl->rr_linked_rcount, tag);
+		(void) zfs_refcount_add(&rrl->rr_linked_rcount, tag);
 	} else {
-		(void) refcount_add(&rrl->rr_anon_rcount, tag);
+		(void) zfs_refcount_add(&rrl->rr_anon_rcount, tag);
 	}
 	ASSERT(rrl->rr_writer == NULL);
 	mutex_exit(&rrl->rr_lock);
diff --git a/module/zfs/sa.c b/module/zfs/sa.c
index caa91bc4c4e1..0856a4b8ff74 100644
--- a/module/zfs/sa.c
+++ b/module/zfs/sa.c
@@ -1347,7 +1347,7 @@ sa_idx_tab_hold(objset_t *os, sa_idx_tab_t *idx_tab)
 	ASSERTV(sa_os_t *sa = os->os_sa);
 
 	ASSERT(MUTEX_HELD(&sa->sa_lock));
-	(void) refcount_add(&idx_tab->sa_refcount, NULL);
+	(void) zfs_refcount_add(&idx_tab->sa_refcount, NULL);
 }
 
 void
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index 343b01dd6aac..c19f48ac5264 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -81,7 +81,7 @@
  *	definition they must have an existing reference, and will never need
  *	to lookup a spa_t by name.
  *
- * spa_refcount (per-spa refcount_t protected by mutex)
+ * spa_refcount (per-spa zfs_refcount_t protected by mutex)
  *
  *	This reference count keeps track of any active users of the spa_t.  The
  *	spa_t cannot be destroyed or freed while this is non-zero.  Internally,
@@ -478,7 +478,7 @@ spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
 			}
 			scl->scl_writer = curthread;
 		}
-		(void) refcount_add(&scl->scl_count, tag);
+		(void) zfs_refcount_add(&scl->scl_count, tag);
 		mutex_exit(&scl->scl_lock);
 	}
 	return (1);
@@ -511,7 +511,7 @@ spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
 			}
 			scl->scl_writer = curthread;
 		}
-		(void) refcount_add(&scl->scl_count, tag);
+		(void) zfs_refcount_add(&scl->scl_count, tag);
 		mutex_exit(&scl->scl_lock);
 	}
 	ASSERT3U(wlocks_held, <=, locks);
@@ -841,7 +841,7 @@ spa_open_ref(spa_t *spa, void *tag)
 {
 	ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
 	    MUTEX_HELD(&spa_namespace_lock));
-	(void) refcount_add(&spa->spa_refcount, tag);
+	(void) zfs_refcount_add(&spa->spa_refcount, tag);
 }
 
 /*
diff --git a/module/zfs/zfs_ctldir.c b/module/zfs/zfs_ctldir.c
index f5cfdb55d797..6a7b7bbb5e71 100644
--- a/module/zfs/zfs_ctldir.c
+++ b/module/zfs/zfs_ctldir.c
@@ -117,7 +117,7 @@ typedef struct {
 	taskqid_t	se_taskqid;	/* scheduled unmount taskqid */
 	avl_node_t	se_node_name;	/* zfs_snapshots_by_name link */
 	avl_node_t	se_node_objsetid; /* zfs_snapshots_by_objsetid link */
-	refcount_t	se_refcount;	/* reference count */
+	zfs_refcount_t	se_refcount;	/* reference count */
 } zfs_snapentry_t;
 
 static void zfsctl_snapshot_unmount_delay_impl(zfs_snapentry_t *se, int delay);
@@ -166,7 +166,7 @@ zfsctl_snapshot_free(zfs_snapentry_t *se)
 static void
 zfsctl_snapshot_hold(zfs_snapentry_t *se)
 {
-	refcount_add(&se->se_refcount, NULL);
+	zfs_refcount_add(&se->se_refcount, NULL);
 }
 
 /*
@@ -189,7 +189,7 @@ static void
 zfsctl_snapshot_add(zfs_snapentry_t *se)
 {
 	ASSERT(RW_WRITE_HELD(&zfs_snapshot_lock));
-	refcount_add(&se->se_refcount, NULL);
+	zfs_refcount_add(&se->se_refcount, NULL);
 	avl_add(&zfs_snapshots_by_name, se);
 	avl_add(&zfs_snapshots_by_objsetid, se);
 }
@@ -266,7 +266,7 @@ zfsctl_snapshot_find_by_name(char *snapname)
 	search.se_name = snapname;
 	se = avl_find(&zfs_snapshots_by_name, &search, NULL);
 	if (se)
-		refcount_add(&se->se_refcount, NULL);
+		zfs_refcount_add(&se->se_refcount, NULL);
 
 	return (se);
 }
@@ -287,7 +287,7 @@ zfsctl_snapshot_find_by_objsetid(spa_t *spa, uint64_t objsetid)
 	search.se_objsetid = objsetid;
 	se = avl_find(&zfs_snapshots_by_objsetid, &search, NULL);
 	if (se)
-		refcount_add(&se->se_refcount, NULL);
+		zfs_refcount_add(&se->se_refcount, NULL);
 
 	return (se);
 }
diff --git a/module/zfs/zfs_znode.c b/module/zfs/zfs_znode.c
index f037d4c967cd..a49914762897 100644
--- a/module/zfs/zfs_znode.c
+++ b/module/zfs/zfs_znode.c
@@ -264,7 +264,7 @@ zfs_znode_hold_enter(zfsvfs_t *zfsvfs, uint64_t obj)
 		ASSERT3U(zh->zh_obj, ==, obj);
 		found = B_TRUE;
 	}
-	refcount_add(&zh->zh_refcount, NULL);
+	zfs_refcount_add(&zh->zh_refcount, NULL);
 	mutex_exit(&zfsvfs->z_hold_locks[i]);
 
 	if (found == B_TRUE)
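
Several of the hunks above (sa_idx_tab_hold(), zfsctl_snapshot_hold(), zfsctl_snapshot_add(), zfs_znode_hold_enter()) take their holds with a NULL holder tag. A minimal hedged sketch of that anonymous-hold idiom (the function name is hypothetical):

    static void
    example_anonymous_hold(zfs_refcount_t *rc)
    {
        /* NULL is accepted as a holder tag; it simply carries no identity. */
        (void) zfs_refcount_add(rc, NULL);
    }
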