Remove refcount from spa_config_*()

The only reason for spa_config_*() to use a refcount instead of a simple
non-atomic variable (already serialized by scl_lock) for scl_count is
reference tracking, which has been hard-disabled for the last 8 years.
Switching to a simple int scl_count reduces the lock hold time by avoiding
atomic operations, and also makes the structure fit into a single cache
line, reducing lock contention.

Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Reviewed-by: Matthew Ahrens <mahrens@delphix.com>
Reviewed-by: Mark Maybee <mark.maybee@delphix.com>
Signed-off-by: Alexander Motin <mav@FreeBSD.org>
Sponsored-By: iXsystems, Inc.
Closes #12287
Author: Alexander Motin, 2021-07-01 11:16:54 -04:00 (committed via GitHub)
commit 42afb12da7, parent cfc564f9b1
2 changed files with 11 additions and 12 deletions
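
Before the diff itself, a stand-alone sketch of the pattern the commit relies on may help: a counter that is only ever read and written while a mutex is held needs no atomic refcount machinery. The snippet below is an illustrative analog using POSIX threads, not ZFS code; every name in it (cfg_lock_t, cfg_enter_read(), cfg_exit_read()) is hypothetical, and the writer-side bookkeeping is elided.

/*
 * Illustrative analog only (hypothetical names, POSIX threads): the
 * reader count is a plain int because every access happens under
 * "lock", mirroring how scl_count is always accessed under scl_lock.
 */
#include <assert.h>
#include <pthread.h>

typedef struct cfg_lock {
	pthread_mutex_t	lock;		/* plays the role of scl_lock */
	pthread_cond_t	cv;		/* plays the role of scl_cv */
	int		writer_held;	/* stands in for scl_writer */
	int		count;		/* plain int, like the new scl_count */
} cfg_lock_t;

static void
cfg_enter_read(cfg_lock_t *cl)
{
	pthread_mutex_lock(&cl->lock);
	while (cl->writer_held)		/* wait out any writer */
		pthread_cond_wait(&cl->cv, &cl->lock);
	cl->count++;			/* no atomics: serialized by the mutex */
	pthread_mutex_unlock(&cl->lock);
}

static void
cfg_exit_read(cfg_lock_t *cl)
{
	pthread_mutex_lock(&cl->lock);
	assert(cl->count > 0);
	if (--cl->count == 0)		/* last reader wakes any waiter */
		pthread_cond_broadcast(&cl->cv);
	pthread_mutex_unlock(&cl->lock);
}

The real spa_config_enter()/spa_config_exit() in the diff below follow the same shape, with the additional writer bookkeeping (scl_writer, scl_write_wanted) that the sketch omits.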


@@ -141,9 +141,9 @@ typedef struct spa_config_lock {
 	kmutex_t	scl_lock;
 	kthread_t	*scl_writer;
 	int		scl_write_wanted;
+	int		scl_count;
 	kcondvar_t	scl_cv;
-	zfs_refcount_t	scl_count;
-} spa_config_lock_t;
+} ____cacheline_aligned spa_config_lock_t;
 
 typedef struct spa_config_dirent {
 	list_node_t	scd_link;
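
The other half of the struct change, the ____cacheline_aligned annotation, aligns each spa_config_lock_t to a cache-line boundary so that neighboring locks in the per-spa spa_config_lock[] array do not share a cache line; with zfs_refcount_t (which embeds its own mutex and tracking list) replaced by a plain int, the whole structure now fits in one line, as the commit message notes. How the macro expands is platform-specific; the definition below is only a hypothetical sketch of the usual form, not the one OpenZFS actually uses.

/*
 * Hypothetical sketch only -- the real ____cacheline_aligned comes from
 * platform headers and may use a different cache-line size or mechanism.
 */
#ifndef ____cacheline_aligned
#define	____cacheline_aligned	__attribute__((__aligned__(64)))
#endif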


@@ -444,9 +444,9 @@ spa_config_lock_init(spa_t *spa)
 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
 		mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
 		cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
-		zfs_refcount_create_untracked(&scl->scl_count);
 		scl->scl_writer = NULL;
 		scl->scl_write_wanted = 0;
+		scl->scl_count = 0;
 	}
 }
@@ -457,9 +457,9 @@ spa_config_lock_destroy(spa_t *spa)
 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
 		mutex_destroy(&scl->scl_lock);
 		cv_destroy(&scl->scl_cv);
-		zfs_refcount_destroy(&scl->scl_count);
 		ASSERT(scl->scl_writer == NULL);
 		ASSERT(scl->scl_write_wanted == 0);
+		ASSERT(scl->scl_count == 0);
 	}
 }
@@ -480,7 +480,7 @@ spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
 			}
 		} else {
 			ASSERT(scl->scl_writer != curthread);
-			if (!zfs_refcount_is_zero(&scl->scl_count)) {
+			if (scl->scl_count != 0) {
 				mutex_exit(&scl->scl_lock);
 				spa_config_exit(spa, locks & ((1 << i) - 1),
 				    tag);
@@ -488,7 +488,7 @@ spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
 			}
 			scl->scl_writer = curthread;
 		}
-		(void) zfs_refcount_add(&scl->scl_count, tag);
+		scl->scl_count++;
 		mutex_exit(&scl->scl_lock);
 	}
 	return (1);
@@ -514,14 +514,14 @@ spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw)
 			}
 		} else {
 			ASSERT(scl->scl_writer != curthread);
-			while (!zfs_refcount_is_zero(&scl->scl_count)) {
+			while (scl->scl_count != 0) {
 				scl->scl_write_wanted++;
 				cv_wait(&scl->scl_cv, &scl->scl_lock);
 				scl->scl_write_wanted--;
 			}
 			scl->scl_writer = curthread;
 		}
-		(void) zfs_refcount_add(&scl->scl_count, tag);
+		scl->scl_count++;
 		mutex_exit(&scl->scl_lock);
 	}
 	ASSERT3U(wlocks_held, <=, locks);
@@ -535,8 +535,8 @@ spa_config_exit(spa_t *spa, int locks, const void *tag)
 		if (!(locks & (1 << i)))
 			continue;
 		mutex_enter(&scl->scl_lock);
-		ASSERT(!zfs_refcount_is_zero(&scl->scl_count));
-		if (zfs_refcount_remove(&scl->scl_count, tag) == 0) {
+		ASSERT(scl->scl_count > 0);
+		if (--scl->scl_count == 0) {
 			ASSERT(scl->scl_writer == NULL ||
 			    scl->scl_writer == curthread);
 			scl->scl_writer = NULL;	/* OK in either case */
@@ -555,8 +555,7 @@ spa_config_held(spa_t *spa, int locks, krw_t rw)
 		spa_config_lock_t *scl = &spa->spa_config_lock[i];
 		if (!(locks & (1 << i)))
 			continue;
-		if ((rw == RW_READER &&
-		    !zfs_refcount_is_zero(&scl->scl_count)) ||
+		if ((rw == RW_READER && scl->scl_count != 0) ||
 		    (rw == RW_WRITER && scl->scl_writer == curthread))
 			locks_held |= 1 << i;
 	}
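
For callers nothing changes: spa_config_enter()/spa_config_exit() keep their tag argument, but with the tracking refcount gone the tag is no longer recorded anywhere. A typical caller, shown here only as an illustrative sketch of the existing usage pattern, looks like:

	/* Illustrative caller pattern; unchanged by this commit. */
	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
	/* ... inspect vdev state while holding the config lock ... */
	spa_config_exit(spa, SCL_VDEV, FTAG);

Assertions built on spa_config_held() likewise keep working; they now test the plain scl_count under the reader case instead of the refcount, as the last hunk shows.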