Add a blocking wait bit to refcount.  This allows refs to be used as a simple barrier.

Reviewed by:	markj, kib
Discussed with:	jhb
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D21254
Jeff Roberson 2019-08-18 11:43:58 +00:00
parent 38b7749a82
commit 33205c60e7
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=351188
2 changed files with 115 additions and 25 deletions
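
The new interface lets a reference count double as a simple completion barrier: a waiter parks on the count itself and the final release wakes it.  The sketch below is illustration only and not taken from the tree; the work_batch structure, the function names, and the "btchdrn" wmesg are hypothetical.  refcount_init() and refcount_release() are pre-existing KPI, while refcount_acquiren() and refcount_wait() are introduced by the diff that follows.

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/refcount.h>

struct work_batch {
	volatile u_int	wb_refs;	/* one reference per in-flight worker */
};

static void
batch_start(struct work_batch *wb, u_int nworkers)
{

	refcount_init(&wb->wb_refs, 0);
	refcount_acquiren(&wb->wb_refs, nworkers);
	/* ... hand one unit of work to each worker thread ... */
}

static void
batch_worker_done(struct work_batch *wb)
{

	/* The last drop clears the waiter bit and wakes the drainer. */
	(void)refcount_release(&wb->wb_refs);
}

static void
batch_drain(struct work_batch *wb)
{

	/* Sleeps until wb_refs reaches zero; tolerates transient wakeups. */
	refcount_wait(&wb->wb_refs, "btchdrn", PWAIT);
}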


@@ -52,6 +52,7 @@ __FBSDID("$FreeBSD$");
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
@@ -333,6 +334,75 @@ pause_sbt(const char *wmesg, sbintime_t sbt, sbintime_t pr, int flags)
(flags & C_CATCH) ? PCATCH : 0, wmesg, sbt, pr, flags));
}

/*
 * Potentially release the last reference for refcount.  Check for
 * unlikely conditions and signal the caller as to whether it was
 * the final ref.
 */
bool
refcount_release_last(volatile u_int *count, u_int n, u_int old)
{
	u_int waiter;

	waiter = old & REFCOUNT_WAITER;
	old = REFCOUNT_COUNT(old);
	if (__predict_false(n > old || REFCOUNT_SATURATED(old))) {
		/*
		 * Avoid multiple destructor invocations if underflow occurred.
		 * This is not perfect since the memory backing the containing
		 * object may already have been reallocated.
		 */
		_refcount_update_saturated(count);
		return (false);
	}

	/*
	 * Attempt to atomically clear the waiter bit.  Wakeup waiters
	 * if we are successful.
	 */
	if (waiter != 0 && atomic_cmpset_int(count, REFCOUNT_WAITER, 0))
		wakeup(__DEVOLATILE(u_int *, count));

	/*
	 * Last reference.  Signal the user to call the destructor.
	 *
	 * Ensure that the destructor sees all updates.  The fence_rel
	 * at the start of refcount_releasen synchronizes with this fence.
	 */
	atomic_thread_fence_acq();
	return (true);
}

/*
 * Wait for a refcount wakeup.  This does not guarantee that the ref is still
 * zero on return and may be subject to transient wakeups.  Callers wanting
 * a precise answer should use refcount_wait().
 */
void
refcount_sleep(volatile u_int *count, const char *wmesg, int pri)
{
	void *wchan;
	u_int old;

	if (REFCOUNT_COUNT(*count) == 0)
		return;
	wchan = __DEVOLATILE(void *, count);
	sleepq_lock(wchan);
	old = *count;
	for (;;) {
		if (REFCOUNT_COUNT(old) == 0) {
			sleepq_release(wchan);
			return;
		}
		if (old & REFCOUNT_WAITER)
			break;
		if (atomic_fcmpset_int(count, &old, old | REFCOUNT_WAITER))
			break;
	}
	sleepq_add(wchan, NULL, wmesg, 0, 0);
	sleepq_wait(wchan, pri);
}

/*
* Make all threads sleeping on the specified identifier runnable.
*/
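
The sleep/wake handshake above — publish REFCOUNT_WAITER before sleeping, then have the final release clear the bit and call wakeup() — can be mimicked outside the kernel.  The sketch below is illustration only, not code from this commit: it uses C11 atomics with a pthread condition variable standing in for the sleepqueue, and it omits the saturation and underflow handling; all names are hypothetical.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define	WAITER		(1U << 31)
#define	COUNT(x)	((x) & ~WAITER)

static pthread_mutex_t	waitlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	waitcv = PTHREAD_COND_INITIALIZER;

/* Drop one reference; on the last drop, clear the waiter bit and wake sleepers. */
static bool
ref_release(atomic_uint *cnt)
{
	unsigned int old;

	old = atomic_fetch_sub_explicit(cnt, 1, memory_order_release);
	if (COUNT(old) != 1)
		return (false);
	if ((old & WAITER) != 0) {
		pthread_mutex_lock(&waitlock);
		atomic_fetch_and_explicit(cnt, ~WAITER, memory_order_relaxed);
		pthread_cond_broadcast(&waitcv);
		pthread_mutex_unlock(&waitlock);
	}
	atomic_thread_fence(memory_order_acquire);
	return (true);
}

/* Block until the count reaches zero, setting the waiter bit before sleeping. */
static void
ref_wait(atomic_uint *cnt)
{
	unsigned int old;

	pthread_mutex_lock(&waitlock);
	for (;;) {
		old = atomic_load(cnt);
		if (COUNT(old) == 0)
			break;
		if ((old & WAITER) != 0 ||
		    atomic_compare_exchange_weak(cnt, &old, old | WAITER))
			pthread_cond_wait(&waitcv, &waitlock);
	}
	pthread_mutex_unlock(&waitlock);
}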


@@ -39,8 +39,14 @@
#define KASSERT(exp, msg) /* */
#endif
#define REFCOUNT_SATURATED(val) (((val) & (1U << 31)) != 0)
#define REFCOUNT_SATURATION_VALUE (3U << 30)
#define REFCOUNT_WAITER (1U << 31) /* Refcount has waiter. */
#define REFCOUNT_SATURATION_VALUE (3U << 29)
#define REFCOUNT_SATURATED(val) (((val) & (1U << 30)) != 0)
#define REFCOUNT_COUNT(x) ((x) & ~REFCOUNT_WAITER)
bool refcount_release_last(volatile u_int *count, u_int n, u_int old);
void refcount_sleep(volatile u_int *count, const char *wmesg, int prio);
/*
* Attempt to handle reference count overflow and underflow. Force the counter
@@ -76,6 +82,19 @@ refcount_acquire(volatile u_int *count)
_refcount_update_saturated(count);
}

static __inline void
refcount_acquiren(volatile u_int *count, u_int n)
{
	u_int old;

	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
	    ("refcount_acquiren: n %d too large", n));
	old = atomic_fetchadd_int(count, n);
	if (__predict_false(REFCOUNT_SATURATED(old)))
		_refcount_update_saturated(count);
}
static __inline __result_use_check bool
refcount_acquire_checked(volatile u_int *count)
{
@@ -91,32 +110,33 @@ refcount_acquire_checked(volatile u_int *count)
}
static __inline bool
refcount_release(volatile u_int *count)
refcount_releasen(volatile u_int *count, u_int n)
{
u_int old;
KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
("refcount_releasen: n %d too large", n));
atomic_thread_fence_rel();
old = atomic_fetchadd_int(count, -1);
if (__predict_false(old == 0 || REFCOUNT_SATURATED(old))) {
/*
* Avoid multiple destructor invocations if underflow occurred.
* This is not perfect since the memory backing the containing
* object may already have been reallocated.
*/
_refcount_update_saturated(count);
return (false);
}
if (old > 1)
return (false);
old = atomic_fetchadd_int(count, -n);
if (__predict_false(n >= REFCOUNT_COUNT(old) ||
REFCOUNT_SATURATED(old)))
return (refcount_release_last(count, n, old));
return (false);
}
/*
* Last reference. Signal the user to call the destructor.
*
* Ensure that the destructor sees all updates. The fence_rel
* at the start of the function synchronizes with this fence.
*/
atomic_thread_fence_acq();
return (true);
static __inline bool
refcount_release(volatile u_int *count)
{
return (refcount_releasen(count, 1));
}
static __inline void
refcount_wait(volatile u_int *count, const char *wmesg, int prio)
{
while (*count != 0)
refcount_sleep(count, wmesg, prio);
}
/*
@@ -130,7 +150,7 @@ refcount_acquire_if_not_zero(volatile u_int *count)
old = *count;
for (;;) {
if (old == 0)
if (REFCOUNT_COUNT(old) == 0)
return (false);
if (__predict_false(REFCOUNT_SATURATED(old)))
return (true);
@@ -146,7 +166,7 @@ refcount_release_if_not_last(volatile u_int *count)
old = *count;
for (;;) {
if (old == 1)
if (REFCOUNT_COUNT(old) == 1)
return (false);
if (__predict_false(REFCOUNT_SATURATED(old)))
return (true);
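
For completeness, here is a tiny standalone program (not part of the commit) that shows how the reworked macros split the 32-bit word into a waiter flag and a count; the definitions simply mirror the header above.

#include <stdio.h>

#define	REFCOUNT_WAITER		(1U << 31)
#define	REFCOUNT_COUNT(x)	((x) & ~REFCOUNT_WAITER)
#define	REFCOUNT_SATURATED(x)	(((x) & (1U << 30)) != 0)

int
main(void)
{
	unsigned int v = REFCOUNT_WAITER | 2;	/* two references, one sleeping waiter */

	printf("raw=%#x count=%u waiter=%d saturated=%d\n",
	    v, REFCOUNT_COUNT(v), (v & REFCOUNT_WAITER) != 0,
	    REFCOUNT_SATURATED(v));
	/* Prints: raw=0x80000002 count=2 waiter=1 saturated=0 */
	return (0);
}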