refcount(9): Add refcount_release_if_last() and refcount_load()
The former is intended for use in vmspace_exit(). The latter is meant to
encourage the use of explicit loads rather than relying on the volatile
qualifier. Explicit loads work better with kernel sanitizers, which can
intercept atomic(9) calls, and they make tricky lockless code easier to
read since the reader does not have to remember which variables are
declared volatile.

Reviewed by:	kib, mjg, mmel
MFC after:	2 weeks
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D27056
commit e89004612a
parent d3231fbdd2
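As a usage sketch (not part of the commit): the point of refcount_load() is to
make the read an explicit atomic(9) operation. The struct and function names
below are hypothetical, chosen only to illustrate the before/after pattern the
commit message describes.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/refcount.h>

struct obj {				/* hypothetical reference-counted object */
	volatile u_int	obj_refs;
};

/* Old style: the read is hidden behind the volatile qualifier. */
static bool
obj_is_shared_old(struct obj *o)
{
	return (o->obj_refs > 1);
}

/* New style: an explicit load that kernel sanitizers can intercept. */
static bool
obj_is_shared_new(struct obj *o)
{
	return (refcount_load(&o->obj_refs) > 1);
}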
share/man/man9/refcount.9:

@@ -32,7 +32,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd July 23, 2019
+.Dd November 2, 2020
 .Dt REFCOUNT 9
 .Os
 .Sh NAME
@@ -46,6 +46,8 @@
 .In sys/refcount.h
 .Ft void
 .Fn refcount_init "volatile u_int *count" "u_int value"
+.Ft u_int
+.Fn refcount_load "volatile u_int *count"
 .Ft void
 .Fn refcount_acquire "volatile u_int *count"
 .Ft bool
@@ -55,6 +57,8 @@
 .Ft bool
 .Fn refcount_release "volatile u_int *count"
 .Ft bool
+.Fn refcount_release_if_last "volatile u_int *count"
+.Ft bool
 .Fn refcount_release_if_not_last "volatile u_int *count"
 .Sh DESCRIPTION
 The
@@ -75,6 +79,16 @@ function is used to set the initial value of the counter to
 It is normally used when creating a reference-counted object.
 .Pp
+The
+.Fn refcount_load
+function returns a snapshot of the counter value.
+This value may immediately become out-of-date in the absence of external
+synchronization.
+.Fn refcount_load
+should be used instead of relying on the properties of the
+.Vt volatile
+qualifier.
+.Pp
 The
 .Fn refcount_acquire
 function is used to acquire a new reference.
 The caller is responsible for ensuring that it holds a valid reference
@@ -119,16 +133,33 @@ the last reference;
 otherwise, it returns false.
 .Pp
 The
+.Fn refcount_release_if_last
+and
 .Fn refcount_release_if_not_last
-is a variant of
+functions are variants of
 .Fn refcount_release
-which only drops the reference when it is not the last reference.
-In other words, the function returns
+which only drop the reference when it is or is not the last reference,
+respectively.
+In other words,
+.Fn refcount_release_if_last
+returns
+.Dv true
+when
+.Fa *count
+is equal to one, in which case it is decremented to zero.
+Otherwise,
+.Fa *count
+is not modified and the function returns
+.Dv false .
+Similarly,
+.Fn refcount_release_if_not_last
+returns
 .Dv true
 when
 .Fa *count
 is greater than one, in which case
-.Fa *count is decremented.
+.Fa *count
+is decremented.
 Otherwise, if
 .Fa *count
 is equal to one, the reference is not released and the function returns
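To make the documented semantics concrete, here is a hedged sketch of a release
path built on the new variants, reusing the hypothetical struct obj from the
sketch above; obj_prepare_teardown() and obj_free() are made-up names. It
mirrors the kind of split the commit message mentions for vmspace_exit(): learn
up front whether this is the final reference so extra work can happen before it
is dropped.

static void
obj_unref(struct obj *o)
{
	if (refcount_release_if_not_last(&o->obj_refs))
		return;		/* Dropped one of several references. */

	/*
	 * *count was 1, so nothing was released above.  Do whatever must
	 * precede the final release, then drop the last reference for real.
	 * If a new reference appeared in the meantime, refcount_release()
	 * returns false and the other holder frees the object later.
	 */
	obj_prepare_teardown(o);
	if (refcount_release(&o->obj_refs))
		obj_free(o);
}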
sys/sys/refcount.h:

@@ -66,6 +66,12 @@ refcount_init(volatile u_int *count, u_int value)
 	atomic_store_int(count, value);
 }
 
+static __inline u_int
+refcount_load(volatile u_int *count)
+{
+	return (atomic_load_int(count));
+}
+
 static __inline u_int
 refcount_acquire(volatile u_int *count)
 {
@@ -168,32 +174,50 @@ refcount_release(volatile u_int *count)
 	return (refcount_releasen(count, 1));
 }
 
+#define	_refcount_release_if_cond(cond, name)				\
+static __inline __result_use_check bool					\
+_refcount_release_if_##name(volatile u_int *count, u_int n)		\
+{									\
+	u_int old;							\
+									\
+	KASSERT(n > 0, ("%s: zero increment", __func__));		\
+	old = atomic_load_int(count);					\
+	for (;;) {							\
+		if (!(cond))						\
+			return (false);					\
+		if (__predict_false(REFCOUNT_SATURATED(old)))		\
+			return (false);					\
+		if (atomic_fcmpset_rel_int(count, &old, old - 1))	\
+			return (true);					\
+	}								\
+}
+_refcount_release_if_cond(old > n, gt)
+_refcount_release_if_cond(old == n, eq)
+
 static __inline __result_use_check bool
 refcount_release_if_gt(volatile u_int *count, u_int n)
 {
-	u_int old;
 
-	KASSERT(n > 0,
-	    ("refcount_release_if_gt: Use refcount_release for final ref"));
-	old = atomic_load_int(count);
-	for (;;) {
-		if (old <= n)
-			return (false);
-		if (__predict_false(REFCOUNT_SATURATED(old)))
-			return (true);
-		/*
-		 * Paired with acquire fence in refcount_releasen().
-		 */
-		if (atomic_fcmpset_rel_int(count, &old, old - 1))
-			return (true);
-	}
+	return (_refcount_release_if_gt(count, n));
 }
 
+static __inline __result_use_check bool
+refcount_release_if_last(volatile u_int *count)
+{
+
+	if (_refcount_release_if_eq(count, 1)) {
+		/* See the comment in refcount_releasen(). */
+		atomic_thread_fence_acq();
+		return (true);
+	}
+	return (false);
+}
+
 static __inline __result_use_check bool
 refcount_release_if_not_last(volatile u_int *count)
 {
 
-	return (refcount_release_if_gt(count, 1));
+	return (_refcount_release_if_gt(count, 1));
 }
 
 #endif /* !__SYS_REFCOUNT_H__ */
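For readers unfamiliar with the fence pairing in refcount_release_if_last(),
here is a userland C11 sketch of the same ordering idea: the decrement is a
release operation, and the thread that takes the count to zero issues an
acquire fence before touching the object. This is an illustration only, with
made-up names, and it omits the REFCOUNT_SATURATED handling.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool
release_if_last(atomic_uint *count)
{
	unsigned int old;

	old = atomic_load_explicit(count, memory_order_relaxed);
	while (old == 1) {
		if (atomic_compare_exchange_weak_explicit(count, &old, 0,
		    memory_order_release, memory_order_relaxed)) {
			/* Pairs with the release ordering of the CAS above. */
			atomic_thread_fence(memory_order_acquire);
			return (true);
		}
	}
	return (false);		/* count != 1; nothing was modified. */
}

int
main(void)
{
	atomic_uint c1 = 1, c2 = 2;

	assert(release_if_last(&c1) == true);	/* 1 -> 0: last reference */
	assert(release_if_last(&c2) == false);	/* untouched, still 2 */
	printf("c1=%u c2=%u\n", atomic_load(&c1), atomic_load(&c2));
	return (0);
}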