Handle refcount(9) wraparound.

Attempt to mitigate the security risks around refcount overflows by
introducing a "saturated" state for the counter.  Once a counter reaches
INT_MAX+1, subsequent acquire and release operations will blindly set
the counter value to INT_MAX + INT_MAX/2, ensuring that the protected
resource will not be freed; instead, it will merely be leaked.
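
For reference, a small user-space sketch (not part of the change) of how the
new constants relate to INT_MAX; REFCOUNT_SATURATED() treats any value with
bit 31 set, i.e. anything above INT_MAX, as saturated:

#include <limits.h>
#include <stdio.h>

/* Mirror of the macros added in the diff below. */
#define	REFCOUNT_SATURATED(val)		(((val) & (1U << 31)) != 0)
#define	REFCOUNT_SATURATION_VALUE	(3U << 30)	/* roughly INT_MAX + INT_MAX/2 */

int
main(void)
{
	printf("saturation value: %u, INT_MAX: %d\n",
	    REFCOUNT_SATURATION_VALUE, INT_MAX);
	printf("INT_MAX saturated: %d\n",
	    REFCOUNT_SATURATED((unsigned int)INT_MAX));		/* prints 0 */
	printf("INT_MAX+1 saturated: %d\n",
	    REFCOUNT_SATURATED((unsigned int)INT_MAX + 1));	/* prints 1 */
	return (0);
}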

The approach introduces a small race: if a refcount value reaches
INT_MAX+1, a subsequent release will cause the releasing thread to set
the counter to the saturation value after performing the decrement.  If
in the intervening window INT_MAX refcount releases are performed by a
different thread, a use-after-free is possible.  This is very difficult
to trigger in practice, and any situation where it could be triggered
would likely be vulnerable to reference count wraparound problems
to begin with.  An alternative would be to use atomic_cmpset to acquire
and release references, but this would introduce a larger performance
penalty, particularly when the counter is contended.
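
For illustration only, a sketch of the cmpset-style acquire alluded to above
(hypothetical, not what this change implements); it never increments a
saturated counter, closing the race at the cost of a CAS loop that must
retry under contention:

static __inline void
refcount_acquire_cmpset(volatile u_int *count)
{
	u_int old;

	old = *count;
	for (;;) {
		/* Refuse to move a saturated counter; the object is leaked. */
		if (__predict_false(REFCOUNT_SATURATED(old)))
			return;
		/* atomic_fcmpset_int() reloads 'old' on failure. */
		if (atomic_fcmpset_int(count, &old, old + 1))
			return;
	}
}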

Note that refcount_acquire_checked(9) maintains its previous behaviour;
code which must accurately track references should use it instead of
refcount_acquire(9).
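
A minimal kernel-side sketch of that split (struct obj and obj_hold_checked()
are hypothetical, not part of this change):

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/refcount.h>

/* Hypothetical object with an embedded reference count. */
struct obj {
	u_int	refs;
};

/*
 * Code that must account for every reference checks the result of
 * refcount_acquire_checked(9) instead of relying on saturation.
 */
static int
obj_hold_checked(struct obj *o)
{
	if (!refcount_acquire_checked(&o->refs))
		return (EOVERFLOW);
	return (0);
}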

Reviewed by:	kib, mjg
MFC after:	3 weeks
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D21089
Author:	Mark Johnston
Date:	2019-07-30 15:57:31 +00:00
Commit:	0b21d89499
Parent:	ecb12757ba

@@ -30,7 +30,6 @@
 #ifndef __SYS_REFCOUNT_H__
 #define	__SYS_REFCOUNT_H__
 
-#include <sys/limits.h>
 #include <machine/atomic.h>
 
 #ifdef _KERNEL
@@ -40,19 +39,41 @@
 #define	KASSERT(exp, msg)	/* */
 #endif
 
+#define	REFCOUNT_SATURATED(val)		(((val) & (1U << 31)) != 0)
+#define	REFCOUNT_SATURATION_VALUE	(3U << 30)
+
+/*
+ * Attempt to handle reference count overflow and underflow.  Force the counter
+ * to stay at the saturation value so that a counter overflow cannot trigger
+ * destruction of the containing object and instead leads to a less harmful
+ * memory leak.
+ */
+static __inline void
+_refcount_update_saturated(volatile u_int *count)
+{
+#ifdef INVARIANTS
+	panic("refcount %p wraparound", count);
+#else
+	atomic_store_int(count, REFCOUNT_SATURATION_VALUE);
+#endif
+}
+
 static __inline void
 refcount_init(volatile u_int *count, u_int value)
 {
-
+	KASSERT(!REFCOUNT_SATURATED(value),
+	    ("invalid initial refcount value %u", value));
 	*count = value;
 }
 
 static __inline void
 refcount_acquire(volatile u_int *count)
 {
+	u_int old;
 
-	KASSERT(*count < UINT_MAX, ("refcount %p overflowed", count));
-	atomic_add_int(count, 1);
+	old = atomic_fetchadd_int(count, 1);
+	if (__predict_false(REFCOUNT_SATURATED(old)))
+		_refcount_update_saturated(count);
 }
 
 static __inline __result_use_check bool
@@ -61,7 +82,7 @@ refcount_acquire_checked(volatile u_int *count)
 	u_int lcount;
 
 	for (lcount = *count;;) {
-		if (__predict_false(lcount + 1 < lcount))
+		if (__predict_false(REFCOUNT_SATURATED(lcount + 1)))
 			return (false);
 		if (__predict_true(atomic_fcmpset_int(count, &lcount,
 		    lcount + 1) == 1))
@@ -76,7 +97,15 @@ refcount_release(volatile u_int *count)
 
 	atomic_thread_fence_rel();
 	old = atomic_fetchadd_int(count, -1);
-	KASSERT(old > 0, ("refcount %p is zero", count));
+	if (__predict_false(old == 0 || REFCOUNT_SATURATED(old))) {
+		/*
+		 * Avoid multiple destructor invocations if underflow occurred.
+		 * This is not perfect since the memory backing the containing
+		 * object may already have been reallocated.
+		 */
+		_refcount_update_saturated(count);
+		return (false);
+	}
 	if (old > 1)
 		return (false);
 
@@ -84,7 +113,7 @@ refcount_release(volatile u_int *count)
 	 * Last reference.  Signal the user to call the destructor.
 	 *
 	 * Ensure that the destructor sees all updates.  The fence_rel
-	 * at the start of the function synchronized with this fence.
+	 * at the start of the function synchronizes with this fence.
 	 */
 	atomic_thread_fence_acq();
 	return (true);
@@ -101,9 +130,10 @@ refcount_acquire_if_not_zero(volatile u_int *count)
 
 	old = *count;
 	for (;;) {
-		KASSERT(old < UINT_MAX, ("refcount %p overflowed", count));
 		if (old == 0)
 			return (false);
+		if (__predict_false(REFCOUNT_SATURATED(old)))
+			return (true);
 		if (atomic_fcmpset_int(count, &old, old + 1))
 			return (true);
 	}
@@ -116,9 +146,10 @@ refcount_release_if_not_last(volatile u_int *count)
 
 	old = *count;
 	for (;;) {
-		KASSERT(old > 0, ("refcount %p is zero", count));
 		if (old == 1)
 			return (false);
+		if (__predict_false(REFCOUNT_SATURATED(old)))
+			return (true);
 		if (atomic_fcmpset_int(count, &old, old - 1))
 			return (true);
 	}
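
For context, a sketch of the usual consumer pattern (struct obj, obj_rele()
and M_OBJ are hypothetical, not part of this change); because a saturated
counter never causes refcount_release() to return true, a wrapped object is
leaked here rather than freed while still referenced:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/refcount.h>

MALLOC_DEFINE(M_OBJ, "obj", "hypothetical refcounted object");

/* Hypothetical object with an embedded reference count. */
struct obj {
	u_int	refs;
};

static void
obj_rele(struct obj *o)
{
	/* refcount_release() returns true only for the last reference. */
	if (refcount_release(&o->refs))
		free(o, M_OBJ);
}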