Use atomics in more cases for object references. We can now completely
omit the object lock if we are above a certain threshold. Hold only a
single vnode reference when the vnode object has any ref > 0. This
allows us to lock the object and vnode only on 0-1 and 1-0 transitions.

Differential Revision:	https://reviews.freebsd.org/D22452
Jeff Roberson 2019-11-27 00:39:23 +00:00
parent beb8beef81
commit a67d540832
3 changed files with 67 additions and 43 deletions
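
Before the per-file diffs, a minimal userspace sketch of the pattern the commit message describes, assuming nothing beyond C11 atomics and pthreads. The struct and function names below are invented stand-ins for vm_object, refcount(9) and the object lock, not the kernel code itself: references are taken and dropped with plain atomics whenever the count stays above the interesting threshold, and the lock is taken only around the 0-1 and 1-0 transitions.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	pthread_mutex_t lock;		/* stands in for the object lock */
	atomic_uint refs;
};

/* Lock-free: bump the count only while it is already above 'thresh'. */
static bool
ref_acquire_if_gt(struct obj *o, unsigned thresh)
{
	unsigned old = atomic_load(&o->refs);

	while (old > thresh)
		if (atomic_compare_exchange_weak(&o->refs, &old, old + 1))
			return (true);
	return (false);
}

/* Lock-free: drop the count only while it is above 'thresh'. */
static bool
ref_release_if_gt(struct obj *o, unsigned thresh)
{
	unsigned old = atomic_load(&o->refs);

	while (old > thresh)
		if (atomic_compare_exchange_weak(&o->refs, &old, old - 1))
			return (true);
	return (false);
}

static void
obj_ref(struct obj *o)
{
	if (ref_acquire_if_gt(o, 0))
		return;			/* common case: no lock taken */
	pthread_mutex_lock(&o->lock);	/* possible 0 -> 1 transition */
	if (atomic_fetch_add(&o->refs, 1) == 0) {
		/* first-reference setup goes here */
	}
	pthread_mutex_unlock(&o->lock);
}

static void
obj_unref(struct obj *o)
{
	if (ref_release_if_gt(o, 1))
		return;			/* common case: no lock taken */
	pthread_mutex_lock(&o->lock);	/* possible 1 -> 0 transition */
	if (atomic_fetch_sub(&o->refs, 1) == 1) {
		/* last-reference teardown goes here */
	}
	pthread_mutex_unlock(&o->lock);
}

The vm_object.c diff below uses the same shape in vm_object_reference(), except that the acquire threshold is 1 rather than 0 for non-vnode objects, because other code assumes exclusive access to an object with a single reference.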

sys/sys/refcount.h

@@ -72,7 +72,7 @@ refcount_init(volatile u_int *count, u_int value)
*count = value;
}
static __inline void
static __inline u_int
refcount_acquire(volatile u_int *count)
{
u_int old;
@@ -80,9 +80,11 @@ refcount_acquire(volatile u_int *count)
old = atomic_fetchadd_int(count, 1);
if (__predict_false(REFCOUNT_SATURATED(old)))
_refcount_update_saturated(count);
return (old);
}
static __inline void
static __inline u_int
refcount_acquiren(volatile u_int *count, u_int n)
{
u_int old;
@@ -92,6 +94,8 @@ refcount_acquiren(volatile u_int *count, u_int n)
old = atomic_fetchadd_int(count, n);
if (__predict_false(REFCOUNT_SATURATED(old)))
_refcount_update_saturated(count);
return (old);
}
static __inline __result_use_check bool
@@ -144,13 +148,13 @@ refcount_wait(volatile u_int *count, const char *wmesg, int prio)
* incremented. Else zero is returned.
*/
static __inline __result_use_check bool
refcount_acquire_if_not_zero(volatile u_int *count)
refcount_acquire_if_gt(volatile u_int *count, u_int n)
{
u_int old;
old = *count;
for (;;) {
if (REFCOUNT_COUNT(old) == 0)
if (REFCOUNT_COUNT(old) <= n)
return (false);
if (__predict_false(REFCOUNT_SATURATED(old)))
return (true);
@@ -160,19 +164,10 @@ refcount_acquire_if_not_zero(volatile u_int *count)
}
static __inline __result_use_check bool
refcount_release_if_not_last(volatile u_int *count)
refcount_acquire_if_not_zero(volatile u_int *count)
{
u_int old;
old = *count;
for (;;) {
if (REFCOUNT_COUNT(old) == 1)
return (false);
if (__predict_false(REFCOUNT_SATURATED(old)))
return (true);
if (atomic_fcmpset_int(count, &old, old - 1))
return (true);
}
return (refcount_acquire_if_gt(count, 0));
}
static __inline __result_use_check bool
@@ -193,4 +188,10 @@ refcount_release_if_gt(volatile u_int *count, u_int n)
}
}
static __inline __result_use_check bool
refcount_release_if_not_last(volatile u_int *count)
{
return (refcount_release_if_gt(count, 1));
}
#endif /* ! __SYS_REFCOUNT_H__ */
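
The generalized helpers make the old entry points one-line wrappers. To make the threshold argument concrete, here is a hypothetical caller (sketch only: 'sc', its mtx and the two helpers are invented, not an existing kernel consumer); a threshold of 0 on acquire and 1 on release reproduces the old if_not_zero / if_not_last behaviour, with the lock needed only around the first and last reference.

/* Take a reference, locking only for a possible 0 -> 1 transition. */
if (!refcount_acquire_if_gt(&sc->refs, 0)) {
	mtx_lock(&sc->mtx);
	if (refcount_acquire(&sc->refs) == 0)
		sc_first_ref(sc);		/* invented first-ref setup */
	mtx_unlock(&sc->mtx);
}

/* Drop a reference, locking only for a possible 1 -> 0 transition. */
if (!refcount_release_if_gt(&sc->refs, 1)) {
	mtx_lock(&sc->mtx);
	if (refcount_release(&sc->refs))
		sc_last_ref(sc);		/* invented last-ref teardown */
	mtx_unlock(&sc->mtx);
}

Note that refcount_acquire() now returns the pre-increment value, which is what lets the locked path detect the 0 -> 1 case.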

sys/vm/vm_object.c

@@ -468,11 +468,28 @@ vm_object_allocate_anon(vm_pindex_t size)
void
vm_object_reference(vm_object_t object)
{
struct vnode *vp;
u_int old;
if (object == NULL)
return;
VM_OBJECT_RLOCK(object);
vm_object_reference_locked(object);
VM_OBJECT_RUNLOCK(object);
/*
* Many places assume exclusive access to objects with a single
* ref. vm_object_collapse() in particular will directly manipulate
* references for objects in this state. vnode objects only need
* the lock for the first ref to reference the vnode.
*/
if (!refcount_acquire_if_gt(&object->ref_count,
object->type == OBJT_VNODE ? 0 : 1)) {
VM_OBJECT_RLOCK(object);
old = refcount_acquire(&object->ref_count);
if (object->type == OBJT_VNODE && old == 0) {
vp = object->handle;
vref(vp);
}
VM_OBJECT_RUNLOCK(object);
}
}
/*
@@ -486,10 +503,11 @@ void
vm_object_reference_locked(vm_object_t object)
{
struct vnode *vp;
u_int old;
VM_OBJECT_ASSERT_LOCKED(object);
refcount_acquire(&object->ref_count);
if (object->type == OBJT_VNODE) {
old = refcount_acquire(&object->ref_count);
if (object->type == OBJT_VNODE && old == 0) {
vp = object->handle;
vref(vp);
}
@@ -507,11 +525,10 @@ vm_object_vndeallocate(vm_object_t object)
("vm_object_vndeallocate: not a vnode object"));
KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
if (refcount_release(&object->ref_count) &&
!umtx_shm_vnobj_persistent)
if (!umtx_shm_vnobj_persistent)
umtx_shm_object_terminated(object);
VM_OBJECT_RUNLOCK(object);
VM_OBJECT_WUNLOCK(object);
/* vrele may need the vnode lock. */
vrele(vp);
}
@@ -531,15 +548,9 @@ void
vm_object_deallocate(vm_object_t object)
{
vm_object_t robject, temp;
bool released;
bool last, released;
while (object != NULL) {
VM_OBJECT_RLOCK(object);
if (object->type == OBJT_VNODE) {
vm_object_vndeallocate(object);
return;
}
/*
* If the reference count goes to 0 we start calling
* vm_object_terminate() on the object chain. A ref count
@@ -551,7 +562,6 @@ vm_object_deallocate(vm_object_t object)
released = refcount_release_if_gt(&object->ref_count, 1);
else
released = refcount_release_if_gt(&object->ref_count, 2);
VM_OBJECT_RUNLOCK(object);
if (released)
return;
@@ -559,7 +569,14 @@ vm_object_deallocate(vm_object_t object)
KASSERT(object->ref_count != 0,
("vm_object_deallocate: object deallocated too many times: %d", object->type));
refcount_release(&object->ref_count);
last = refcount_release(&object->ref_count);
if (object->type == OBJT_VNODE) {
if (last)
vm_object_vndeallocate(object);
else
VM_OBJECT_WUNLOCK(object);
return;
}
if (object->ref_count > 1) {
VM_OBJECT_WUNLOCK(object);
return;
@@ -629,7 +646,7 @@ vm_object_deallocate(vm_object_t object)
VM_OBJECT_WUNLOCK(object);
if (robject->ref_count == 1) {
robject->ref_count--;
refcount_release(&robject->ref_count);
object = robject;
goto doterm;
}
@@ -1838,7 +1855,7 @@ vm_object_collapse(vm_object_t object)
backing_object));
vm_object_pip_wakeup(backing_object);
backing_object->type = OBJT_DEAD;
backing_object->ref_count = 0;
refcount_release(&backing_object->ref_count);
VM_OBJECT_WUNLOCK(backing_object);
vm_object_destroy(backing_object);

sys/vm/vnode_pager.c

@@ -150,6 +150,7 @@ vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
vm_object_t object;
vm_ooffset_t size = isize;
struct vattr va;
bool last;
if (!vn_isdisk(vp, NULL) && vn_canvmio(vp) == FALSE)
return (0);
@@ -171,12 +172,15 @@ vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
/*
* Dereference the reference we just created. This assumes
* that the object is associated with the vp.
* that the object is associated with the vp. We still have
* to serialize with vnode_pager_dealloc() for the last
* potential reference.
*/
VM_OBJECT_RLOCK(object);
refcount_release(&object->ref_count);
last = refcount_release(&object->ref_count);
VM_OBJECT_RUNLOCK(object);
vrele(vp);
if (last)
vrele(vp);
KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));
@@ -293,15 +297,17 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
}
vp->v_object = object;
VI_UNLOCK(vp);
vrefact(vp);
} else {
VM_OBJECT_WLOCK(object);
refcount_acquire(&object->ref_count);
vm_object_reference(object);
#if VM_NRESERVLEVEL > 0
vm_object_color(object, 0);
if ((object->flags & OBJ_COLORED) == 0) {
VM_OBJECT_WLOCK(object);
vm_object_color(object, 0);
VM_OBJECT_WUNLOCK(object);
}
#endif
VM_OBJECT_WUNLOCK(object);
}
vrefact(vp);
return (object);
}
@@ -345,7 +351,7 @@ vnode_pager_dealloc(vm_object_t object)
vp->v_writecount = 0;
VI_UNLOCK(vp);
VM_OBJECT_WUNLOCK(object);
while (refs-- > 0)
if (refs > 0)
vunref(vp);
VM_OBJECT_WLOCK(object);
}