Use an atomic reference count for paging in progress so that callers do not
require the object lock.

Reviewed by:	markj
Tested by:	pho (as part of a larger branch)
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D21311
commit cf27e0d125 (parent 54a210d407)
Author: Jeff Roberson
Date:   2019-08-19 23:09:38 +00:00
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=351241
6 changed files with 42 additions and 51 deletions

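The change replaces the lock-protected paging_in_progress counter and its
OBJ_PIPWNT flag with the refcount(9) primitives (refcount_acquiren(),
refcount_release(), refcount_releasen() and refcount_wait()).  As a rough
caller-side sketch of the pattern this enables (illustrative only:
start_paging()/paging_done() are invented names; the vm_object_pip_*() and
VM_OBJECT_*LOCK() calls are the real KPIs touched below):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/vm_object.h>

/* Hypothetical pager: wrap an asynchronous I/O in a pip reference. */
static void
start_paging(vm_object_t object)
{
        VM_OBJECT_WLOCK(object);
        vm_object_pip_add(object, 1);   /* now an atomic refcount_acquiren() */
        VM_OBJECT_WUNLOCK(object);
        /* ... queue the asynchronous I/O ... */
}

/* Hypothetical I/O completion path. */
static void
paging_done(vm_object_t object)
{
        /*
         * No object lock needed any more: the release is atomic and wakes
         * any vm_object_pip_wait*() sleeper when the count reaches zero.
         */
        vm_object_pip_wakeup(object);
}
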
sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c

@@ -451,7 +451,7 @@ page_unbusy(vm_page_t pp)
 {
        vm_page_sunbusy(pp);
-       vm_object_pip_subtract(pp->object, 1);
+       vm_object_pip_wakeup(pp->object);
 }
 
 static vm_page_t
@@ -523,6 +523,7 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
        off = start & PAGEOFFSET;
        zfs_vmobject_wlock(obj);
+       vm_object_pip_add(obj, 1);
        for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
                vm_page_t pp;
                int nbytes = imin(PAGESIZE - off, len);
@@ -541,7 +542,7 @@ update_pages(vnode_t *vp, int64_t start, int len, objset_t *os, uint64_t oid,
                len -= nbytes;
                off = 0;
        }
-       vm_object_pip_wakeupn(obj, 0);
+       vm_object_pip_wakeup(obj);
        zfs_vmobject_wunlock(obj);
 }

sys/kern/vfs_cluster.c

@@ -479,7 +479,8 @@ cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
                        }
                        if (tsize > 0) {
 clean_sbusy:
-                               vm_object_pip_add(tbp->b_bufobj->bo_object, -j);
+                               vm_object_pip_wakeupn(tbp->b_bufobj->bo_object,
+                                   j);
                                for (k = 0; k < j; k++)
                                        vm_page_sunbusy(tbp->b_pages[k]);
                                VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);

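The old code passed a negative count to vm_object_pip_add() to drop j
references at once; the new code spells that as a release through
vm_object_pip_wakeupn().  A minimal kernel-context sketch of the same cleanup
shape (undo_cluster_pages() and its parameters are invented for illustration):

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

/* Hypothetical helper: drop "count" pip references and unbusy the pages. */
static void
undo_cluster_pages(vm_object_t object, vm_page_t *pages, int count)
{
        int k;

        /* Releases all pip references at once; may wake pip waiters. */
        vm_object_pip_wakeupn(object, count);
        for (k = 0; k < count; k++)
                vm_page_sunbusy(pages[k]);
}
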
sys/kern/vfs_subr.c

@@ -1786,13 +1786,9 @@ bufobj_invalbuf(struct bufobj *bo, int flags, int slpflag, int slptimeo)
         */
        do {
                bufobj_wwait(bo, 0, 0);
-               if ((flags & V_VMIO) == 0) {
+               if ((flags & V_VMIO) == 0 && bo->bo_object != NULL) {
                        BO_UNLOCK(bo);
-                       if (bo->bo_object != NULL) {
-                               VM_OBJECT_WLOCK(bo->bo_object);
-                               vm_object_pip_wait(bo->bo_object, "bovlbx");
-                               VM_OBJECT_WUNLOCK(bo->bo_object);
-                       }
+                       vm_object_pip_wait_unlocked(bo->bo_object, "bovlbx");
                        BO_LOCK(bo);
                }
        } while (bo->bo_numoutput > 0);

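Before this hunk, the only way to wait for paging to drain was to take the
object lock and sleep in vm_object_pip_wait(); the new
vm_object_pip_wait_unlocked() sleeps on the atomic counter directly.  A
condensed before/after sketch (drain_paging_*() and the "pipdrn" wmesg are
invented names):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <vm/vm.h>
#include <vm/vm_object.h>

/* Old shape: the object lock is taken only so that we can sleep. */
static void
drain_paging_locked(vm_object_t obj)
{
        VM_OBJECT_WLOCK(obj);
        vm_object_pip_wait(obj, "pipdrn");
        VM_OBJECT_WUNLOCK(obj);
}

/* New shape: wait for the counter to hit zero with no object lock at all. */
static void
drain_paging_unlocked(vm_object_t obj)
{
        vm_object_pip_wait_unlocked(obj, "pipdrn");
}
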
sys/vm/swap_pager.c

@@ -1256,7 +1256,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *ma, int count, int *rbehind,
        while ((ma[0]->oflags & VPO_SWAPINPROG) != 0) {
                ma[0]->oflags |= VPO_SWAPSLEEP;
                VM_CNT_INC(v_intrans);
-               if (VM_OBJECT_SLEEP(object, &object->paging_in_progress, PSWP,
+               if (VM_OBJECT_SLEEP(object, &object->handle, PSWP,
                    "swread", hz * 20)) {
                        printf(
 "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n",
@@ -1531,7 +1531,7 @@ swp_pager_async_iodone(struct buf *bp)
                m->oflags &= ~VPO_SWAPINPROG;
                if (m->oflags & VPO_SWAPSLEEP) {
                        m->oflags &= ~VPO_SWAPSLEEP;
-                       wakeup(&object->paging_in_progress);
+                       wakeup(&object->handle);
                }
 
                if (bp->b_ioflags & BIO_ERROR) {

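The wait-channel switch in these two hunks (from &object->paging_in_progress
to &object->handle) is presumably needed because refcount_wait() and
refcount_releasen() now sleep on and wake the counter's own address, so the
swap pager's private VPO_SWAPINPROG/VPO_SWAPSLEEP handshake has to key on a
different address.  A generic sketch of the rule that two wait/wakeup
protocols must not share a channel (struct demo_obj and the helpers are
invented; msleep()/wakeup() are the standard sleep KPIs):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct demo_obj {
        struct mtx      lock;           /* assume initialized with mtx_init() */
        volatile u_int  refs;           /* wait channel A: refcount-style code */
        bool            io_done;        /* predicate and wait channel B */
};

static void
wait_for_io(struct demo_obj *o)
{
        mtx_lock(&o->lock);
        while (!o->io_done) {
                /*
                 * Sleep on channel B; &o->refs (channel A) belongs to
                 * another protocol and must not be reused here.
                 */
                msleep(&o->io_done, &o->lock, 0, "demoio", 0);
        }
        mtx_unlock(&o->lock);
}

static void
io_finished(struct demo_obj *o)
{
        mtx_lock(&o->lock);
        o->io_done = true;
        wakeup(&o->io_done);            /* wakes only channel B sleepers */
        mtx_unlock(&o->lock);
}
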
sys/vm/vm_object.c

@@ -80,6 +80,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/sysctl.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>           /* for curproc, pageproc */
+#include <sys/refcount.h>
 #include <sys/socket.h>
 #include <sys/resourcevar.h>
 #include <sys/rwlock.h>
@@ -221,7 +222,7 @@ vm_object_zinit(void *mem, int size, int flags)
        object->type = OBJT_DEAD;
        object->ref_count = 0;
        vm_radix_init(&object->rtree);
-       object->paging_in_progress = 0;
+       refcount_init(&object->paging_in_progress, 0);
        object->resident_page_count = 0;
        object->shadow_count = 0;
        object->flags = OBJ_DEAD;
@@ -371,41 +372,21 @@ void
 vm_object_pip_add(vm_object_t object, short i)
 {
-       VM_OBJECT_ASSERT_WLOCKED(object);
-       object->paging_in_progress += i;
-}
-
-void
-vm_object_pip_subtract(vm_object_t object, short i)
-{
-       VM_OBJECT_ASSERT_WLOCKED(object);
-       object->paging_in_progress -= i;
+       refcount_acquiren(&object->paging_in_progress, i);
 }
 
 void
 vm_object_pip_wakeup(vm_object_t object)
 {
-       VM_OBJECT_ASSERT_WLOCKED(object);
-       object->paging_in_progress--;
-       if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
-               vm_object_clear_flag(object, OBJ_PIPWNT);
-               wakeup(object);
-       }
+       refcount_release(&object->paging_in_progress);
 }
 
 void
 vm_object_pip_wakeupn(vm_object_t object, short i)
 {
-       VM_OBJECT_ASSERT_WLOCKED(object);
-       if (i)
-               object->paging_in_progress -= i;
-       if ((object->flags & OBJ_PIPWNT) && object->paging_in_progress == 0) {
-               vm_object_clear_flag(object, OBJ_PIPWNT);
-               wakeup(object);
-       }
+       refcount_releasen(&object->paging_in_progress, i);
 }
 
 void
@@ -413,12 +394,24 @@ vm_object_pip_wait(vm_object_t object, char *waitid)
 {
        VM_OBJECT_ASSERT_WLOCKED(object);
        while (object->paging_in_progress) {
-               object->flags |= OBJ_PIPWNT;
-               VM_OBJECT_SLEEP(object, object, PVM, waitid, 0);
+               VM_OBJECT_WUNLOCK(object);
+               refcount_wait(&object->paging_in_progress, waitid, PVM);
+               VM_OBJECT_WLOCK(object);
        }
 }
 
+void
+vm_object_pip_wait_unlocked(vm_object_t object, char *waitid)
+{
+       VM_OBJECT_ASSERT_UNLOCKED(object);
+       while (object->paging_in_progress)
+               refcount_wait(&object->paging_in_progress, waitid, PVM);
+}
+
 /*
  *     vm_object_allocate:
  *
@@ -615,9 +608,10 @@ vm_object_deallocate(vm_object_t object)
                                }
                        } else if (object->paging_in_progress) {
                                VM_OBJECT_WUNLOCK(robject);
-                               object->flags |= OBJ_PIPWNT;
-                               VM_OBJECT_SLEEP(object, object,
-                                   PDROP | PVM, "objde2", 0);
+                               VM_OBJECT_WUNLOCK(object);
+                               refcount_wait(
+                                   &object->paging_in_progress,
+                                   "objde2", PVM);
                                VM_OBJECT_WLOCK(robject);
                                temp = robject->backing_object;
                                if (object == temp) {
@@ -760,14 +754,6 @@ vm_object_terminate(vm_object_t object)
         */
        vm_object_set_flag(object, OBJ_DEAD);
 
-       /*
-        * wait for the pageout daemon to be done with the object
-        */
-       vm_object_pip_wait(object, "objtrm");
-
-       KASSERT(!object->paging_in_progress,
-           ("vm_object_terminate: pageout in progress"));
-
        /*
         * Clean and free the pages, as appropriate. All references to the
         * object are gone, so we don't need to lock it.
@@ -790,6 +776,14 @@ vm_object_terminate(vm_object_t object)
                VM_OBJECT_WLOCK(object);
        }
 
+       /*
+        * wait for the pageout daemon to be done with the object
+        */
+       vm_object_pip_wait(object, "objtrm");
+
+       KASSERT(!object->paging_in_progress,
+           ("vm_object_terminate: pageout in progress"));
+
        KASSERT(object->ref_count == 0,
            ("vm_object_terminate: object with references, ref_count=%d",
            object->ref_count));

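Taken together, vm_object.c now treats paging_in_progress like any other
refcount(9) counter.  The sketch below is a simplified userland analogue
using C11 atomics and pthreads; it is not the refcount(9) implementation and
the pip_* names are invented, but it shows the same idea: acquire and release
are plain atomic operations, and a waiter flag folded into the counter means
releasers only pay for a wakeup when somebody is actually sleeping.

#include <pthread.h>
#include <stdatomic.h>

#define WAITER_FLAG     0x80000000u     /* high bit: "someone is waiting" */

struct pip_counter {
        _Atomic unsigned        count;  /* low 31 bits hold the references */
        pthread_mutex_t         mtx;    /* only used on the slow wait path */
        pthread_cond_t          cv;
};

static void
pip_add(struct pip_counter *p, unsigned n)
{
        atomic_fetch_add(&p->count, n);         /* lock-free acquire */
}

static void
pip_release(struct pip_counter *p, unsigned n)
{
        unsigned old;

        old = atomic_fetch_sub(&p->count, n);
        /* Slow path only if this was the last reference and a waiter exists. */
        if ((old & ~WAITER_FLAG) == n && (old & WAITER_FLAG) != 0) {
                pthread_mutex_lock(&p->mtx);
                atomic_fetch_and(&p->count, ~WAITER_FLAG);
                pthread_cond_broadcast(&p->cv);
                pthread_mutex_unlock(&p->mtx);
        }
}

static void
pip_wait(struct pip_counter *p)
{
        pthread_mutex_lock(&p->mtx);
        /* Announce the waiter and re-check; only releasers clear the flag. */
        while ((atomic_fetch_or(&p->count, WAITER_FLAG) & ~WAITER_FLAG) != 0)
                pthread_cond_wait(&p->cv, &p->mtx);
        pthread_mutex_unlock(&p->mtx);
}

A static instance would initialize mtx and cv with PTHREAD_MUTEX_INITIALIZER
and PTHREAD_COND_INITIALIZER.  Because the releaser takes the mutex before
broadcasting, a waiter that has just set the flag and is about to block
cannot miss the wakeup; that ordering is what lets the add/release fast paths
stay entirely lock-free.
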
sys/vm/vm_object.h

@@ -111,7 +111,7 @@ struct vm_object {
        objtype_t type;                 /* type of pager */
        u_short flags;                  /* see below */
        u_short pg_color;               /* (c) color of first page in obj */
-       u_int paging_in_progress;       /* Paging (in or out) so don't collapse or destroy */
+       volatile u_int paging_in_progress; /* Paging (in or out) so don't collapse or destroy */
        int resident_page_count;        /* number of resident pages */
        struct vm_object *backing_object; /* object that I'm a shadow of */
        vm_ooffset_t backing_object_offset;/* Offset in backing object */
@@ -183,7 +183,6 @@ struct vm_object {
 #define OBJ_DEAD        0x0008          /* dead objects (during rundown) */
 #define OBJ_NOSPLIT     0x0010          /* dont split this object */
 #define OBJ_UMTXDEAD    0x0020          /* umtx pshared was terminated */
-#define OBJ_PIPWNT      0x0040          /* paging in progress wanted */
 #define OBJ_PG_DTOR     0x0080          /* dont reset object, leave that for dtor */
 #define OBJ_MIGHTBEDIRTY 0x0100         /* object might be dirty, only for vnode */
 #define OBJ_TMPFS_NODE  0x0200          /* object belongs to tmpfs VREG node */
@@ -309,10 +308,10 @@ vm_object_reserv(vm_object_t object)
 void vm_object_clear_flag(vm_object_t object, u_short bits);
 void vm_object_pip_add(vm_object_t object, short i);
-void vm_object_pip_subtract(vm_object_t object, short i);
 void vm_object_pip_wakeup(vm_object_t object);
 void vm_object_pip_wakeupn(vm_object_t object, short i);
 void vm_object_pip_wait(vm_object_t object, char *waitid);
+void vm_object_pip_wait_unlocked(vm_object_t object, char *waitid);
 void umtx_shm_object_init(vm_object_t object);
 void umtx_shm_object_terminated(vm_object_t object);