Minimize the use of the page queues lock for synchronizing access to the
page's dirty field.  With the exception of one case, access to this field
is now synchronized by the object lock.
Alan Cox 2010-06-02 15:46:37 +00:00
parent 7c4b8137cd
commit c8fa870982
4 changed files with 47 additions and 16 deletions
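
To make the new rule concrete, here is a minimal userspace sketch of the
synchronization scheme this commit introduces.  It is not part of the commit:
struct page, page_clear_dirty_mask(), queue_lock, and obj_lock are
illustrative stand-ins for the kernel's vm_page, vm_page_clear_dirty_mask(),
page queues lock, and VM object lock, with pthread mutexes in place of the
kernel's mutexes.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define VPO_BUSY	0x0001	/* stand-in for the kernel's VPO_BUSY */
#define PG_WRITEABLE	0x0002	/* stand-in for the kernel's PG_WRITEABLE */

/* Single global lock modeling the page queues lock. */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

struct page {
	pthread_mutex_t	obj_lock;	/* models the owning object's lock */
	unsigned	flags;
	uint8_t		dirty;		/* one bit per DEV_BSIZE chunk */
};

/*
 * Clear the given bits from the page's dirty field.  The caller must hold
 * m->obj_lock (the kernel version asserts this with VM_OBJECT_LOCK_ASSERT).
 */
static void
page_clear_dirty_mask(struct page *m, uint8_t pagebits)
{

	if ((m->flags & (VPO_BUSY | PG_WRITEABLE)) == 0) {
		/*
		 * The page is not busy and has no writeable mappings, so
		 * no concurrent pmap operation can be updating the dirty
		 * field: the object lock alone suffices.
		 */
		m->dirty &= ~pagebits;
	} else {
		/*
		 * The one remaining case: a concurrent pmap operation may
		 * set dirty bits, so take the page queues lock to
		 * serialize the read-modify-write.
		 */
		pthread_mutex_lock(&queue_lock);
		m->dirty &= ~pagebits;
		pthread_mutex_unlock(&queue_lock);
	}
}

int
main(void)
{
	static struct page m = { PTHREAD_MUTEX_INITIALIZER, 0, 0xff };

	pthread_mutex_lock(&m.obj_lock);	/* "object lock" held */
	page_clear_dirty_mask(&m, 0x0f);	/* fast path: no queue lock */
	pthread_mutex_unlock(&m.obj_lock);
	printf("dirty = 0x%02x\n", m.dirty);	/* prints dirty = 0xf0 */
	return (0);
}

The "one case" mentioned in the commit message is the fallback branch: while
a page is busy or may have writeable mappings, a concurrent pmap operation
can still set bits in the dirty field, so the page queues lock is still
taken there.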

sys/kern/uipc_shm.c

@@ -304,9 +304,7 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
 			 */
 			base = roundup2(base, DEV_BSIZE);
-			vm_page_lock_queues();
 			vm_page_clear_dirty(m, base, PAGE_SIZE - base);
-			vm_page_unlock_queues();
 		} else if ((length & PAGE_MASK) &&
 		    __predict_false(object->cache != NULL)) {
 			vm_page_cache_free(object, OFF_TO_IDX(length),
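
The caller-side change here, and in vnode_pager_setsize() below, is simply to
drop the vm_page_lock_queues()/vm_page_unlock_queues() pair around
vm_page_clear_dirty(): the call site already holds the VM object lock, which
is now sufficient.  A hypothetical call site under the new rule (the locking
macros and vm_page_lookup() are existing kernel interfaces; the surrounding
variables are invented for illustration):

	VM_OBJECT_LOCK(object);
	m = vm_page_lookup(object, pindex);
	if (m != NULL)
		vm_page_clear_dirty(m, base, PAGE_SIZE - base);
	VM_OBJECT_UNLOCK(object);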

sys/kern/vfs_bio.c

@@ -3630,7 +3630,6 @@ vfs_clean_pages(struct buf *bp)
 	KASSERT(bp->b_offset != NOOFFSET,
 	    ("vfs_clean_pages: no buffer offset"));
 	VM_OBJECT_LOCK(bp->b_bufobj->bo_object);
-	vm_page_lock_queues();
 	for (i = 0; i < bp->b_npages; i++) {
 		m = bp->b_pages[i];
 		noff = (foff + PAGE_SIZE) & ~(off_t)PAGE_MASK;
@@ -3642,7 +3641,6 @@ vfs_clean_pages(struct buf *bp)
 		/* vm_page_clear_dirty(m, foff & PAGE_MASK, eoff - foff); */
 		foff = noff;
 	}
-	vm_page_unlock_queues();
 	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
 }

sys/vm/vm_page.c

@@ -170,6 +170,7 @@ TUNABLE_INT("vm.boot_pages", &boot_pages);
 SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
 	"number of pages allocated for bootstrapping the VM system");
 
+static void vm_page_clear_dirty_mask(vm_page_t m, int pagebits);
 static void vm_page_queue_remove(int queue, vm_page_t m);
 static void vm_page_enqueue(int queue, vm_page_t m);
@@ -2072,6 +2073,28 @@ vm_page_set_valid(vm_page_t m, int base, int size)
 	m->valid |= vm_page_bits(base, size);
 }
 
+/*
+ *	Clear the given bits from the specified page's dirty field.
+ */
+static __inline void
+vm_page_clear_dirty_mask(vm_page_t m, int pagebits)
+{
+
+	/*
+	 * If the object is locked and the page is neither VPO_BUSY nor
+	 * PG_WRITEABLE, then the page's dirty field cannot possibly be
+	 * modified by a concurrent pmap operation.
+	 */
+	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+	if ((m->oflags & VPO_BUSY) == 0 && (m->flags & PG_WRITEABLE) == 0)
+		m->dirty &= ~pagebits;
+	else {
+		vm_page_lock_queues();
+		m->dirty &= ~pagebits;
+		vm_page_unlock_queues();
+	}
+}
+
 /*
  *	vm_page_set_validclean:
  *
@@ -2087,9 +2110,8 @@ vm_page_set_valid(vm_page_t m, int base, int size)
 void
 vm_page_set_validclean(vm_page_t m, int base, int size)
 {
-	int pagebits;
-	int frag;
-	int endoff;
+	u_int oldvalid;
+	int endoff, frag, pagebits;
 
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	if (size == 0)	/* handle degenerate case */
@@ -2126,6 +2148,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 	 * clear dirty bits for DEV_BSIZE chunks that are fully within
 	 * the range.
 	 */
+	oldvalid = m->valid;
 	pagebits = vm_page_bits(base, size);
 	m->valid |= pagebits;
 #if 0	/* NOT YET */
@@ -2138,21 +2161,35 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 	}
 	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
 #endif
-	m->dirty &= ~pagebits;
 	if (base == 0 && size == PAGE_SIZE) {
-		pmap_clear_modify(m);
+		/*
+		 * The page can only be modified within the pmap if it is
+		 * mapped, and it can only be mapped if it was previously
+		 * fully valid.
+		 */
+		if (oldvalid == VM_PAGE_BITS_ALL)
+			/*
+			 * Perform the pmap_clear_modify() first.  Otherwise,
+			 * a concurrent pmap operation, such as
+			 * pmap_protect(), could clear a modification in the
+			 * pmap and set the dirty field on the page before
+			 * pmap_clear_modify() had begun and after the dirty
+			 * field was cleared here.
+			 */
+			pmap_clear_modify(m);
 		m->dirty = 0;
 		m->oflags &= ~VPO_NOSYNC;
-	}
+	} else if (oldvalid != VM_PAGE_BITS_ALL)
+		m->dirty &= ~pagebits;
+	else
+		vm_page_clear_dirty_mask(m, pagebits);
 }
 
 void
 vm_page_clear_dirty(vm_page_t m, int base, int size)
 {
 
-	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
-	if ((m->flags & PG_WRITEABLE) != 0)
-		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	m->dirty &= ~vm_page_bits(base, size);
+	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
 }
 
 /*
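
Two points in the vm_page.c changes deserve emphasis.  First, the new
oldvalid test in vm_page_set_validclean() relies on the invariant stated in
the comment: a page can be mapped, and therefore concurrently dirtied through
the pmap, only if it was fully valid beforehand.  When oldvalid is not
VM_PAGE_BITS_ALL, no such race is possible and the plain
m->dirty &= ~pagebits needs no extra synchronization; only a previously
fully valid page has to go through vm_page_clear_dirty_mask().  Second,
calling pmap_clear_modify() before clearing the dirty field matters: in the
opposite order, a concurrent pmap_protect() could transfer a modify bit from
the pmap into m->dirty in the window between the two steps, leaving the page
marked dirty immediately after it was cleaned.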

sys/vm/vnode_pager.c

@@ -429,9 +429,7 @@ vnode_pager_setsize(vp, nsize)
 			 * bits.  This would prevent bogus_page
 			 * replacement from working properly.
 			 */
-			vm_page_lock_queues();
 			vm_page_clear_dirty(m, base, PAGE_SIZE - base);
-			vm_page_unlock_queues();
 		} else if ((nsize & PAGE_MASK) &&
 		    __predict_false(object->cache != NULL)) {
 			vm_page_cache_free(object, OFF_TO_IDX(nsize),