Reimplement the page's NOSYNC flag as an object-synchronized instead of a
page queues-synchronized flag.  Reduce the scope of the page queues lock in
vm_fault() accordingly.

Move vm_fault()'s call to vm_object_set_writeable_dirty() outside of the
scope of the page queues lock.  Additionally, eliminate an unnecessary
dereference in computing the argument that is passed to
vm_object_set_writeable_dirty().

Reviewed by:	tegge
parent f6dcb46835
commit cc1f2c465b
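
The heart of the change is which lock guards the page's NOSYNC bit: before,
it lived in the page queues-synchronized flags field (PG_NOSYNC); after, it
lives in the object-synchronized oflags field (VPO_NOSYNC), so vm_fault() can
drop the page queues lock as soon as the last queues-synchronized bit
(PG_WRITEABLE) is set.  Below is a minimal user-space sketch of the two
regimes; the toy_page struct, the two pthread mutexes, the TOY_* constants,
and both function names are illustrative stand-ins, not the kernel's own
types or API.

#include <pthread.h>

/* Stand-ins for the page queues lock and the VM object lock. */
static pthread_mutex_t page_queues_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t object_lock = PTHREAD_MUTEX_INITIALIZER;

struct toy_page {
	unsigned flags;		/* queues-synchronized, like m->flags */
	unsigned oflags;	/* object-synchronized, like m->oflags */
	int	dirty;
};

#define	TOY_PG_WRITEABLE	0x0001	/* stays queues-synchronized */
#define	TOY_PG_NOSYNC		0x0400	/* before: lived in flags */
#define	TOY_VPO_NOSYNC		0x0400	/* after: lives in oflags */

/* Before: updating NOSYNC forces the queues lock across the whole block. */
static void
write_fault_before(struct toy_page *m, int entry_nosync)
{
	pthread_mutex_lock(&page_queues_lock);
	m->flags |= TOY_PG_WRITEABLE;
	if (entry_nosync) {
		if (m->dirty == 0)
			m->flags |= TOY_PG_NOSYNC;
	} else {
		m->flags &= ~TOY_PG_NOSYNC;
	}
	pthread_mutex_unlock(&page_queues_lock);
}

/*
 * After: only PG_WRITEABLE still needs the queues lock; the NOSYNC bit
 * moves to oflags, which the object lock covers.  (In the kernel, vm_fault()
 * already holds the object's lock at this point, so the change removes a
 * lock acquisition rather than adding one.)
 */
static void
write_fault_after(struct toy_page *m, int entry_nosync)
{
	pthread_mutex_lock(&page_queues_lock);
	m->flags |= TOY_PG_WRITEABLE;
	pthread_mutex_unlock(&page_queues_lock);	/* reduced scope */

	pthread_mutex_lock(&object_lock);
	if (entry_nosync) {
		if (m->dirty == 0)
			m->oflags |= TOY_VPO_NOSYNC;
	} else {
		m->oflags &= ~TOY_VPO_NOSYNC;
	}
	pthread_mutex_unlock(&object_lock);
}

int
main(void)
{
	struct toy_page m = { 0, 0, 0 };

	write_fault_before(&m, 1);
	write_fault_after(&m, 1);
	return (0);
}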
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -842,14 +842,15 @@ RetryFault:;
 	if (prot & VM_PROT_WRITE) {
 		vm_page_lock_queues();
 		vm_page_flag_set(fs.m, PG_WRITEABLE);
-		vm_object_set_writeable_dirty(fs.m->object);
+		vm_page_unlock_queues();
+		vm_object_set_writeable_dirty(fs.object);
 
 		/*
 		 * If the fault is a write, we know that this page is being
 		 * written NOW so dirty it explicitly to save on
 		 * pmap_is_modified() calls later.
 		 *
-		 * If this is a NOSYNC mmap we do not want to set PG_NOSYNC
+		 * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC
 		 * if the page is already dirty to prevent data written with
 		 * the expectation of being synced from not being synced.
 		 * Likewise if this entry does not request NOSYNC then make
@@ -861,11 +862,10 @@ RetryFault:;
 		 */
 		if (fs.entry->eflags & MAP_ENTRY_NOSYNC) {
 			if (fs.m->dirty == 0)
-				vm_page_flag_set(fs.m, PG_NOSYNC);
+				fs.m->oflags |= VPO_NOSYNC;
 		} else {
-			vm_page_flag_clear(fs.m, PG_NOSYNC);
+			fs.m->oflags &= ~VPO_NOSYNC;
 		}
-		vm_page_unlock_queues();
 		if (fault_flags & VM_FAULT_DIRTY) {
 			vm_page_dirty(fs.m);
 			vm_pager_page_unswapped(fs.m);
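
For context, the NOSYNC request that vm_fault() honors above originates with
userland's MAP_NOSYNC flag to mmap(2), which asks that dirty pages in the
mapping not be written out by the periodic syncer.  A minimal FreeBSD example
of a mapping whose first write would take this code path follows; the file
path is arbitrary and most error handling is omitted for brevity.

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	size_t len = 4096;
	int fd = open("/tmp/nosync.dat", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || ftruncate(fd, (off_t)len) != 0)
		return (1);
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_SHARED | MAP_NOSYNC, fd, 0);
	if (p == MAP_FAILED)
		return (1);
	p[0] = 1;	/* write fault on a clean page sets the NOSYNC bit */
	munmap(p, len);
	close(fd);
	return (0);
}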
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -687,7 +687,7 @@ vm_object_terminate(vm_object_t object)
  *
  *	Clean all dirty pages in the specified range of object.  Leaves page
  *	on whatever queue it is currently on.   If NOSYNC is set then do not
- *	write out pages with PG_NOSYNC set (originally comes from MAP_NOSYNC),
+ *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
  *	leaving the object dirty.
  *
  *	When stuffing pages asynchronously, allow clustering.  XXX we need a
@@ -765,7 +765,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
 			 * If we have been asked to skip nosync pages and
 			 * this is a nosync page, we can't continue.
 			 */
-			if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
+			if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
 				if (--scanlimit == 0)
 					break;
 				++tscan;
@@ -805,7 +805,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
 	clearobjflags = 1;
 	TAILQ_FOREACH(p, &object->memq, listq) {
 		vm_page_flag_set(p, PG_CLEANCHK);
-		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
+		if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC))
 			clearobjflags = 0;
 		else
 			pmap_remove_write(p);
@@ -853,7 +853,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
 		 * nosync page, skip it.  Note that the object flags were
 		 * not cleared in this case so we do not have to set them.
 		 */
-		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC)) {
+		if ((flags & OBJPC_NOSYNC) && (p->oflags & VPO_NOSYNC)) {
 			vm_page_flag_clear(p, PG_CLEANCHK);
 			continue;
 		}
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -1580,7 +1580,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 	/*
 	 * Set valid, clear dirty bits.  If validating the entire
 	 * page we can safely clear the pmap modify bit.  We also
-	 * use this opportunity to clear the PG_NOSYNC flag.  If a process
+	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
 	 * takes a write fault on a MAP_NOSYNC memory area the flag will
 	 * be set again.
 	 *
@@ -1603,7 +1603,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 	m->dirty &= ~pagebits;
 	if (base == 0 && size == PAGE_SIZE) {
 		pmap_clear_modify(m);
-		vm_page_flag_clear(m, PG_NOSYNC);
+		m->oflags &= ~VPO_NOSYNC;
 	}
 }
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -144,6 +144,7 @@ struct vm_page {
  */
 #define	VPO_WANTED	0x0002	/* someone is waiting for page */
 #define	VPO_SWAPINPROG	0x0200	/* swap I/O in progress on page */
+#define	VPO_NOSYNC	0x0400	/* do not collect for syncer */
 
 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
 #if PAGE_SIZE == 32768
@@ -226,7 +227,6 @@ extern struct pq_coloring page_queue_coloring;
 #define	PG_ZERO		0x0040	/* page is zeroed */
 #define	PG_REFERENCED	0x0080	/* page has been referenced */
 #define	PG_CLEANCHK	0x0100	/* page will be checked for cleaning */
-#define	PG_NOSYNC	0x0400	/* do not collect for syncer */
 #define	PG_UNMANAGED	0x0800	/* No PV management for page */
 #define	PG_MARKER	0x1000	/* special queue marker page */
 #define	PG_SLAB		0x2000	/* object pointer is actually a slab */