(5/6) Move the VPO_NOSYNC to PGA_NOSYNC to eliminate the dependency on the
object lock in vm_page_set_validclean().

Reviewed by:	kib, markj
Tested by:	pho
Sponsored by:	Netflix, Intel
Differential Revision:	https://reviews.freebsd.org/D21595

parent e249e932a5
commit 786dad5c20
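In outline, the change moves a per-page flag out of m->oflags, a plain field whose writers needed the VM object lock, into m->aflags, which is updated with atomic operations through vm_page_aflag_set() and vm_page_aflag_clear(). Below is a minimal userspace sketch of the two update disciplines; struct mini_page and the mini_* helpers are hypothetical stand-ins, with C11 atomics playing the role of the kernel's atomic aflags updates.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical miniature of struct vm_page, for illustration only. */
struct mini_page {
	pthread_mutex_t	*obj_lock;	/* stands in for the object lock */
	uint16_t	 oflags;	/* plain field: writers take obj_lock */
	_Atomic uint16_t aflags;	/* atomic field: lock-free updates */
};

#define	MP_NOSYNC	0x10	/* stand-in for VPO_NOSYNC / PGA_NOSYNC */

/* Old discipline: mutate a plain bitfield under the object lock. */
static void
mini_oflag_set(struct mini_page *p, uint16_t bits)
{
	pthread_mutex_lock(p->obj_lock);
	p->oflags |= bits;
	pthread_mutex_unlock(p->obj_lock);
}

/* New discipline: a single atomic RMW, no object lock required. */
static void
mini_aflag_set(struct mini_page *p, uint16_t bits)
{
	atomic_fetch_or_explicit(&p->aflags, bits, memory_order_relaxed);
}

static void
mini_aflag_clear(struct mini_page *p, uint16_t bits)
{
	atomic_fetch_and_explicit(&p->aflags, (uint16_t)~bits,
	    memory_order_relaxed);
}

The atomics only remove the object-lock dependency; as the vm_page.h hunk below notes, PGA_NOSYNC transitions are still serialized by holding the page busy lock.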
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -225,15 +225,14 @@ vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
 		 * FALSE, one for the map entry with MAP_ENTRY_NOSYNC
 		 * flag set, other with flag clear, race, it is
 		 * possible for the no-NOSYNC thread to see m->dirty
-		 * != 0 and not clear VPO_NOSYNC.  Take vm_page lock
-		 * around manipulation of VPO_NOSYNC and
-		 * vm_page_dirty() call, to avoid the race and keep
-		 * m->oflags consistent.
+		 * != 0 and not clear PGA_NOSYNC.  Take vm_page lock
+		 * around manipulation of PGA_NOSYNC and
+		 * vm_page_dirty() call to avoid the race.
 		 */
 		vm_page_lock(m);
 
 		/*
-		 * If this is a NOSYNC mmap we do not want to set VPO_NOSYNC
+		 * If this is a NOSYNC mmap we do not want to set PGA_NOSYNC
 		 * if the page is already dirty to prevent data written with
 		 * the expectation of being synced from not being synced.
 		 * Likewise if this entry does not request NOSYNC then make
@@ -242,10 +241,10 @@ vm_fault_dirty(vm_map_entry_t entry, vm_page_t m, vm_prot_t prot,
 		 */
 		if ((entry->eflags & MAP_ENTRY_NOSYNC) != 0) {
 			if (m->dirty == 0) {
-				m->oflags |= VPO_NOSYNC;
+				vm_page_aflag_set(m, PGA_NOSYNC);
 			}
 		} else {
-			m->oflags &= ~VPO_NOSYNC;
+			vm_page_aflag_clear(m, PGA_NOSYNC);
 		}
 
 		/*
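The vm_fault_dirty() hunk above keeps the original MAP_NOSYNC policy: the no-sync flag is only set on a still-clean page, so data already written under a synced mapping keeps its writeback guarantee, and a fault through a non-NOSYNC entry clears the flag again. A condensed, hypothetical rendering of that decision, reusing the mini_page helpers from the sketch above (MP_ENTRY_NOSYNC is likewise a stand-in for MAP_ENTRY_NOSYNC):

#define	MP_ENTRY_NOSYNC	0x1	/* hypothetical MAP_ENTRY_NOSYNC stand-in */

static void
mini_fault_dirty(int entry_eflags, struct mini_page *p, int page_dirty)
{
	if ((entry_eflags & MP_ENTRY_NOSYNC) != 0) {
		/* Only a page with no dirty bits may become no-sync. */
		if (page_dirty == 0)
			mini_aflag_set(p, MP_NOSYNC);
	} else {
		/* A fault through a synced mapping clears the flag. */
		mini_aflag_clear(p, MP_NOSYNC);
	}
}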
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -772,12 +772,14 @@ static boolean_t
 vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags)
 {
 
+	vm_page_assert_busied(p);
+
 	/*
 	 * If we have been asked to skip nosync pages and this is a
 	 * nosync page, skip it.  Note that the object flags were not
 	 * cleared in this case so we do not have to set them.
 	 */
-	if ((flags & OBJPC_NOSYNC) != 0 && (p->oflags & VPO_NOSYNC) != 0) {
+	if ((flags & OBJPC_NOSYNC) != 0 && (p->aflags & PGA_NOSYNC) != 0) {
 		*clearobjflags = FALSE;
 		return (FALSE);
 	} else {
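The skip test above is the consumer side: during an OBJPC_NOSYNC cleaning pass a no-sync page is left dirty, and *clearobjflags is cleared so the object is not marked fully clean afterwards. A hypothetical skeleton of the same shape, again on the mini_page model:

#define	MINI_OBJPC_NOSYNC	0x4	/* stand-in for OBJPC_NOSYNC */

/* Returns nonzero when the page should be written back. */
static int
mini_page_remove_write(struct mini_page *p, int flags, int *allclean)
{
	/* Caller holds the page busy, as vm_page_assert_busied() checks. */
	if ((flags & MINI_OBJPC_NOSYNC) != 0 &&
	    (atomic_load_explicit(&p->aflags, memory_order_relaxed) &
	    MP_NOSYNC) != 0) {
		*allclean = 0;	/* the object stays marked dirty */
		return (0);	/* skip this page */
	}
	return (1);
}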
@@ -791,7 +793,7 @@ vm_object_page_remove_write(vm_page_t p, int flags, boolean_t *clearobjflags)
  *
  *	Clean all dirty pages in the specified range of object.  Leaves page
  *	on whatever queue it is currently on.  If NOSYNC is set then do not
- *	write out pages with VPO_NOSYNC set (originally comes from MAP_NOSYNC),
+ *	write out pages with PGA_NOSYNC set (originally comes from MAP_NOSYNC),
  *	leaving the object dirty.
  *
  *	When stuffing pages asynchronously, allow clustering.  XXX we need a
@@ -2270,7 +2272,6 @@ void
 vm_object_unbusy(vm_object_t obj)
 {
 
-	VM_OBJECT_ASSERT_LOCKED(obj);
 
 	refcount_release(&obj->busy);
 }
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -209,7 +209,7 @@ struct vm_object {
 
 #define	OBJPC_SYNC	0x1			/* sync I/O */
 #define	OBJPC_INVAL	0x2			/* invalidate */
-#define	OBJPC_NOSYNC	0x4			/* skip if VPO_NOSYNC */
+#define	OBJPC_NOSYNC	0x4			/* skip if PGA_NOSYNC */
 
 /*
  * The following options are supported by vm_object_page_remove().
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2506,7 +2506,7 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
 			KASSERT((m->oflags & (VPO_SWAPINPROG |
 			    VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
 			    ("page %p has unexpected oflags", m));
-			/* Don't care: VPO_NOSYNC. */
+			/* Don't care: PGA_NOSYNC. */
 			run_ext = 1;
 		} else
 			run_ext = 0;
@@ -2655,7 +2655,7 @@ vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
 			KASSERT((m->oflags & (VPO_SWAPINPROG |
 			    VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
 			    ("page %p has unexpected oflags", m));
-			/* Don't care: VPO_NOSYNC. */
+			/* Don't care: PGA_NOSYNC. */
 			if (!vm_page_none_valid(m)) {
 				/*
 				 * First, try to allocate a new page
@@ -2721,7 +2721,6 @@ vm_page_reclaim_run(int req_class, int domain, u_long npages, vm_page_t m_run,
 				    ~PGA_QUEUE_STATE_MASK;
 				KASSERT(m_new->oflags == VPO_UNMANAGED,
 				    ("page %p is managed", m_new));
-				m_new->oflags = m->oflags & VPO_NOSYNC;
 				pmap_copy_page(m, m_new);
 				m_new->valid = m->valid;
 				m_new->dirty = m->dirty;
@@ -4703,8 +4702,6 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 	vm_page_bits_t oldvalid, pagebits;
 	int endoff, frag;
 
-	/* Object lock for VPO_NOSYNC */
-	VM_OBJECT_ASSERT_WLOCKED(m->object);
 	vm_page_assert_busied(m);
 	if (size == 0)			/* handle degenerate case */
 		return;
@@ -4732,7 +4729,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 	/*
 	 * Set valid, clear dirty bits.  If validating the entire
 	 * page we can safely clear the pmap modify bit.  We also
-	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
+	 * use this opportunity to clear the PGA_NOSYNC flag.  If a process
 	 * takes a write fault on a MAP_NOSYNC memory area the flag will
 	 * be set again.
 	 *
@@ -4773,7 +4770,7 @@ vm_page_set_validclean(vm_page_t m, int base, int size)
 		 */
 		pmap_clear_modify(m);
 		m->dirty = 0;
-		m->oflags &= ~VPO_NOSYNC;
+		vm_page_aflag_clear(m, PGA_NOSYNC);
 	} else if (oldvalid != VM_PAGE_BITS_ALL && vm_page_xbusied(m))
 		m->dirty &= ~pagebits;
 	else
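This is the payoff of the series step: with the flag in aflags, vm_page_set_validclean() asserts only that the page is busied, and the object-lock assertion is gone. In the mini_page model, the fully-validated path reduces to something like the following (a hypothetical condensation, not the kernel function):

static void
mini_set_validclean_full(struct mini_page *p)
{
	/*
	 * The caller holds the page busy; no object lock is taken.
	 * (pmap_clear_modify() and the dirty/valid-bit bookkeeping of
	 * the real function are elided from this model.)
	 */
	mini_aflag_clear(p, MP_NOSYNC);	/* re-set on the next NOSYNC fault */
}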
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -287,7 +287,6 @@ struct vm_page {
 #define	VPO_SWAPSLEEP	0x02		/* waiting for swap to finish */
 #define	VPO_UNMANAGED	0x04		/* no PV management for page */
 #define	VPO_SWAPINPROG	0x08		/* swap I/O in progress on page */
-#define	VPO_NOSYNC	0x10		/* do not collect for syncer */
 
 /*
  * Busy page implementation details.
@@ -390,6 +389,8 @@ extern struct mtx_padalign pa_lock[];
  * PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
  * at least one executable mapping.  It is not consumed by the MI VM layer.
  *
+ * PGA_NOSYNC must be set and cleared with the page busy lock held.
+ *
  * PGA_ENQUEUED is set and cleared when a page is inserted into or removed
  * from a page queue, respectively.  It determines whether the plinks.q field
  * of the page is valid.  To set or clear this flag, the queue lock for the
@@ -420,6 +421,7 @@ extern struct mtx_padalign pa_lock[];
 #define	PGA_DEQUEUE	0x10		/* page is due to be dequeued */
 #define	PGA_REQUEUE	0x20		/* page is due to be requeued */
 #define	PGA_REQUEUE_HEAD 0x40		/* page requeue should bypass LRU */
+#define	PGA_NOSYNC	0x80		/* do not collect for syncer */
 
 #define	PGA_QUEUE_STATE_MASK	(PGA_ENQUEUED | PGA_DEQUEUE | PGA_REQUEUE | \
 				PGA_REQUEUE_HEAD)
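PGA_NOSYNC claims the next free aflags bit (0x80) after the queue-state flags, and it has to stay disjoint from PGA_QUEUE_STATE_MASK, since the queue-state bits are manipulated as a group (note the ~PGA_QUEUE_STATE_MASK use in the vm_page_reclaim_run() hunk above). A hypothetical compile-time guard in the spirit of the kernel's _Static_assert/CTASSERT usage; the MINI_* values mirror the defines above, with MINI_PGA_ENQUEUED assumed to be 0x08:

#define	MINI_PGA_ENQUEUED	0x08
#define	MINI_PGA_DEQUEUE	0x10
#define	MINI_PGA_REQUEUE	0x20
#define	MINI_PGA_REQUEUE_HEAD	0x40
#define	MINI_PGA_NOSYNC		0x80

#define	MINI_PGA_QUEUE_STATE_MASK	(MINI_PGA_ENQUEUED |		\
	MINI_PGA_DEQUEUE | MINI_PGA_REQUEUE | MINI_PGA_REQUEUE_HEAD)

/* The new flag must not alias any queue-state bit. */
_Static_assert((MINI_PGA_NOSYNC & MINI_PGA_QUEUE_STATE_MASK) == 0,
    "PGA_NOSYNC collides with the queue-state flags");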