The page flag PGA_WRITEABLE is set and cleared exclusively by the pmap
layer, but it is read directly by the MI VM layer.  This change introduces
pmap_page_is_write_mapped() in order to completely encapsulate all direct
access to PGA_WRITEABLE in the pmap layer.

Aesthetics aside, I am making this change because amd64 will likely begin
using an alternative method to track write mappings, and having
pmap_page_is_write_mapped() in place allows me to make such a change
without further modification to the MI VM layer.

As an added bonus, tidy up some nearby comments concerning page flags.

Reviewed by:	kib
MFC after:	6 weeks
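
For context, here is a minimal sketch (not part of the commit) of the pattern this change establishes: each machine-dependent <machine/pmap.h> defines pmap_page_is_write_mapped(), and MI VM code calls it instead of testing PGA_WRITEABLE directly. The helper function below is hypothetical and exists only to illustrate a call site; the macro mirrors the per-architecture definitions added in the diff, and vm_object_set_writeable_dirty() is the existing MI routine used the same way in vm_page_insert().

/* Supplied by each MD <machine/pmap.h>, as added by this commit. */
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)

/*
 * Hypothetical MI-layer call site: the MI code asks the pmap layer
 * whether the page has a writeable mapping rather than reading the
 * PGA_WRITEABLE flag itself.
 */
static void
vm_mark_object_dirty_if_write_mapped(vm_page_t m, vm_object_t object)
{

	if (pmap_page_is_write_mapped(m))
		vm_object_set_writeable_dirty(object);
}
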
Commit 6031c68de4 (parent bc5116ba42)
Author: Alan Cox
Date:   2012-06-16 18:56:19 +00:00
13 changed files with 25 additions and 14 deletions


@@ -309,6 +309,7 @@ extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
#define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode)
+ #define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz))
void pmap_bootstrap(vm_paddr_t *);


@@ -78,6 +78,7 @@
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
+ #define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_page_set_memattr(m, ma) (void)0
/*


@@ -498,6 +498,7 @@ extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
#define pmap_page_get_memattr(m) ((vm_memattr_t)(m)->md.pat_mode)
+ #define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_unmapbios(va, sz) pmap_unmapdev((va), (sz))
/*


@@ -118,6 +118,7 @@ extern int pmap_vhpt_log2size;
#define pmap_page_get_memattr(m) ((m)->md.memattr)
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
+ #define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_mapbios(pa, sz) pmap_mapdev(pa, sz)
#define pmap_unmapbios(va, sz) pmap_unmapdev(va, sz)


@@ -151,6 +151,7 @@ extern vm_paddr_t dump_avail[PHYS_AVAIL_ENTRIES + 2];
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
+ #define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_page_set_memattr(m, ma) (void)0
void pmap_bootstrap(void);


@@ -223,6 +223,8 @@ extern struct pmap kernel_pmap_store;
#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx)
#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
+ #define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
void pmap_bootstrap(vm_offset_t, vm_offset_t);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t);


@@ -80,6 +80,7 @@ struct pmap {
#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx)
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
+ #define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
#define pmap_page_set_memattr(m, ma) (void)0
void pmap_bootstrap(u_int cpu_impl);


@@ -80,10 +80,11 @@ struct pmap_statistics {
typedef struct pmap_statistics *pmap_statistics_t;
/*
- * Each machine dependent implementation is expected to provide:
+ * Each machine-dependent implementation is required to provide:
*
* vm_memattr_t pmap_page_get_memattr(vm_page_t);
* boolean_t pmap_page_is_mapped(vm_page_t);
+ * boolean_t pmap_page_is_write_mapped(vm_page_t);
* void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
*/
#include <machine/pmap.h>


@@ -1593,7 +1593,7 @@ swp_pager_async_iodone(struct buf *bp)
* status, then finish the I/O ( which decrements the
* busy count and possibly wakes waiter's up ).
*/
- KASSERT((m->aflags & PGA_WRITEABLE) == 0,
+ KASSERT(!pmap_page_is_write_mapped(m),
("swp_pager_async_iodone: page %p is not write"
" protected", m));
vm_page_undirty(m);


@@ -930,7 +930,7 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
* Since we are inserting a new and possibly dirty page,
* update the object's OBJ_MIGHTBEDIRTY flag.
*/
- if (m->aflags & PGA_WRITEABLE)
+ if (pmap_page_is_write_mapped(m))
vm_object_set_writeable_dirty(object);
}
@@ -2675,11 +2675,11 @@ vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
/*
* If the object is locked and the page is neither VPO_BUSY nor
- * PGA_WRITEABLE, then the page's dirty field cannot possibly be
+ * write mapped, then the page's dirty field cannot possibly be
* set by a concurrent pmap operation.
*/
VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
- if ((m->oflags & VPO_BUSY) == 0 && (m->aflags & PGA_WRITEABLE) == 0)
+ if ((m->oflags & VPO_BUSY) == 0 && !pmap_page_is_write_mapped(m))
m->dirty &= ~pagebits;
else {
/*


@@ -237,20 +237,22 @@ extern struct vpglocks pa_lock[];
#endif
#define vm_page_queue_free_mtx vm_page_queue_free_lock.data
/*
* These are the flags defined for vm_page.
*
- * aflags are updated by atomic accesses. Use the vm_page_aflag_set()
+ * aflags are updated by atomic accesses.  Use the vm_page_aflag_set()
* and vm_page_aflag_clear() functions to set and clear the flags.
*
* PGA_REFERENCED may be cleared only if the object containing the page is
- * locked.
+ * locked. It is set by both the MI and MD VM layers.
*
* PGA_WRITEABLE is set exclusively on managed pages by pmap_enter(). When it
- * does so, the page must be VPO_BUSY.
+ * does so, the page must be VPO_BUSY. The MI VM layer must never access this
+ * flag directly. Instead, it should call pmap_page_is_write_mapped().
*
* PGA_EXECUTABLE may be set by pmap routines, and indicates that a page has
- * at least one executable mapping. It is not consumed by the VM layer.
+ * at least one executable mapping. It is not consumed by the MI VM layer.
*/
#define PGA_WRITEABLE 0x01 /* page may be mapped writeable */
#define PGA_REFERENCED 0x02 /* page has been referenced */
@@ -262,12 +264,12 @@ extern struct vpglocks pa_lock[];
*/
#define PG_CACHED 0x01 /* page is cached */
#define PG_FREE 0x02 /* page is free */
- #define PG_FICTITIOUS 0x04 /* physical page doesn't exist (O) */
+ #define PG_FICTITIOUS 0x04 /* physical page doesn't exist */
#define PG_ZERO 0x08 /* page is zeroed */
#define PG_MARKER 0x10 /* special queue marker page */
#define PG_SLAB 0x20 /* object pointer is actually a slab */
#define PG_WINATCFLS 0x40 /* flush dirty page on inactive q */
- #define PG_NODUMP 0x80 /* don't include this page in the dump */
+ #define PG_NODUMP 0x80 /* don't include this page in a dump */
/*
* Misc constants.


@@ -503,7 +503,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags, int mreq, int *prunlen,
vm_page_t mt = mc[i];
KASSERT(pageout_status[i] == VM_PAGER_PEND ||
- (mt->aflags & PGA_WRITEABLE) == 0,
+ !pmap_page_is_write_mapped(mt),
("vm_pageout_flush: page %p is not write protected", mt));
switch (pageout_status[i]) {
case VM_PAGER_OK:
@@ -899,7 +899,7 @@ vm_pageout_scan(int pass)
* be updated.
*/
if (m->dirty != VM_PAGE_BITS_ALL &&
- (m->aflags & PGA_WRITEABLE) != 0) {
+ pmap_page_is_write_mapped(m)) {
/*
* Avoid a race condition: Unless write access is
* removed from the page, another processor could


@@ -1146,7 +1146,7 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
m = ma[ncount - 1];
KASSERT(m->busy > 0,
("vnode_pager_generic_putpages: page %p is not busy", m));
- KASSERT((m->aflags & PGA_WRITEABLE) == 0,
+ KASSERT(!pmap_page_is_write_mapped(m),
("vnode_pager_generic_putpages: page %p is not read-only", m));
vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
pgoff);