On entry to pmap_enter(), assert that the page is busy. While I'm
here, make the style of assertion used by pmap_enter() consistent
across all architectures.
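
For reference, the assertion that each pmap_enter() implementation now
makes on entry takes the following form (a representative excerpt; the
per-architecture diffs below carry the exact change for each pmap):

        KASSERT((m->oflags & VPO_BUSY) != 0,
            ("pmap_enter: page %p is not busy", m));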

On entry to pmap_remove_write(), assert that the page is neither
unmanaged nor fictitious, since we cannot remove write access to
either kind of page.

With the push down of the page queues lock, pmap_remove_write() cannot
condition its behavior on the state of the PG_WRITEABLE flag if the
page is busy.  Assert that the object containing the page is locked.
This allows us to know that the page will neither become busy nor will
PG_WRITEABLE be set on it while pmap_remove_write() is running.
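
Taken together, the entry checks in each pmap_remove_write()
implementation now follow this common pattern (excerpted from the diffs
below; the arm variant inverts the test because its work is done inside
the conditional):

        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
            ("pmap_remove_write: page %p is not managed", m));

        /*
         * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
         * another thread while the object is locked.  Thus, if PG_WRITEABLE
         * is clear, no page table entries need updating.
         */
        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
        if ((m->oflags & VPO_BUSY) == 0 &&
            (m->flags & PG_WRITEABLE) == 0)
                return;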

Correct a long-standing bug in vm_page_cowsetup().  We cannot possibly
do copy-on-write-based zero-copy transmit on unmanaged or fictitious
pages, so don't even try.  Previously, the call to pmap_remove_write()
would have failed silently.
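
Concretely, vm_page_cowsetup() now rejects such pages up front and holds
the object lock across the call, so that the new assertions in
pmap_remove_write() are satisfied (excerpted from the vm/vm_page.c diff
below):

        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
            m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object))
                return (EBUSY);
        m->cow++;
        pmap_remove_write(m);
        VM_OBJECT_UNLOCK(m->object);
        return (0);
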
Author: Alan Cox
Date:   2010-05-16 23:45:10 +00:00
Commit: 9ab6032f73 (parent 0532c3a5a5)
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=208175

13 changed files with 150 additions and 30 deletions

sys/amd64/amd64/pmap.c

@@ -3139,7 +3139,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
         va = trunc_page(va);
         KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
         KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
-            ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)", va));
+            ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
+            va));
+        KASSERT((m->oflags & VPO_BUSY) != 0,
+            ("pmap_enter: page %p is not busy", m));
         mpte = NULL;
@@ -4240,7 +4243,16 @@ pmap_remove_write(vm_page_t m)
         pt_entry_t oldpte, *pte;
         vm_offset_t va;
 
-        if ((m->flags & PG_FICTITIOUS) != 0 ||
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("pmap_remove_write: page %p is not managed", m));
+
+        /*
+         * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+         * another thread while the object is locked.  Thus, if PG_WRITEABLE
+         * is clear, no page table entries need updating.
+         */
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        if ((m->oflags & VPO_BUSY) == 0 &&
             (m->flags & PG_WRITEABLE) == 0)
                 return;
         vm_page_lock_queues();

sys/arm/arm/pmap.c

@@ -3318,6 +3318,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
         u_int oflags;
         vm_paddr_t pa;
 
+        KASSERT((m->oflags & VPO_BUSY) != 0 || (flags & M_NOWAIT) != 0,
+            ("pmap_enter_locked: page %p is not busy", m));
         PMAP_ASSERT_LOCKED(pmap);
         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
         if (va == vector_page) {
@@ -4527,7 +4529,17 @@
 void
 pmap_remove_write(vm_page_t m)
 {
-        if (m->flags & PG_WRITEABLE) {
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("pmap_remove_write: page %p is not managed", m));
+
+        /*
+         * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+         * another thread while the object is locked.  Thus, if PG_WRITEABLE
+         * is clear, no page table entries need updating.
+         */
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        if ((m->oflags & VPO_BUSY) != 0 ||
+            (m->flags & PG_WRITEABLE) != 0) {
                 vm_page_lock_queues();
                 pmap_clearbit(m, PVF_WRITE);
                 vm_page_unlock_queues();

sys/i386/i386/pmap.c

@@ -3268,7 +3268,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
         va = trunc_page(va);
         KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
         KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
-            ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va));
+            ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
+            va));
+        KASSERT((m->oflags & VPO_BUSY) != 0,
+            ("pmap_enter: page %p is not busy", m));
         mpte = NULL;
@@ -4410,7 +4413,16 @@ pmap_remove_write(vm_page_t m)
         pt_entry_t oldpte, *pte;
         vm_offset_t va;
 
-        if ((m->flags & PG_FICTITIOUS) != 0 ||
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("pmap_remove_write: page %p is not managed", m));
+
+        /*
+         * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+         * another thread while the object is locked.  Thus, if PG_WRITEABLE
+         * is clear, no page table entries need updating.
+         */
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        if ((m->oflags & VPO_BUSY) == 0 &&
             (m->flags & PG_WRITEABLE) == 0)
                 return;
         vm_page_lock_queues();

sys/i386/xen/pmap.c

@@ -2682,12 +2682,12 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
         CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d",
             pmap, va, access, xpmap_ptom(VM_PAGE_TO_PHYS(m)), prot, wired);
         va = trunc_page(va);
-#ifdef PMAP_DIAGNOSTIC
-        if (va > VM_MAX_KERNEL_ADDRESS)
-                panic("pmap_enter: toobig");
-        if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
-                panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va);
-#endif
+        KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
+        KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
+            ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
+            va));
+        KASSERT((m->oflags & VPO_BUSY) != 0,
+            ("pmap_enter: page %p is not busy", m));
         mpte = NULL;
@@ -3780,7 +3780,16 @@ pmap_remove_write(vm_page_t m)
         pmap_t pmap;
         pt_entry_t oldpte, *pte;
 
-        if ((m->flags & PG_FICTITIOUS) != 0 ||
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("pmap_remove_write: page %p is not managed", m));
+
+        /*
+         * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+         * another thread while the object is locked.  Thus, if PG_WRITEABLE
+         * is clear, no page table entries need updating.
+         */
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        if ((m->oflags & VPO_BUSY) == 0 &&
             (m->flags & PG_WRITEABLE) == 0)
                 return;
         vm_page_lock_queues();

sys/ia64/ia64/pmap.c

@@ -1497,10 +1497,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
         oldpmap = pmap_switch(pmap);
 
         va &= ~PAGE_MASK;
-#ifdef DIAGNOSTIC
-        if (va > VM_MAX_KERNEL_ADDRESS)
-                panic("pmap_enter: toobig");
-#endif
+        KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
+        KASSERT((m->oflags & VPO_BUSY) != 0,
+            ("pmap_enter: page %p is not busy", m));
 
         /*
          * Find (or create) a pte for the given mapping.
@@ -2116,7 +2115,16 @@ pmap_remove_write(vm_page_t m)
         pv_entry_t pv;
         vm_prot_t prot;
 
-        if ((m->flags & PG_FICTITIOUS) != 0 ||
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("pmap_remove_write: page %p is not managed", m));
+
+        /*
+         * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+         * another thread while the object is locked.  Thus, if PG_WRITEABLE
+         * is clear, no page table entries need updating.
+         */
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        if ((m->oflags & VPO_BUSY) == 0 &&
             (m->flags & PG_WRITEABLE) == 0)
                 return;
         vm_page_lock_queues();

sys/mips/mips/pmap.c

@@ -1791,10 +1791,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
                 return;
 
         va &= ~PAGE_MASK;
-#ifdef PMAP_DIAGNOSTIC
-        if (va > VM_MAX_KERNEL_ADDRESS)
-                panic("pmap_enter: toobig");
-#endif
+        KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
+        KASSERT((m->oflags & VPO_BUSY) != 0,
+            ("pmap_enter: page %p is not busy", m));
 
         mpte = NULL;
@@ -2584,7 +2583,16 @@ pmap_remove_write(vm_page_t m)
         vm_offset_t va;
         pt_entry_t *pte;
 
-        if ((m->flags & PG_FICTITIOUS) != 0 ||
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("pmap_remove_write: page %p is not managed", m));
+
+        /*
+         * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+         * another thread while the object is locked.  Thus, if PG_WRITEABLE
+         * is clear, no page table entries need updating.
+         */
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        if ((m->oflags & VPO_BUSY) == 0 &&
             (m->flags & PG_WRITEABLE) == 0)
                 return;

sys/powerpc/aim/mmu_oea.c

@@ -876,6 +876,7 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
                         struct vm_page m;
 
                         m.phys_addr = translations[i].om_pa + off;
+                        m.oflags = VPO_BUSY;
                         PMAP_LOCK(&ofw_pmap);
                         moea_enter_locked(&ofw_pmap,
                             translations[i].om_va + off, &m,
@@ -1101,6 +1102,8 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
         if (pmap_bootstrapped)
                 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+        KASSERT((m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
+            ("moea_enter_locked: page %p is not busy", m));
 
         /* XXX change the pvo head for fake pages */
         if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) {
@@ -1323,7 +1326,16 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
         pmap_t pmap;
         u_int lo;
 
-        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("moea_remove_write: page %p is not managed", m));
+
+        /*
+         * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+         * another thread while the object is locked.  Thus, if PG_WRITEABLE
+         * is clear, no page table entries need updating.
+         */
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        if ((m->oflags & VPO_BUSY) == 0 &&
             (m->flags & PG_WRITEABLE) == 0)
                 return;
         vm_page_lock_queues();

sys/powerpc/aim/mmu_oea64.c

@@ -1235,6 +1235,8 @@ moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
         if (pmap_bootstrapped)
                 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+        KASSERT((m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
+            ("moea64_enter_locked: page %p is not busy", m));
 
         /* XXX change the pvo head for fake pages */
         if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) {
@@ -1519,7 +1521,16 @@ moea64_remove_write(mmu_t mmu, vm_page_t m)
         pmap_t pmap;
         uint64_t lo;
 
-        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("moea64_remove_write: page %p is not managed", m));
+
+        /*
+         * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+         * another thread while the object is locked.  Thus, if PG_WRITEABLE
+         * is clear, no page table entries need updating.
+         */
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        if ((m->oflags & VPO_BUSY) == 0 &&
             (m->flags & PG_WRITEABLE) == 0)
                 return;
         vm_page_lock_queues();

sys/powerpc/booke/pmap.c

@@ -1557,6 +1557,8 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
                 KASSERT((va <= VM_MAXUSER_ADDRESS),
                     ("mmu_booke_enter_locked: user pmap, non user va"));
         }
 
+        KASSERT((m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
+            ("mmu_booke_enter_locked: page %p is not busy", m));
         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1941,7 +1943,16 @@ mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
         pv_entry_t pv;
         pte_t *pte;
 
-        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("mmu_booke_remove_write: page %p is not managed", m));
+
+        /*
+         * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+         * another thread while the object is locked.  Thus, if PG_WRITEABLE
+         * is clear, no page table entries need updating.
+         */
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        if ((m->oflags & VPO_BUSY) == 0 &&
             (m->flags & PG_WRITEABLE) == 0)
                 return;
         vm_page_lock_queues();

sys/sparc64/sparc64/pmap.c

@@ -1351,6 +1351,8 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
         mtx_assert(&vm_page_queue_mtx, MA_OWNED);
         PMAP_LOCK_ASSERT(pm, MA_OWNED);
+        KASSERT((m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
+            ("pmap_enter_locked: page %p is not busy", m));
         PMAP_STATS_INC(pmap_nenter);
         pa = VM_PAGE_TO_PHYS(m);
@@ -1985,7 +1987,16 @@ pmap_remove_write(vm_page_t m)
         struct tte *tp;
         u_long data;
 
-        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("pmap_remove_write: page %p is not managed", m));
+
+        /*
+         * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+         * another thread while the object is locked.  Thus, if PG_WRITEABLE
+         * is clear, no page table entries need updating.
+         */
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        if ((m->oflags & VPO_BUSY) == 0 &&
             (m->flags & PG_WRITEABLE) == 0)
                 return;
         vm_page_lock_queues();

sys/sun4v/sun4v/pmap.c

@@ -1061,6 +1061,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
         vm_page_t om;
         int invlva;
 
+        KASSERT((m->oflags & VPO_BUSY) != 0,
+            ("pmap_enter: page %p is not busy", m));
         if (pmap->pm_context)
                 DPRINTF("pmap_enter(va=%lx, pa=0x%lx, prot=%x)\n", va,
                     VM_PAGE_TO_PHYS(m), prot);
@@ -1737,7 +1739,17 @@
 void
 pmap_remove_write(vm_page_t m)
 {
-        if ((m->flags & PG_WRITEABLE) == 0)
+        KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+            ("pmap_remove_write: page %p is not managed", m));
+
+        /*
+         * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
+         * another thread while the object is locked.  Thus, if PG_WRITEABLE
+         * is clear, no page table entries need updating.
+         */
+        VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
+        if ((m->oflags & VPO_BUSY) == 0 &&
+            (m->flags & PG_WRITEABLE) == 0)
                 return;
         vm_page_lock_queues();
         tte_clear_phys_bit(m, VTD_SW_W|VTD_W);

sys/vm/vm_page.c

@@ -2336,10 +2336,12 @@ vm_page_cowsetup(vm_page_t m)
 {
 
         vm_page_lock_assert(m, MA_OWNED);
-        if (m->cow == USHRT_MAX - 1)
+        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
+            m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object))
                 return (EBUSY);
         m->cow++;
         pmap_remove_write(m);
+        VM_OBJECT_UNLOCK(m->object);
         return (0);
 }

sys/vm/vm_page.h

@@ -217,8 +217,8 @@ extern struct vpglocks pa_lock[];
  * pte mappings, nor can they be removed from their objects via
  * the object, and such pages are also not on any PQ queue.
  *
- * PG_WRITEABLE is set exclusively by pmap_enter().  When it does so, either
- * the page must be VPO_BUSY or the containing object must be locked.
+ * PG_WRITEABLE is set exclusively by pmap_enter().  When it does so, the page
+ * must be VPO_BUSY.
  */
 #define PG_CACHED       0x0001          /* page is cached */
 #define PG_FREE         0x0002          /* page is free */