Advance the state of pmap locking on alpha, amd64, and i386.

 - Enable recursion on the page queues lock.  This allows calls to
   vm_page_alloc(VM_ALLOC_NORMAL) and UMA's obj_alloc() with the page
   queues lock held.  Such calls are made to allocate page table pages
   and pv entries.
 - The previous change enables a partial reversion of vm/vm_page.c
   revision 1.216, i.e., the call to vm_page_alloc() by vm_page_cowfault()
   now specifies VM_ALLOC_NORMAL rather than VM_ALLOC_INTERRUPT.
 - Add partial locking to pmap_copy().  (As a side-effect, pmap_copy()
   should now be faster on i386 SMP because it no longer generates IPIs
   for TLB shootdown on the other processors.)
 - Complete the locking of pmap_enter() and pmap_enter_quick().  (As of now,
   all changes to a user-level pmap on alpha, amd64, and i386 are performed
   with appropriate locking; see the locking sketch below.)
Author: alc
Date:   2004-07-29 18:56:31 +00:00
Parent: 85ebc0bc19
Commit: 60495ef0b2
4 changed files with 81 additions and 55 deletions
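For orientation, the locking protocol that the diffs below establish around user-level pmap updates looks roughly like the following.  This is an illustrative sketch distilled from the pmap_enter() hunks, not code from the tree; the function name pmap_update_example() is invented for the example.

	/*
	 * Sketch only: callers now take the page queues lock and then the
	 * pmap lock before modifying a user-level pmap, and helpers such as
	 * pmap_insert_entry() merely assert that both locks are held.
	 */
	static void
	pmap_update_example(pmap_t pmap, vm_offset_t va, vm_page_t m)
	{

		vm_page_lock_queues();		/* page queues lock, acquired first */
		PMAP_LOCK(pmap);		/* per-pmap lock, acquired second */

		/* ... install the PTE and maintain the pv lists ... */

		vm_page_unlock_queues();	/* released as in the hunks below */
		PMAP_UNLOCK(pmap);
	}

The _pmap_allocpte() hunks show the complementary rule: when vm_page_alloc() fails and VM_WAIT must sleep, both locks are dropped around the sleep and reacquired before the allocation is retried.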

sys/alpha/alpha/pmap.c

@@ -1070,7 +1070,11 @@ _pmap_allocpte(pmap, ptepindex)
*/
if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
+PMAP_UNLOCK(pmap);
+vm_page_unlock_queues();
VM_WAIT;
+vm_page_lock_queues();
+PMAP_LOCK(pmap);
/*
* Indicate the need to retry. While waiting, the page table
@@ -1107,10 +1111,8 @@ _pmap_allocpte(pmap, ptepindex)
pt_entry_t* l2map;
if (!pmap_pte_v(l1pte)) {
if (_pmap_allocpte(pmap, NUSERLEV3MAPS + l1index) == NULL) {
-vm_page_lock_queues();
vm_page_unhold(m);
vm_page_free(m);
-vm_page_unlock_queues();
return (NULL);
}
} else {
@@ -1402,11 +1404,11 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m)
pv->pv_pmap = pmap;
pv->pv_ptem = mpte;
-vm_page_lock_queues();
+PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+mtx_assert(&vm_page_queue_mtx, MA_OWNED);
TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count++;
-vm_page_unlock_queues();
}
/*
@@ -1697,6 +1699,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
#endif
mpte = NULL;
+vm_page_lock_queues();
+PMAP_LOCK(pmap);
/*
* In the case that a page table page is not
* resident, we are creating it here.
@@ -1762,11 +1768,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*/
if (opa) {
int err;
-vm_page_lock_queues();
-PMAP_LOCK(pmap);
err = pmap_remove_pte(pmap, pte, va);
-PMAP_UNLOCK(pmap);
-vm_page_unlock_queues();
if (err)
panic("pmap_enter: pte vanished, va: 0x%lx", va);
}
@@ -1820,6 +1822,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (prot & VM_PROT_EXECUTE)
alpha_pal_imb();
}
+vm_page_unlock_queues();
+PMAP_UNLOCK(pmap);
}
/*
@@ -1839,6 +1843,9 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
register pt_entry_t *pte;
int managed;
+vm_page_lock_queues();
+PMAP_LOCK(pmap);
/*
* In the case that a page table page is not
* resident, we are creating it here.
@@ -1892,12 +1899,10 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
pte = vtopte(va);
if (*pte) {
if (mpte != NULL) {
-vm_page_lock_queues();
pmap_unwire_pte_hold(pmap, va, mpte);
-vm_page_unlock_queues();
mpte = NULL;
}
alpha_pal_imb(); /* XXX overkill? */
-return 0;
+goto out;
}
/*
@@ -1920,8 +1925,10 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
* Now validate mapping with RO protection
*/
*pte = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m)) | PG_V | PG_KRE | PG_URE | managed;
+out:
alpha_pal_imb(); /* XXX overkill? */
+vm_page_unlock_queues();
+PMAP_UNLOCK(pmap);
return mpte;
}

sys/amd64/amd64/pmap.c

@@ -1135,7 +1135,12 @@ _pmap_allocpte(pmap, ptepindex)
*/
if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
+PMAP_UNLOCK(pmap);
+vm_page_unlock_queues();
VM_WAIT;
+vm_page_lock_queues();
+PMAP_LOCK(pmap);
/*
* Indicate the need to retry. While waiting, the page table
* page may have been allocated.
@@ -1184,10 +1189,8 @@ _pmap_allocpte(pmap, ptepindex)
if ((*pml4 & PG_V) == 0) {
/* Have to allocate a new pdp, recurse */
if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index) == NULL) {
-vm_page_lock_queues();
vm_page_unhold(m);
vm_page_free(m);
-vm_page_unlock_queues();
return (NULL);
}
} else {
@@ -1217,10 +1220,8 @@ _pmap_allocpte(pmap, ptepindex)
if ((*pml4 & PG_V) == 0) {
/* Have to allocate a new pd, recurse */
if (_pmap_allocpte(pmap, NUPDE + pdpindex) == NULL) {
-vm_page_lock_queues();
vm_page_unhold(m);
vm_page_free(m);
-vm_page_unlock_queues();
return (NULL);
}
pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
@@ -1231,10 +1232,8 @@ _pmap_allocpte(pmap, ptepindex)
if ((*pdp & PG_V) == 0) {
/* Have to allocate a new pd, recurse */
if (_pmap_allocpte(pmap, NUPDE + pdpindex) == NULL) {
-vm_page_lock_queues();
vm_page_unhold(m);
vm_page_free(m);
-vm_page_unlock_queues();
return (NULL);
}
} else {
@@ -1495,11 +1494,11 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
pv->pv_va = va;
pv->pv_pmap = pmap;
-vm_page_lock_queues();
+PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+mtx_assert(&vm_page_queue_mtx, MA_OWNED);
TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count++;
-vm_page_unlock_queues();
}
/*
@@ -1881,6 +1880,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
#endif
mpte = NULL;
+vm_page_lock_queues();
+PMAP_LOCK(pmap);
/*
* In the case that a page table page is not
* resident, we are creating it here.
@@ -1963,11 +1966,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*/
if (opa) {
int err;
-vm_page_lock_queues();
-PMAP_LOCK(pmap);
err = pmap_remove_pte(pmap, pte, va, ptepde);
-PMAP_UNLOCK(pmap);
-vm_page_unlock_queues();
if (err)
panic("pmap_enter: pte vanished, va: 0x%lx", va);
}
@@ -2016,6 +2015,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pmap_invalidate_page(pmap, va);
}
}
+vm_page_unlock_queues();
+PMAP_UNLOCK(pmap);
}
/*
@@ -2035,6 +2036,9 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
pt_entry_t *pte;
vm_paddr_t pa;
+vm_page_lock_queues();
+PMAP_LOCK(pmap);
/*
* In the case that a page table page is not
* resident, we are creating it here.
@@ -2084,11 +2088,10 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
pte = vtopte(va);
if (*pte) {
if (mpte != NULL) {
-vm_page_lock_queues();
pmap_unwire_pte_hold(pmap, va, mpte);
-vm_page_unlock_queues();
mpte = NULL;
}
-return 0;
+goto out;
}
/*
@@ -2113,7 +2116,9 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
pte_store(pte, pa | PG_V | PG_U);
else
pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
+out:
+vm_page_unlock_queues();
+PMAP_UNLOCK(pmap);
return mpte;
}
@@ -2257,6 +2262,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
if (!pmap_is_current(src_pmap))
return;
+vm_page_lock_queues();
+PMAP_LOCK(dst_pmap);
for (addr = src_addr; addr < end_addr; addr = va_next) {
pt_entry_t *src_pte, *dst_pte;
vm_page_t dstmpte, srcmpte;
@@ -2349,11 +2356,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
*dst_pte = ptetemp & ~(PG_M | PG_A);
dst_pmap->pm_stats.resident_count++;
pmap_insert_entry(dst_pmap, addr, m);
-} else {
-vm_page_lock_queues();
+} else
pmap_unwire_pte_hold(dst_pmap, addr, dstmpte);
-vm_page_unlock_queues();
-}
if (dstmpte->hold_count >= srcmpte->hold_count)
break;
}
@@ -2361,6 +2365,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
src_pte++;
}
}
+vm_page_unlock_queues();
+PMAP_UNLOCK(dst_pmap);
}
/*

sys/i386/i386/pmap.c

@@ -1181,7 +1181,12 @@ _pmap_allocpte(pmap, ptepindex)
*/
if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
+PMAP_UNLOCK(pmap);
+vm_page_unlock_queues();
VM_WAIT;
+vm_page_lock_queues();
+PMAP_LOCK(pmap);
/*
* Indicate the need to retry. While waiting, the page table
* page may have been allocated.
@@ -1556,11 +1561,11 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
pv->pv_va = va;
pv->pv_pmap = pmap;
-vm_page_lock_queues();
+PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+mtx_assert(&vm_page_queue_mtx, MA_OWNED);
TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
m->md.pv_list_count++;
-vm_page_unlock_queues();
}
/*
@@ -1912,6 +1917,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
#endif
mpte = NULL;
+vm_page_lock_queues();
+PMAP_LOCK(pmap);
+sched_pin();
/*
* In the case that a page table page is not
* resident, we are creating it here.
@@ -1930,7 +1940,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}
#endif
-pte = pmap_pte(pmap, va);
+pte = pmap_pte_quick(pmap, va);
/*
* Page Directory table entry not valid, we need a new PT page
@@ -2003,11 +2013,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*/
if (opa) {
int err;
-vm_page_lock_queues();
-PMAP_LOCK(pmap);
err = pmap_remove_pte(pmap, pte, va);
-PMAP_UNLOCK(pmap);
-vm_page_unlock_queues();
if (err)
panic("pmap_enter: pte vanished, va: 0x%x", va);
}
@@ -2054,6 +2060,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pmap_invalidate_page(pmap, va);
}
}
+sched_unpin();
+vm_page_unlock_queues();
+PMAP_UNLOCK(pmap);
}
/*
@@ -2073,6 +2082,9 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
pt_entry_t *pte;
vm_paddr_t pa;
+vm_page_lock_queues();
+PMAP_LOCK(pmap);
/*
* In the case that a page table page is not
* resident, we are creating it here.
@@ -2122,11 +2134,10 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
pte = vtopte(va);
if (*pte) {
if (mpte != NULL) {
-vm_page_lock_queues();
pmap_unwire_pte_hold(pmap, mpte);
-vm_page_unlock_queues();
mpte = NULL;
}
-return 0;
+goto out;
}
/*
@@ -2151,7 +2162,9 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
pte_store(pte, pa | PG_V | PG_U);
else
pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
+out:
+vm_page_unlock_queues();
+PMAP_UNLOCK(pmap);
return mpte;
}
@@ -2301,6 +2314,9 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
if (!pmap_is_current(src_pmap))
return;
+vm_page_lock_queues();
+PMAP_LOCK(dst_pmap);
+sched_pin();
for (addr = src_addr; addr < end_addr; addr = pdnxt) {
pt_entry_t *src_pte, *dst_pte;
vm_page_t dstmpte, srcmpte;
@@ -2356,7 +2372,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
* block.
*/
dstmpte = pmap_allocpte(dst_pmap, addr);
-dst_pte = pmap_pte(dst_pmap, addr);
+dst_pte = pmap_pte_quick(dst_pmap, addr);
if ((*dst_pte == 0) && (ptetemp = *src_pte)) {
/*
* Clear the modified and
@@ -2367,11 +2383,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
*dst_pte = ptetemp & ~(PG_M | PG_A);
dst_pmap->pm_stats.resident_count++;
pmap_insert_entry(dst_pmap, addr, m);
-} else {
-vm_page_lock_queues();
+} else
pmap_unwire_pte_hold(dst_pmap, dstmpte);
-vm_page_unlock_queues();
-}
if (dstmpte->hold_count >= srcmpte->hold_count)
break;
}
@@ -2379,6 +2392,9 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
src_pte++;
}
}
+sched_unpin();
+vm_page_unlock_queues();
+PMAP_UNLOCK(dst_pmap);
}
static __inline void

sys/vm/vm_page.c

@@ -204,7 +204,8 @@ vm_page_startup(vm_offset_t vaddr)
/*
* Initialize the locks.
*/
-mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF);
+mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF |
+MTX_RECURSE);
mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
MTX_SPIN);
@@ -1657,11 +1658,7 @@ vm_page_cowfault(vm_page_t m)
retry_alloc:
vm_page_remove(m);
-/*
-* An interrupt allocation is requested because the page
-* queues lock is held.
-*/
-mnew = vm_page_alloc(object, pindex, VM_ALLOC_INTERRUPT);
+mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
if (mnew == NULL) {
vm_page_insert(m, object, pindex);
vm_page_unlock_queues();
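
The two vm_page.c hunks above work together: because vm_page_queue_mtx is now created with MTX_RECURSE, a thread that already holds the page queues lock can call vm_page_alloc(VM_ALLOC_NORMAL), whose allocation path may take the same lock again, so vm_page_cowfault() no longer needs the VM_ALLOC_INTERRUPT workaround.  A rough sketch of the pattern (illustrative only, not code from the tree):

	/* At initialization: allow the page queues lock to recurse. */
	mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL,
	    MTX_DEF | MTX_RECURSE);

	/* Later, e.g. in vm_page_cowfault(), with the lock already held: */
	vm_page_lock_queues();
	mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
	/* vm_page_alloc() may reacquire the page queues lock internally. */
	vm_page_unlock_queues();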