Replace all uses of the vm page queues lock by a r/w lock that is private to this pmap.

Tested by:	Ian Lepore
Alan Cox 2012-08-31 02:59:44 +00:00
parent d6c6eb82c0
commit 059fb00f23
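
The pattern applied throughout the diff below is sketched here in condensed form: the pmap declares a private recursive rwlock, initializes it once during bootstrap, and write-locks it wherever the code previously took the global vm page queues mutex, with internal helpers asserting the lock instead of vm_page_queue_mtx. The identifiers pvh_global_lock, rw_init_flags(), rw_wlock(), rw_wunlock(), and rw_assert() come from the diff itself; the wrapper functions (pv_lock_init, pv_list_example) are hypothetical and exist only to make the fragment self-contained, so treat this as an illustrative sketch rather than kernel source. Note that the diff always takes the lock exclusively; using an rwlock presumably leaves room for read-locking read-mostly paths later.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

/* Private lock protecting the pv lists, replacing vm_page_queue_mtx. */
static struct rwlock pvh_global_lock;

/*
 * One-time setup, as done in pmap_bootstrap(); RW_RECURSE allows paths
 * that re-enter pv-list code while the lock is already held.
 */
static void
pv_lock_init(void)
{

	rw_init_flags(&pvh_global_lock, "pmap pv global", RW_RECURSE);
}

/*
 * Typical caller (hypothetical): take the lock exclusively around pv-list
 * manipulation, exactly where vm_page_lock_queues()/vm_page_unlock_queues()
 * used to sit.
 */
static void
pv_list_example(void)
{

	rw_wlock(&pvh_global_lock);
	/* ... walk or modify a page's md.pv_list ... */
	rw_assert(&pvh_global_lock, RA_WLOCKED);	/* what helpers now assert */
	rw_wunlock(&pvh_global_lock);
}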


@@ -150,6 +150,7 @@ __FBSDID("$FreeBSD$");
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
+#include <sys/rwlock.h>
#include <sys/smp.h>
#include <sys/sched.h>
@@ -408,6 +409,7 @@ static vm_offset_t pmap_kernel_l2ptp_kva;
static vm_paddr_t pmap_kernel_l2ptp_phys;
static struct vm_object pvzone_obj;
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
+static struct rwlock pvh_global_lock;
/*
* This list exists for the benefit of pmap_map_chunk(). It keeps track
@@ -866,7 +868,7 @@ pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
l1idx = L1_IDX(va);
PMAP_ASSERT_LOCKED(pm);
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+rw_assert(&pvh_global_lock, RA_WLOCKED);
if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
/*
* No mapping at this address, as there is
@@ -875,19 +877,19 @@ pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
*/
again_l2table:
PMAP_UNLOCK(pm);
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
if ((l2 = pmap_alloc_l2_dtable()) == NULL) {
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pm);
return (NULL);
}
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pm);
if (pm->pm_l2[L2_IDX(l1idx)] != NULL) {
PMAP_UNLOCK(pm);
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
uma_zfree(l2table_zone, l2);
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pm);
l2 = pm->pm_l2[L2_IDX(l1idx)];
if (l2 == NULL)
@@ -919,16 +921,16 @@ again_l2table:
*/
again_ptep:
PMAP_UNLOCK(pm);
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
ptep = (void*)uma_zalloc(l2zone, M_NOWAIT|M_USE_RESERVE);
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pm);
if (l2b->l2b_kva != 0) {
/* We lost the race. */
PMAP_UNLOCK(pm);
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
uma_zfree(l2zone, ptep);
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pm);
if (l2b->l2b_kva == 0)
goto again_ptep;
@@ -1314,7 +1316,7 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
int entries = 0, kentries = 0, uentries = 0;
struct pv_entry *pv;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+rw_assert(&pvh_global_lock, RA_WLOCKED);
/* the cache gets written back/invalidated on context switch.
* therefore, if a user page shares an entry in the same page or
@@ -1426,7 +1428,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
u_int oflags;
int count = 0;
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
if (maskbits & PVF_WRITE)
maskbits |= PVF_MOD;
@@ -1436,7 +1438,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
if (TAILQ_EMPTY(&pg->md.pv_list)) {
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
return (0);
}
@@ -1572,7 +1574,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
if (maskbits & PVF_WRITE)
vm_page_aflag_clear(pg, PGA_WRITEABLE);
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
return (count);
}
@@ -1601,7 +1603,7 @@ pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
int km;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+rw_assert(&pvh_global_lock, RA_WLOCKED);
if (pg->md.pv_kva) {
/* PMAP_ASSERT_LOCKED(pmap_kernel()); */
@@ -1615,10 +1617,10 @@ pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
TAILQ_INSERT_HEAD(&pve->pv_pmap->pm_pvlist, pve, pv_plist);
PMAP_UNLOCK(pmap_kernel());
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
if ((pve = pmap_get_pv_entry()) == NULL)
panic("pmap_kenter_internal: no pv entries");
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
if (km)
PMAP_LOCK(pmap_kernel());
}
@@ -1647,7 +1649,7 @@ pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
struct pv_entry *pv;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+rw_assert(&pvh_global_lock, RA_WLOCKED);
TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list)
if (pm == pv->pv_pmap && va == pv->pv_va)
break;
@@ -1691,7 +1693,7 @@ pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
{
struct pv_entry *pv;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+rw_assert(&pvh_global_lock, RA_WLOCKED);
PMAP_ASSERT_LOCKED(pm);
TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list);
TAILQ_REMOVE(&pm->pm_pvlist, pve, pv_plist);
@@ -1737,7 +1739,7 @@ pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
struct pv_entry *pve;
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+rw_assert(&pvh_global_lock, RA_WLOCKED);
pve = TAILQ_FIRST(&pg->md.pv_list);
while (pve) {
@@ -1771,7 +1773,7 @@ pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va,
u_int flags, oflags;
PMAP_ASSERT_LOCKED(pm);
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+rw_assert(&pvh_global_lock, RA_WLOCKED);
if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
return (0);
@@ -1878,7 +1880,7 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
int rv = 0;
l1idx = L1_IDX(va);
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pm);
/*
@@ -2075,7 +2077,7 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
rv = 1;
out:
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pm);
return (rv);
}
@@ -2400,6 +2402,11 @@ pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt
CPU_FILL(&kernel_pmap->pm_active);
kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL;
TAILQ_INIT(&kernel_pmap->pm_pvlist);
+/*
+ * Initialize the global pv list lock.
+ */
+rw_init_flags(&pvh_global_lock, "pmap pv global", RW_RECURSE);
/*
* Reserve some special page table entries/VA space for temporary
@@ -2672,7 +2679,7 @@ pmap_remove_pages(pmap_t pmap)
vm_page_t m;
pt_entry_t *pt;
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
cpu_idcache_wbinv_all();
cpu_l2cache_wbinv_all();
@@ -2701,7 +2708,7 @@ pmap_remove_pages(pmap_t pmap)
pmap_free_pv_entry(pv);
pmap_free_l2_bucket(pmap, l2b, 1);
}
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
cpu_tlb_flushID();
cpu_cpwait();
PMAP_UNLOCK(pmap);
@@ -2826,13 +2833,13 @@ pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
* This expects the physical memory to have vm_page_array entry.
*/
if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa))) {
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva) {
/* release vm_page lock for pv_entry UMA */
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
if ((pve = pmap_get_pv_entry()) == NULL)
panic("pmap_kenter_internal: no pv entries");
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap_kernel());
pmap_enter_pv(m, pve, pmap_kernel(), va,
PVF_WRITE | PVF_UNMAN);
@@ -2841,7 +2848,7 @@ pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
} else {
m->md.pv_kva = va;
}
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
}
}
@@ -2902,13 +2909,13 @@ pmap_kremove(vm_offset_t va)
/* note: should never have to remove an allocation
* before the pvzone is initialized.
*/
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap_kernel());
if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) &&
(pve = pmap_remove_pv(m, pmap_kernel(), va)))
pmap_free_pv_entry(pve);
PMAP_UNLOCK(pmap_kernel());
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
va = va & ~PAGE_MASK;
cpu_dcache_wbinv_range(va, PAGE_SIZE);
cpu_l2cache_wbinv_range(va, PAGE_SIZE);
@@ -3126,7 +3133,7 @@ pmap_remove_all(vm_page_t m)
("pmap_remove_all: page %p is not managed", m));
if (TAILQ_EMPTY(&m->md.pv_list))
return;
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
pmap_remove_write(m);
curpm = vmspace_pmap(curproc->p_vmspace);
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
@@ -3175,7 +3182,7 @@ pmap_remove_all(vm_page_t m)
pmap_tlb_flushD(curpm);
}
vm_page_aflag_clear(m, PGA_WRITEABLE);
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
}
@@ -3208,7 +3215,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
return;
}
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pm);
/*
@@ -3275,7 +3282,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
if (PV_BEEN_REFD(flags))
pmap_tlb_flushD(pm);
}
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pm);
}
@@ -3299,10 +3306,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
vm_prot_t prot, boolean_t wired)
{
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
pmap_enter_locked(pmap, va, m, prot, wired, M_WAITOK);
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
@@ -3322,7 +3329,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
vm_paddr_t pa;
PMAP_ASSERT_LOCKED(pmap);
-mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+rw_assert(&pvh_global_lock, RA_WLOCKED);
if (va == vector_page) {
pa = systempage.pv_pa;
m = NULL;
@@ -3352,9 +3359,9 @@ do_l2b_alloc:
if (l2b == NULL) {
if (flags & M_WAITOK) {
PMAP_UNLOCK(pmap);
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
VM_WAIT;
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
goto do_l2b_alloc;
}
@@ -3594,14 +3601,14 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
psize = atop(end - start);
m = m_start;
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
pmap_enter_locked(pmap, start + ptoa(diff), m, prot &
(VM_PROT_READ | VM_PROT_EXECUTE), FALSE, M_NOWAIT);
m = TAILQ_NEXT(m, listq);
}
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
@@ -3618,11 +3625,11 @@ void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
FALSE, M_NOWAIT);
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
@@ -3640,7 +3647,7 @@ pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
pt_entry_t *ptep, pte;
vm_page_t pg;
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
l2b = pmap_get_l2_bucket(pmap, va);
KASSERT(l2b, ("No l2b bucket in pmap_change_wiring"));
@@ -3649,7 +3656,7 @@ pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
if (pg)
pmap_modify_pv(pg, pmap, va, PVF_WIRED, wired ? PVF_WIRED : 0);
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
@@ -3895,7 +3902,7 @@ pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
* we lock in the pmap => pv_head direction
*/
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
PMAP_LOCK(pm);
total = 0;
while (sva < eva) {
@@ -3989,7 +3996,7 @@ pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
pmap_free_l2_bucket(pm, l2b, mappings);
}
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
if (flushall)
cpu_tlb_flushID();
PMAP_UNLOCK(pm);
@@ -4405,7 +4412,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_page_exists_quick: page %p is not managed", m));
rv = FALSE;
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
if (pv->pv_pmap == pmap) {
rv = TRUE;
@@ -4415,7 +4422,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
if (loops >= 16)
break;
}
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
return (rv);
}
@@ -4434,11 +4441,11 @@ pmap_page_wired_mappings(vm_page_t m)
count = 0;
if ((m->oflags & VPO_UNMANAGED) != 0)
return (count);
-vm_page_lock_queues();
+rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
if ((pv->pv_flags & PVF_WIRED) != 0)
count++;
-vm_page_unlock_queues();
+rw_wunlock(&pvh_global_lock);
return (count);
}