Replace all uses of the vm page queues lock by a r/w lock that is private
to this pmap.

Tested by:	andreast, jhibbits
commit 3653f5cbcb
parent 996922aeee
@@ -125,6 +125,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/msgbuf.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/rwlock.h>
 #include <sys/sched.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
@@ -204,6 +205,17 @@ struct pvo_head *moea_pvo_table;	/* pvo entries by pteg index */
 struct pvo_head moea_pvo_kunmanaged =
     LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);	/* list of unmanaged pages */
 
+/*
+ * Isolate the global pv list lock from data and other locks to prevent false
+ * sharing within the cache.
+ */
+static struct {
+	struct rwlock	lock;
+	char		padding[CACHE_LINE_SIZE - sizeof(struct rwlock)];
+} pvh_global __aligned(CACHE_LINE_SIZE);
+
+#define	pvh_global_lock	pvh_global.lock
+
 uma_zone_t	moea_upvo_zone;	/* zone for pvo entries for unmanaged pages */
 uma_zone_t	moea_mpvo_zone;	/* zone for pvo entries for managed pages */
 
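The hunk above introduces a cache-line padding idiom for a hot global lock. As a rough illustration of the same idea outside the kernel, here is a minimal userspace sketch, assuming POSIX threads, a 64-byte cache line, and hypothetical names (pv_global, pv_attr_set); it is not part of the commit.

/*
 * Illustrative only: the lock is padded and aligned so it owns an entire
 * cache line, so stores to neighboring globals cannot bounce the line
 * holding the lock between CPUs (false sharing).
 */
#include <pthread.h>

#define	CACHE_LINE_SIZE	64		/* assumed line size */

static struct {
	pthread_rwlock_t lock;
	char padding[CACHE_LINE_SIZE -
	    (sizeof(pthread_rwlock_t) % CACHE_LINE_SIZE)];
} pv_global __attribute__((aligned(CACHE_LINE_SIZE))) = {
	.lock = PTHREAD_RWLOCK_INITIALIZER,
};

#define	pv_global_lock	(&pv_global.lock)

static int pv_attrs;			/* stand-in for pv-list state */

static void
pv_attr_set(int bit)
{
	pthread_rwlock_wrlock(pv_global_lock);	/* exclusive, like rw_wlock() */
	pv_attrs |= bit;
	pthread_rwlock_unlock(pv_global_lock);
}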
@@ -455,7 +467,7 @@ static __inline void
 moea_attr_clear(vm_page_t m, int ptebit)
 {
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	m->md.mdpg_attrs &= ~ptebit;
 }
 
@@ -470,7 +482,7 @@ static __inline void
 moea_attr_save(vm_page_t m, int ptebit)
 {
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	m->md.mdpg_attrs |= ptebit;
 }
 
@@ -859,6 +871,11 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 	CPU_FILL(&kernel_pmap->pm_active);
 	RB_INIT(&kernel_pmap->pmap_pvo);
 
+	/*
+	 * Initialize the global pv list lock.
+	 */
+	rw_init(&pvh_global_lock, "pmap pv global");
+
 	/*
 	 * Set up the Open Firmware mappings
 	 */
@@ -1066,10 +1083,10 @@ moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     boolean_t wired)
 {
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pmap);
 	moea_enter_locked(pmap, va, m, prot, wired);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pmap);
 }
 
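In this hunk and the ones below, the new global lock is always acquired before the per-pmap lock, so the two locks nest the same way everywhere. A hedged userspace sketch of that discipline (not part of the commit; pmap_enter_sketch and the stand-in lock types are hypothetical):

/*
 * Consistent acquisition order (pv global lock, then pmap lock) rules out
 * a lock-order reversal between the two locks.
 */
#include <pthread.h>

static pthread_rwlock_t pv_global_lock = PTHREAD_RWLOCK_INITIALIZER;

struct pmap {
	pthread_mutex_t pm_mtx;		/* per-pmap lock, like PMAP_LOCK() */
};

static void
pmap_enter_sketch(struct pmap *pm)
{
	pthread_rwlock_wrlock(&pv_global_lock);	/* 1: global pv lock first */
	pthread_mutex_lock(&pm->pm_mtx);	/* 2: then the pmap lock */
	/* ... update pv lists and the pmap ... */
	pthread_rwlock_unlock(&pv_global_lock);	/* release order may differ; */
	pthread_mutex_unlock(&pm->pm_mtx);	/* only acquisition order matters */
}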
@@ -1102,7 +1119,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		pvo_flags = PVO_MANAGED;
 	}
 	if (pmap_bootstrapped)
-		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+		rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
 	    VM_OBJECT_LOCKED(m->object),
@@ -1166,14 +1183,14 @@ moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
 
 	psize = atop(end - start);
 	m = m_start;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pm);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		moea_enter_locked(pm, start + ptoa(diff), m, prot &
 		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
 		m = TAILQ_NEXT(m, listq);
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pm);
 }
 
@@ -1182,11 +1199,11 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
     vm_prot_t prot)
 {
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pm);
 	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 	    FALSE);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pm);
 }
 
@@ -1342,7 +1359,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	lo = moea_attr_fetch(m);
 	powerpc_sync();
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
@@ -1368,7 +1385,7 @@ moea_remove_write(mmu_t mmu, vm_page_t m)
 		vm_page_dirty(m);
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -1409,7 +1426,7 @@ moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
 		return;
 	}
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	pvo_head = vm_page_to_pvoh(m);
 	lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
 
@@ -1429,7 +1446,7 @@ moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
 		PMAP_UNLOCK(pmap);
 	}
 	m->md.mdpg_cache_attrs = ma;
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -1543,7 +1560,7 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 	    ("moea_page_exists_quick: page %p is not managed", m));
 	loops = 0;
 	rv = FALSE;
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 		if (pvo->pvo_pmap == pmap) {
 			rv = TRUE;
@@ -1552,7 +1569,7 @@ moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
 		if (++loops >= 16)
 			break;
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (rv);
 }
 
@@ -1569,11 +1586,11 @@ moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
 	count = 0;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
 		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
 			count++;
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (count);
 }
 
@@ -1672,7 +1689,7 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
 		return;
 	}
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pm);
 	key.pvo_vaddr = sva;
 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
@@ -1700,7 +1717,7 @@ moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
 			mtx_unlock(&moea_table_mutex);
 		}
 	}
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	PMAP_UNLOCK(pm);
 }
 
@@ -1766,7 +1783,7 @@ moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 {
 	struct pvo_entry *pvo, *tpvo, key;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	PMAP_LOCK(pm);
 	key.pvo_vaddr = sva;
 	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
@@ -1775,7 +1792,7 @@ moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
 		moea_pvo_remove(pvo, -1);
 	}
 	PMAP_UNLOCK(pm);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -1789,7 +1806,7 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
 	struct pvo_entry *pvo, *next_pvo;
 	pmap_t pmap;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	pvo_head = vm_page_to_pvoh(m);
 	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
 		next_pvo = LIST_NEXT(pvo, pvo_vlink);
@@ -1804,7 +1821,7 @@ moea_remove_all(mmu_t mmu, vm_page_t m)
 		vm_page_dirty(m);
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 }
 
 /*
@@ -2279,7 +2296,7 @@ moea_query_bit(vm_page_t m, int ptebit)
 	if (moea_attr_fetch(m) & ptebit)
 		return (TRUE);
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 
 		/*
@@ -2288,7 +2305,7 @@ moea_query_bit(vm_page_t m, int ptebit)
 		 */
 		if (pvo->pvo_pte.pte.pte_lo & ptebit) {
 			moea_attr_save(m, ptebit);
-			vm_page_unlock_queues();
+			rw_wunlock(&pvh_global_lock);
 			return (TRUE);
 		}
 	}
@@ -2312,13 +2329,13 @@ moea_query_bit(vm_page_t m, int ptebit)
 			mtx_unlock(&moea_table_mutex);
 			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
 				moea_attr_save(m, ptebit);
-				vm_page_unlock_queues();
+				rw_wunlock(&pvh_global_lock);
 				return (TRUE);
 			}
 		}
 	}
 
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (FALSE);
 }
 
@@ -2329,7 +2346,7 @@ moea_clear_bit(vm_page_t m, int ptebit)
 	struct pvo_entry *pvo;
 	struct pte *pt;
 
-	vm_page_lock_queues();
+	rw_wlock(&pvh_global_lock);
 
 	/*
 	 * Clear the cached value.
@@ -2363,7 +2380,7 @@ moea_clear_bit(vm_page_t m, int ptebit)
 		pvo->pvo_pte.pte.pte_lo &= ~ptebit;
 	}
 
-	vm_page_unlock_queues();
+	rw_wunlock(&pvh_global_lock);
 	return (count);
 }