Remove the pvh_global_lock from the arm64 pmap. It is unneeded on arm64
as TLB invalidation will have completed before the pmap_invalidate_*
functions return.

Discussed with:	alc, kib
Obtained from:	ABT Systems Ltd
MFC after:	1 month
Sponsored by:	The FreeBSD Foundation
Author:	Andrew Turner
Date:	2016-08-04 13:49:36 +00:00
parent	7b255097eb
commit	b0316526b0

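The claim in the commit message rests on how the arm64 pmap_invalidate_*
helpers are sequenced: they broadcast the TLB invalidate and then wait for it
with a data synchronization barrier, so the invalidation is globally complete
by the time they return. The fragment below is only an illustrative sketch in
that style (hypothetical name, 4 KiB pages assumed); it is not the code from
sys/arm64/arm64/pmap.c.

#include <stdint.h>

#define	DEMO_PAGE_SHIFT	12	/* assumption: 4 KiB translation granule */

/*
 * Sketch of a per-VA TLB invalidation in the arm64 pmap style: order the
 * preceding PTE store, broadcast the invalidate to the Inner Shareable
 * domain, then stall with "dsb ish" until every CPU has completed it.
 * Nothing is left pending when the function returns.
 */
static inline void
demo_invalidate_page(uint64_t va)
{

	__asm __volatile(
	    "dsb	ishst		\n"	/* publish the PTE update */
	    "tlbi	vaae1is, %0	\n"	/* invalidate VA, all ASIDs */
	    "dsb	ish		\n"	/* wait for completion */
	    "isb			\n"	/* resynchronize this CPU */
	    : : "r" (va >> DEMO_PAGE_SHIFT) : "memory");
}

Because that final barrier sits inside the invalidate itself, there is no
deferred work for a global lock to serialize against, which is the property
the commit relies on when it drops pvh_global_lock in favour of the per-pmap
and per-page pv list locks in the diff below.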

@@ -218,8 +218,6 @@ vm_offset_t kernel_vm_end = 0;
struct msgbuf *msgbufp = NULL;
static struct rwlock_padalign pvh_global_lock;
vm_paddr_t dmap_phys_base; /* The start of the dmap region */
vm_paddr_t dmap_phys_max; /* The limit of the dmap region */
vm_offset_t dmap_max_addr; /* The virtual address limit of the dmap */
@@ -671,11 +669,6 @@ pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
kernel_pmap_store.pm_l0 = (pd_entry_t *)l0pt;
PMAP_LOCK_INIT(kernel_pmap);
/*
* Initialize the global pv list lock.
*/
rw_init(&pvh_global_lock, "pmap pv global");
/* Assume the address we were loaded to is a valid physical address */
min_pa = max_pa = KERNBASE - kern_delta;
@@ -1404,9 +1397,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
if (lockp != NULL) {
RELEASE_PV_LIST_LOCK(lockp);
PMAP_UNLOCK(pmap);
rw_runlock(&pvh_global_lock);
VM_WAIT;
rw_rlock(&pvh_global_lock);
PMAP_LOCK(pmap);
}
@@ -1748,7 +1739,6 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv)
struct pv_chunk *pc;
int idx, field, bit;
rw_assert(&pvh_global_lock, RA_LOCKED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
PV_STAT(atomic_add_long(&pv_entry_frees, 1));
PV_STAT(atomic_add_int(&pv_entry_spare, 1));
@@ -1805,7 +1795,6 @@ get_pv_entry(pmap_t pmap, struct rwlock **lockp)
struct pv_chunk *pc;
vm_page_t m;
rw_assert(&pvh_global_lock, RA_LOCKED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
retry:
@@ -1873,7 +1862,6 @@ pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
{
pv_entry_t pv;
rw_assert(&pvh_global_lock, RA_LOCKED);
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
@@ -1909,7 +1897,6 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
{
pv_entry_t pv;
rw_assert(&pvh_global_lock, RA_LOCKED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
/* Pass NULL instead of the lock pointer to disable reclamation. */
if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
@@ -1978,7 +1965,6 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
anyvalid = 0;
SLIST_INIT(&free);
rw_rlock(&pvh_global_lock);
PMAP_LOCK(pmap);
lock = NULL;
@@ -2057,7 +2043,6 @@ pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
rw_wunlock(lock);
if (anyvalid)
pmap_invalidate_all(pmap);
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(&free);
}
@@ -2080,18 +2065,31 @@ pmap_remove_all(vm_page_t m)
{
pv_entry_t pv;
pmap_t pmap;
struct rwlock *lock;
pd_entry_t *pde, tpde;
pt_entry_t *pte, tpte;
struct spglist free;
int lvl;
int lvl, md_gen;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_remove_all: page %p is not managed", m));
SLIST_INIT(&free);
rw_wlock(&pvh_global_lock);
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
retry:
rw_wlock(lock);
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
pmap = PV_PMAP(pv);
PMAP_LOCK(pmap);
if (!PMAP_TRYLOCK(pmap)) {
md_gen = m->md.pv_gen;
rw_wunlock(lock);
PMAP_LOCK(pmap);
rw_wlock(lock);
if (md_gen != m->md.pv_gen) {
rw_wunlock(lock);
PMAP_UNLOCK(pmap);
goto retry;
}
}
pmap_resident_count_dec(pmap, 1);
pde = pmap_pde(pmap, pv->pv_va, &lvl);
@@ -2126,7 +2124,7 @@ pmap_remove_all(vm_page_t m)
PMAP_UNLOCK(pmap);
}
vm_page_aflag_clear(m, PGA_WRITEABLE);
rw_wunlock(&pvh_global_lock);
rw_wunlock(lock);
pmap_free_zero_pages(&free);
}
@@ -2241,7 +2239,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
mpte = NULL;
lock = NULL;
rw_rlock(&pvh_global_lock);
PMAP_LOCK(pmap);
if (va < VM_MAXUSER_ADDRESS) {
@@ -2251,7 +2248,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
if (lock != NULL)
rw_wunlock(lock);
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
return (KERN_RESOURCE_SHORTAGE);
}
@@ -2433,7 +2429,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (lock != NULL)
rw_wunlock(lock);
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
return (KERN_SUCCESS);
}
@@ -2465,7 +2460,6 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
mpte = NULL;
m = m_start;
lock = NULL;
rw_rlock(&pvh_global_lock);
PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
va = start + ptoa(diff);
@@ -2474,7 +2468,6 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
}
if (lock != NULL)
rw_wunlock(lock);
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
@@ -2493,12 +2486,10 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
struct rwlock *lock;
lock = NULL;
rw_rlock(&pvh_global_lock);
PMAP_LOCK(pmap);
(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
if (lock != NULL)
rw_wunlock(lock);
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
@@ -2515,7 +2506,6 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
(m->oflags & VPO_UNMANAGED) != 0,
("pmap_enter_quick_locked: managed mapping within the clean submap"));
rw_assert(&pvh_global_lock, RA_LOCKED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
@@ -2644,9 +2634,7 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
vm_offset_t va_next;
pd_entry_t *l0, *l1, *l2;
pt_entry_t *l3;
boolean_t pv_lists_locked;
pv_lists_locked = FALSE;
PMAP_LOCK(pmap);
for (; sva < eva; sva = va_next) {
l0 = pmap_l0(pmap, sva);
@@ -2692,8 +2680,6 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
pmap->pm_stats.wired_count--;
}
}
if (pv_lists_locked)
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
@@ -2837,7 +2823,6 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_page_exists_quick: page %p is not managed", m));
rv = FALSE;
rw_rlock(&pvh_global_lock);
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
rw_rlock(lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
@@ -2850,7 +2835,6 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
break;
}
rw_runlock(lock);
rw_runlock(&pvh_global_lock);
return (rv);
}
@@ -2871,7 +2855,6 @@ pmap_page_wired_mappings(vm_page_t m)
if ((m->oflags & VPO_UNMANAGED) != 0)
return (0);
rw_rlock(&pvh_global_lock);
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
rw_rlock(lock);
restart:
@@ -2894,7 +2877,6 @@ pmap_page_wired_mappings(vm_page_t m)
PMAP_UNLOCK(pmap);
}
rw_runlock(lock);
rw_runlock(&pvh_global_lock);
return (count);
}
@@ -2932,7 +2914,6 @@ pmap_remove_pages(pmap_t pmap)
lock = NULL;
SLIST_INIT(&free);
rw_rlock(&pvh_global_lock);
PMAP_LOCK(pmap);
TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
allfree = 1;
@@ -3018,7 +2999,6 @@ pmap_remove_pages(pmap_t pmap)
pmap_invalidate_all(pmap);
if (lock != NULL)
rw_wunlock(lock);
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
pmap_free_zero_pages(&free);
}
@@ -3039,7 +3019,6 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
boolean_t rv;
rv = FALSE;
rw_rlock(&pvh_global_lock);
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
rw_rlock(lock);
restart:
@@ -3075,7 +3054,6 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
}
out:
rw_runlock(lock);
rw_runlock(&pvh_global_lock);
return (rv);
}
@@ -3164,7 +3142,6 @@ pmap_remove_write(vm_page_t m)
VM_OBJECT_ASSERT_WLOCKED(m->object);
if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
return;
rw_rlock(&pvh_global_lock);
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
retry_pv_loop:
rw_wlock(lock);
@@ -3196,7 +3173,6 @@ pmap_remove_write(vm_page_t m)
}
rw_wunlock(lock);
vm_page_aflag_clear(m, PGA_WRITEABLE);
rw_runlock(&pvh_global_lock);
}
static __inline boolean_t
@@ -3238,7 +3214,6 @@ pmap_ts_referenced(vm_page_t m)
cleared = 0;
pa = VM_PAGE_TO_PHYS(m);
lock = PHYS_TO_PV_LIST_LOCK(pa);
rw_rlock(&pvh_global_lock);
rw_wlock(lock);
retry:
not_cleared = 0;
@@ -3307,7 +3282,6 @@ pmap_ts_referenced(vm_page_t m)
not_cleared < PMAP_TS_REFERENCED_MAX);
out:
rw_wunlock(lock);
rw_runlock(&pvh_global_lock);
pmap_free_zero_pages(&free);
return (cleared + not_cleared);
}