Push down the acquisition and release of the page queues lock into
pmap_remove_pages().  (The implementation of pmap_remove_pages() is
optional.  If pmap_remove_pages() is unimplemented, the acquisition and
release of the page queues lock is unnecessary.)

Remove spl calls from the alpha, arm, and ia64 pmap_remove_pages().
commit ce8da3091f
parent b0c2b92548
Alan Cox 2004-07-13 02:49:22 +00:00
7 changed files with 10 additions and 15 deletions
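
In outline, the callers shown below (exec_new_vmspace() and exit1()) stop
wrapping the call in vm_page_lock_queues()/vm_page_unlock_queues(), and
pmap_remove_pages() now takes and drops that lock itself; on alpha, arm,
and ia64 the lock also replaces the old splvm()/splx() pair.  A condensed
sketch of the resulting shape, pieced together from the hunks below -- the
pv-list walk is elided, and the PMAP_LOCK/PMAP_UNLOCK pair appears only in
the pmaps that already used it:

void
pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

        vm_page_lock_queues();          /* previously taken by the caller */
        PMAP_LOCK(pmap);
        /* ... walk pmap->pm_pvlist, clear PTEs, free pv entries ... */
        pmap_invalidate_all(pmap);      /* pmap_invalidate_tlb_all() on arm */
        PMAP_UNLOCK(pmap);
        vm_page_unlock_queues();        /* previously released by the caller */
}

A pmap that leaves pmap_remove_pages() unimplemented therefore never
acquires the page queues lock at all, which is the point of the
parenthetical note in the log message above.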


@@ -2185,7 +2185,6 @@ pmap_remove_pages(pmap, sva, eva)
         pt_entry_t *pte, tpte;
         vm_page_t m;
         pv_entry_t pv, npv;
-        int s;
 
 #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
         if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))) {
@@ -2194,7 +2193,7 @@ pmap_remove_pages(pmap, sva, eva)
         }
 #endif
-        s = splvm();
+        vm_page_lock_queues();
         PMAP_LOCK(pmap);
         for(pv = TAILQ_FIRST(&pmap->pm_pvlist);
             pv;
@@ -2240,9 +2239,9 @@ pmap_remove_pages(pmap, sva, eva)
                 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
                 free_pv_entry(pv);
         }
-        splx(s);
         pmap_invalidate_all(pmap);
         PMAP_UNLOCK(pmap);
+        vm_page_unlock_queues();
 }
 
 /*


@@ -2459,7 +2459,7 @@ pmap_remove_pages(pmap, sva, eva)
                 return;
         }
 #endif
-        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+        vm_page_lock_queues();
         PMAP_LOCK(pmap);
         for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
@@ -2521,6 +2521,7 @@ pmap_remove_pages(pmap, sva, eva)
         }
         pmap_invalidate_all(pmap);
         PMAP_UNLOCK(pmap);
+        vm_page_unlock_queues();
 }
 
 /*


@@ -2620,7 +2620,6 @@ pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
         struct l2_bucket *l2b;
         pt_entry_t *pte, tpte;
         pv_entry_t pv, npv;
-        int s;
         vm_page_t m;
 
 #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
@@ -2630,7 +2629,7 @@ pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
         }
 #endif
-        s = splvm();
+        vm_page_lock_queues();
         for(pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
                 if (pv->pv_va >= eva || pv->pv_va < sva) {
                         npv = TAILQ_NEXT(pv, pv_plist);
@@ -2675,8 +2674,8 @@ pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
                 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
                 pmap_free_pv_entry(pv);
         }
-        splx(s);
         pmap_invalidate_tlb_all(pmap);
+        vm_page_unlock_queues();
 }


@@ -2531,7 +2531,7 @@ pmap_remove_pages(pmap, sva, eva)
                 return;
         }
 #endif
-        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+        vm_page_lock_queues();
         PMAP_LOCK(pmap);
         sched_pin();
         for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
@@ -2595,6 +2595,7 @@ pmap_remove_pages(pmap, sva, eva)
         sched_unpin();
         pmap_invalidate_all(pmap);
         PMAP_UNLOCK(pmap);
+        vm_page_unlock_queues();
 }
 
 /*


@@ -1847,7 +1847,6 @@ void
 pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 {
         pv_entry_t pv, npv;
-        int s;
 
 #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
         if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))) {
@@ -1856,7 +1855,7 @@ pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
         }
 #endif
-        s = splvm();
+        vm_page_lock_queues();
         for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
              pv;
              pv = npv) {
@@ -1875,9 +1874,9 @@ pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
                 pmap_remove_pte(pmap, pte, pv->pv_va, pv, 1);
         }
-        splx(s);
         pmap_invalidate_all(pmap);
+        vm_page_unlock_queues();
 }
 
 /*


@@ -864,10 +864,8 @@ exec_new_vmspace(imgp, sv)
         if (vmspace->vm_refcnt == 1 && vm_map_min(map) == sv->sv_minuser &&
             vm_map_max(map) == sv->sv_maxuser) {
                 shmexit(vmspace);
-                vm_page_lock_queues();
                 pmap_remove_pages(vmspace_pmap(vmspace), vm_map_min(map),
                     vm_map_max(map));
-                vm_page_unlock_queues();
                 vm_map_remove(map, vm_map_min(map), vm_map_max(map));
         } else {
                 vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);


@@ -275,10 +275,8 @@ exit1(struct thread *td, int rv)
         ++vm->vm_exitingcnt;
         if (--vm->vm_refcnt == 0) {
                 shmexit(vm);
-                vm_page_lock_queues();
                 pmap_remove_pages(vmspace_pmap(vm), vm_map_min(&vm->vm_map),
                     vm_map_max(&vm->vm_map));
-                vm_page_unlock_queues();
                 (void) vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
                     vm_map_max(&vm->vm_map));
         }