Avoid needless TLB invalidations in pmap_remove_pages().
pmap_remove_pages() is called during process termination, when it is guaranteed that no other CPU may access the mappings being torn down. In particular, it is unnecessary to invalidate each mapping individually since we do a pmap_invalidate_all() at the end of the function. Also don't call pmap_invalidate_all() while holding a PV list lock, the global pvh lock is sufficient. Reviewed by: jhb MFC after: 1 week Sponsored by: The FreeBSD Foundation Differential Revision: https://reviews.freebsd.org/D18562
This commit is contained in:
parent
4f86ff4e47
commit
105c317166
@ -2721,9 +2721,10 @@ pmap_remove_pages(pmap_t pmap)
|
||||
l3 = pmap_l2_to_l3(l2, pv->pv_va);
|
||||
tl3 = pmap_load(l3);
|
||||
|
||||
/*
|
||||
* We cannot remove wired pages from a process' mapping at this time
|
||||
*/
|
||||
/*
|
||||
* We cannot remove wired pages from a
|
||||
* process' mapping at this time.
|
||||
*/
|
||||
if (tl3 & PTE_SW_WIRED) {
|
||||
allfree = 0;
|
||||
continue;
|
||||
@ -2742,7 +2743,6 @@ pmap_remove_pages(pmap_t pmap)
|
||||
(uintmax_t)tl3));
|
||||
|
||||
pmap_load_clear(l3);
|
||||
pmap_invalidate_page(pmap, pv->pv_va);
|
||||
|
||||
/*
|
||||
* Update the vm_page_t clean/reference bits.
|
||||
@ -2771,9 +2771,9 @@ pmap_remove_pages(pmap_t pmap)
|
||||
free_pv_chunk(pc);
|
||||
}
|
||||
}
|
||||
pmap_invalidate_all(pmap);
|
||||
if (lock != NULL)
|
||||
rw_wunlock(lock);
|
||||
pmap_invalidate_all(pmap);
|
||||
rw_runlock(&pvh_global_lock);
|
||||
PMAP_UNLOCK(pmap);
|
||||
vm_page_free_pages_toq(&free, false);
|
||||
|
Loading…
x
Reference in New Issue
Block a user