powerpc64/pmap: Reduce scope of PV_LOCK in remove path

Summary:
Since the 'page pv' lock is one of the most highly contended locks, we
need to try to do as much work outside of the lock as we can.  The
moea64_pvo_remove_from_page() path is low-hanging fruit, where we can
do some heavy work (PHYS_TO_VM_PAGE()) outside of the lock if needed.
In one path, moea64_remove_all(), the PV lock is already held and can't
be swizzled, so we provide two ways to perform the locked operation, one
that can call PHYS_TO_VM_PAGE outside the lock, and one that calls with
the lock already held.

Reviewed By: luporl
Differential Revision: https://reviews.freebsd.org/D20694
This commit is contained in:
Justin Hibbits 2019-07-13 03:02:11 +00:00
parent 21e47be25e
commit a7e6ec601a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=349963

View File

@@ -234,6 +234,8 @@ static int moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo,
struct pvo_head *pvo_head);
static void moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo);
static void moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo);
static void moea64_pvo_remove_from_page_locked(mmu_t mmu,
struct pvo_entry *pvo);
static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
/*
@@ -1454,9 +1456,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
/* Free any dead pages */
if (oldpvo != NULL) {
PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
moea64_pvo_remove_from_page(mmu, oldpvo);
PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
free_pvo_entry(oldpvo);
}
@@ -1877,9 +1877,7 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
/* Free any dead pages */
if (oldpvo != NULL) {
PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
moea64_pvo_remove_from_page(mmu, oldpvo);
PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
free_pvo_entry(oldpvo);
}
@@ -2386,9 +2384,7 @@ moea64_remove_pages(mmu_t mmu, pmap_t pm)
PMAP_UNLOCK(pm);
RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
moea64_pvo_remove_from_page(mmu, pvo);
PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
RB_REMOVE(pvo_tree, &tofree, pvo);
free_pvo_entry(pvo);
}
@@ -2429,9 +2425,7 @@ moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
PMAP_UNLOCK(pm);
RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
moea64_pvo_remove_from_page(mmu, pvo);
PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
RB_REMOVE(pvo_tree, &tofree, pvo);
free_pvo_entry(pvo);
}
@@ -2458,7 +2452,7 @@ moea64_remove_all(mmu_t mmu, vm_page_t m)
wasdead = (pvo->pvo_vaddr & PVO_DEAD);
if (!wasdead)
moea64_pvo_remove_from_pmap(mmu, pvo);
moea64_pvo_remove_from_page(mmu, pvo);
moea64_pvo_remove_from_page_locked(mmu, pvo);
if (!wasdead)
LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
PMAP_UNLOCK(pmap);
@@ -2631,10 +2625,10 @@ moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo)
}
}
static void
moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
static inline void
_moea64_pvo_remove_from_page_locked(mmu_t mmu, struct pvo_entry *pvo,
vm_page_t m)
{
struct vm_page *pg;
KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page"));
@@ -2648,12 +2642,10 @@ moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
*/
PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN);
if (pvo->pvo_vaddr & PVO_MANAGED) {
pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
if (pg != NULL) {
if (m != NULL) {
LIST_REMOVE(pvo, pvo_vlink);
if (LIST_EMPTY(vm_page_to_pvoh(pg)))
vm_page_aflag_clear(pg,
if (LIST_EMPTY(vm_page_to_pvoh(m)))
vm_page_aflag_clear(m,
PGA_WRITEABLE | PGA_EXECUTABLE);
}
}
@@ -2662,6 +2654,30 @@ moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
moea64_pvo_remove_calls++;
}
/*
 * Delink a PVO from its page with the page's PV lock already held by
 * the caller (used by moea64_remove_all(), which cannot drop the lock).
 * The vm_page lookup for managed mappings therefore happens under the
 * lock here, unlike the unlocked variant below.
 */
static void
moea64_pvo_remove_from_page_locked(mmu_t mmu, struct pvo_entry *pvo)
{
	vm_page_t m;

	/* Only managed mappings have a vm_page to delink from. */
	m = (pvo->pvo_vaddr & PVO_MANAGED) ?
	    PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN) : NULL;
	_moea64_pvo_remove_from_page_locked(mmu, pvo, m);
}
/*
 * Delink a PVO from its page, taking and releasing the page's PV lock
 * internally.  The vm_page lookup (PHYS_TO_VM_PAGE()) for managed
 * mappings is performed before acquiring the lock, keeping that work
 * outside the highly contended PV lock.
 */
static void
moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
{
	vm_page_t m;

	m = NULL;
	if (pvo->pvo_vaddr & PVO_MANAGED)
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);

	PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
	_moea64_pvo_remove_from_page_locked(mmu, pvo, m);
	PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
}
static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{