powerpc/booke: Add pte_find_next() to find the next in-use PTE

Summary:
Iterating over VM_MIN_ADDRESS->VM_MAXUSER_ADDRESS one page at a time can
take a very long time (2**(log_2(SIZE)-12) operations), yielding possibly
several days or even weeks on 64-bit Book-E, even for a largely empty
address space — which can happen when a process is swapped out by
vmdaemon.  Speed this up by instead finding the next in-use PTE at an
address greater than or equal to the given VA.

Reviewed by:	bdragon
Differential Revision: https://reviews.freebsd.org/D24238
This commit is contained in:
Justin Hibbits 2020-04-11 00:16:50 +00:00
parent dd8775a1b0
commit d7c0543ff8
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=359792
3 changed files with 80 additions and 3 deletions

View File

@ -1532,9 +1532,12 @@ mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
for (; va < endva; va += PAGE_SIZE) {
pte = pte_find(mmu, pmap, va);
if ((pte != NULL) && PTE_ISVALID(pte))
pte_remove(mmu, pmap, va, hold_flag);
pte = pte_find_next(mmu, pmap, &va);
if ((pte == NULL) || !PTE_ISVALID(pte))
break;
if (va >= endva)
break;
pte_remove(mmu, pmap, va, hold_flag);
}
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);

View File

@ -598,6 +598,35 @@ pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
return (NULL);
}
/*
 * Get a pointer to a PTE in a page table, or the next closest (greater) one.
 *
 * Scans the two-level (pdir -> ptbl) page-table tree starting at *pva.
 * On success, *pva is updated to the VA of the PTE found and a pointer to
 * that PTE is returned; NULL is returned when no valid PTE exists at or
 * above *pva.
 */
static __inline pte_t *
pte_find_next(mmu_t mmu, pmap_t pmap, vm_offset_t *pva)
{
	pte_t **pdir;
	pte_t *pte;
	unsigned long pdir_idx, ptbl_idx;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	/* Start the scan at the directory/table slot covering *pva. */
	pdir_idx = PDIR_IDX(*pva);
	ptbl_idx = PTBL_IDX(*pva);
	pdir = pmap->pm_pdir;

	while (pdir_idx < PDIR_NENTRIES) {
		if (pdir[pdir_idx] != NULL) {
			while (ptbl_idx < PTBL_NENTRIES) {
				pte = &pdir[pdir_idx][ptbl_idx];
				if (PTE_ISVALID(pte)) {
					/* Report the VA this PTE maps. */
					*pva = PDIR_SIZE * pdir_idx +
					    PAGE_SIZE * ptbl_idx;
					return (pte);
				}
				ptbl_idx++;
			}
		}
		/* Next directory entry: restart the table scan at slot 0. */
		pdir_idx++;
		ptbl_idx = 0;
	}
	return (NULL);
}
/* Set up kernel page tables. */
static void
kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr)

View File

@ -145,6 +145,7 @@ static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find_next(mmu_t, pmap_t, vm_offset_t *);
static void kernel_pte_alloc(vm_offset_t, vm_offset_t);
/**************************************************************************/
@ -204,6 +205,50 @@ pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
}
/*
 * Get a pointer to a PTE in a page table, or the next closest (greater) one.
 *
 * Walks the four-level (pm_root -> pdir_l1 -> pdir -> ptbl) tree starting
 * at *pva.  On success, *pva is updated to the VA of the PTE found and a
 * pointer to that PTE is returned; NULL is returned when no valid PTE
 * exists at or above *pva.
 */
static __inline pte_t *
pte_find_next(mmu_t mmu, pmap_t pmap, vm_offset_t *pva)
{
	vm_offset_t va;
	pte_t ****pm_root;
	pte_t *pte;
	unsigned long i, j, k, l;

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	va = *pva;
	i = PG_ROOT_IDX(va);
	j = PDIR_L1_IDX(va);
	k = PDIR_IDX(va);
	l = PTBL_IDX(va);
	pm_root = pmap->pm_root;

	/*
	 * Keep only the VA bits above the root index field; the index-derived
	 * offsets are added back when a PTE is found below.
	 */
	va &= ~((1UL << (PG_ROOT_H + 1)) - 1);
	/*
	 * Each increment clause must reset ALL deeper indices, not just the
	 * next level down.  Otherwise, skipping an empty subtree via
	 * 'continue' leaves the stale starting indices (k, l) in place and
	 * valid PTEs at lower slots of the next populated subtree are missed.
	 */
	for (; i < PG_ROOT_NENTRIES; i++, j = 0, k = 0, l = 0) {
		if (pm_root[i] == 0)
			continue;
		for (; j < PDIR_L1_NENTRIES; j++, k = 0, l = 0) {
			if (pm_root[i][j] == 0)
				continue;
			for (; k < PDIR_NENTRIES; k++, l = 0) {
				if (pm_root[i][j][k] == NULL)
					continue;
				for (; l < PTBL_NENTRIES; l++) {
					pte = &pm_root[i][j][k][l];
					if (!PTE_ISVALID(pte))
						continue;
					/* VA that this PTE maps. */
					*pva = va + PG_ROOT_SIZE * i +
					    PDIR_L1_SIZE * j +
					    PDIR_SIZE * k +
					    PAGE_SIZE * l;
					return (pte);
				}
			}
		}
	}
	return (NULL);
}
static bool
unhold_free_page(mmu_t mmu, pmap_t pmap, vm_page_t m)
{