pmap_remove:

  Avoid re-walking the page table from the root for every PTE examined.

  Tidy up some of pmap_remove()'s helper functions.
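
  For illustration, the reworked inner loop of pmap_remove() (condensed from
  the hunk at -1789 below) obtains the PTE pointer once per page directory
  entry and simply steps it, instead of re-deriving it from the pmap root for
  every virtual address:

      for (pte = pmap_pde_to_pte(pde, sva); sva != va_next;
          pte++, sva += PAGE_SIZE) {
              if (!pte_test(pte, PTE_V))
                      continue;
              pmap_remove_pte(pmap, pte, sva, *pde);
              pmap_invalidate_page(pmap, sva);
      }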

pmap_enter:

  Set PV_TABLE_REF whenever the physical page being mapped is managed, not
  just when it is writeable.
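
  In outline (taken from the pmap_enter() hunks below), the flag now
  accompanies the managed-page test rather than the write-access test:

      if ((m->oflags & VPO_UNMANAGED) == 0) {
              m->md.pv_flags |= PV_TABLE_REF;
              /* ... enter the mapping on the page's pv list ... */
      }

  The old assignment under the (access & VM_PROT_WRITE) check is dropped.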

  Only call pmap_update_page() when the old mapping was valid.  If there was
  no prior valid mapping, then pmap_update_page() serves no useful purpose.
  However, it will interrupt every processor that has the pmap active.
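
  Condensed from the hunks at -2099 and -2116 below, the tail of pmap_enter()
  now takes roughly this shape (the referenced/dirty bookkeeping for the old
  mapping is elided):

      if (origpte != newpte) {
              *pte = newpte;
              if (pte_test(&origpte, PTE_V)) {
                      /* ... old-mapping bookkeeping ... */
                      pmap_update_page(pmap, va, newpte);
              }
      }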

pmap_enter_quick_locked:

  Always set PTE_RO.

pmap_emulate_modified:

  Don't set PV_TABLE_REF.

  Eliminate a nonsensical comment.
Alan Cox 2012-09-08 17:43:20 +00:00
parent 9a44104fb6
commit 1c978ec48b
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=240241

@@ -1677,6 +1677,11 @@ pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
rw_assert(&pvh_global_lock, RA_WLOCKED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
/*
* Write back all cache lines from the page being unmapped.
*/
mips_dcache_wbinv_range_index(va, PAGE_SIZE);
oldpte = *ptq;
if (is_kernel_pmap(pmap))
*ptq = PTE_G;
@@ -1723,24 +1728,13 @@ pmap_remove_page(struct pmap *pmap, vm_offset_t va)
ptq = pmap_pde_to_pte(pde, va);
/*
* if there is no pte for this address, just skip it!!!
* If there is no pte for this address, just skip it!
*/
if (!pte_test(ptq, PTE_V)) {
if (!pte_test(ptq, PTE_V))
return;
}
/*
* Write back all caches from the page being destroyed
*/
mips_dcache_wbinv_range_index(va, PAGE_SIZE);
/*
* get a local va for mappings for this pmap.
*/
(void)pmap_remove_pte(pmap, ptq, va, *pde);
pmap_invalidate_page(pmap, va);
return;
}
/*
@@ -1789,9 +1783,12 @@ pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
continue;
if (va_next > eva)
va_next = eva;
for (pte = pmap_pde_to_pte(pde, sva); sva != va_next;
pte++, sva += PAGE_SIZE) {
pmap_remove_page(pmap, sva);
for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
sva += PAGE_SIZE) {
if (!pte_test(pte, PTE_V))
continue;
pmap_remove_pte(pmap, pte, sva, *pde);
pmap_invalidate_page(pmap, sva);
}
}
out:
@@ -2035,6 +2032,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
mpte->wire_count--;
if (pte_test(&origpte, PTE_MANAGED)) {
m->md.pv_flags |= PV_TABLE_REF;
om = m;
newpte |= PTE_MANAGED;
if (!pte_test(&newpte, PTE_RO))
@@ -2070,6 +2068,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
* Enter on the PV list if part of our managed memory.
*/
if ((m->oflags & VPO_UNMANAGED) == 0) {
m->md.pv_flags |= PV_TABLE_REF;
if (pv == NULL)
pv = get_pv_entry(pmap, FALSE);
pv->pv_va = va;
@@ -2087,8 +2086,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
pmap->pm_stats.wired_count++;
validate:
if ((access & VM_PROT_WRITE) != 0)
m->md.pv_flags |= PV_TABLE_REF;
#ifdef PMAP_DEBUG
printf("pmap_enter: va: %p -> pa: %p\n", (void *)va, (void *)pa);
@@ -2099,8 +2096,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
* update the pte.
*/
if (origpte != newpte) {
*pte = newpte;
if (pte_test(&origpte, PTE_V)) {
*pte = newpte;
if (pte_test(&origpte, PTE_MANAGED) && opa != pa) {
if (om->md.pv_flags & PV_TABLE_REF)
vm_page_aflag_set(om, PGA_REFERENCED);
@@ -2116,11 +2113,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
if (pte_test(&origpte, PTE_MANAGED) &&
TAILQ_EMPTY(&om->md.pv_list))
vm_page_aflag_clear(om, PGA_WRITEABLE);
} else {
*pte = newpte;
pmap_update_page(pmap, va, newpte);
}
}
pmap_update_page(pmap, va, newpte);
/*
* Sync I & D caches for executable pages. Do this only if the
@@ -2239,7 +2234,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
/*
* Now validate mapping with RO protection
*/
*pte = TLBLO_PA_TO_PFN(pa) | PTE_V;
*pte = PTE_RO | TLBLO_PA_TO_PFN(pa) | PTE_V;
if ((m->oflags & VPO_UNMANAGED) == 0)
*pte |= PTE_MANAGED;
@@ -2251,7 +2246,6 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
if (is_kernel_pmap(pmap))
*pte |= PTE_G;
else {
*pte |= PTE_RO;
/*
* Sync I & D caches. Do this only if the target pmap
* belongs to the current process. Otherwise, an
@@ -3255,9 +3249,7 @@ init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot)
int
pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
{
vm_page_t m;
pt_entry_t *pte;
vm_paddr_t pa;
PMAP_LOCK(pmap);
pte = pmap_pte(pmap, va);
@@ -3275,7 +3267,6 @@ pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
panic("pmap_emulate_modified: invalid pte");
#endif
if (pte_test(pte, PTE_RO)) {
/* write to read only page in the kernel */
PMAP_UNLOCK(pmap);
return (1);
}
@@ -3283,9 +3274,6 @@ pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
tlb_update(pmap, va, *pte);
if (!pte_test(pte, PTE_MANAGED))
panic("pmap_emulate_modified: unmanaged page");
pa = TLBLO_PTE_TO_PA(*pte);
m = PHYS_TO_VM_PAGE(pa);
m->md.pv_flags |= PV_TABLE_REF;
PMAP_UNLOCK(pmap);
return (0);
}