Shave off a few more cycles from pmap_enter()'s critical section. In
particular, do a little less work with the PV list lock held.
Alan Cox 2012-07-29 18:20:49 +00:00
parent 1794251add
commit bc27d6c608

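The hunks below all serve one pattern: the complete new PTE — valid, accessed, modified, protection, wiring, user/global, and cache-attribute bits — is now assembled from the function arguments before the pmap and PV list locks come into play, so less work remains for the code that runs with the locks held. The following standalone C sketch illustrates that pattern only; the bit values and the helper make_newpte() are illustrative stand-ins, not the kernel's real PG_* definitions or code.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit assignments; the real definitions are the PG_* macros in the pmap headers. */
#define	PG_V	0x001ULL	/* valid */
#define	PG_RW	0x002ULL	/* writeable */
#define	PG_A	0x020ULL	/* accessed */
#define	PG_M	0x040ULL	/* modified (dirty) */
#define	PG_W	0x200ULL	/* wired (software-defined bit) */

#define	VM_PROT_WRITE	0x2	/* stand-in for the vm_prot_t flag */

typedef uint64_t pt_entry_t;

/*
 * Hypothetical helper: build the entire new PTE from the arguments alone,
 * the way the patched pmap_enter() now does up front.  "access" is the
 * access that triggered the call and "prot" the mapping's protection.
 */
static pt_entry_t
make_newpte(uint64_t pa, int access, int prot, int wired)
{
	pt_entry_t newpte;

	newpte = pa | PG_A | PG_V;	/* the mapping is about to be used: pre-set PG_A */
	if ((access & VM_PROT_WRITE) != 0)
		newpte |= PG_M;		/* a write access: pre-set the dirty bit as well */
	if ((prot & VM_PROT_WRITE) != 0)
		newpte |= PG_RW;
	if (wired)
		newpte |= PG_W;
	return (newpte);
}

int
main(void)
{
	pt_entry_t pte;

	pte = make_newpte(0x123456000ULL, VM_PROT_WRITE, VM_PROT_WRITE, 1);
	printf("newpte = %#llx\n", (unsigned long long)pte);
	return (0);
}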

@@ -3446,6 +3446,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
 	    va));
+	KASSERT((prot & access) == access, ("pmap_enter: access not in prot"));
 	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
 	    va >= kmi.clean_eva,
 	    ("pmap_enter: managed mapping within the clean submap"));
@@ -3453,7 +3454,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	    VM_OBJECT_LOCKED(m->object),
 	    ("pmap_enter: page %p is not busy", m));
 	pa = VM_PAGE_TO_PHYS(m);
-	newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
+	newpte = (pt_entry_t)(pa | PG_A | PG_V);
+	if ((access & VM_PROT_WRITE) != 0)
+		newpte |= PG_M;
 	if ((prot & VM_PROT_WRITE) != 0)
 		newpte |= PG_RW;
 	if ((prot & VM_PROT_EXECUTE) == 0)
@@ -3464,6 +3467,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 		newpte |= PG_U;
 	if (pmap == kernel_pmap)
 		newpte |= PG_G;
+	newpte |= pmap_cache_bits(m->md.pat_mode, 0);
 	mpte = om = NULL;
@@ -3495,7 +3499,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 		panic("pmap_enter: invalid page directory va=%#lx", va);
 	origpte = *pte;
-	opa = origpte & PG_FRAME;
 	/*
 	 * Is the specified virtual address already mapped?
@@ -3507,9 +3510,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 		 * are valid mappings in them. Hence, if a user page is wired,
 		 * the PT page will be also.
 		 */
-		if (wired && (origpte & PG_W) == 0)
+		if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
 			pmap->pm_stats.wired_count++;
-		else if (!wired && (origpte & PG_W))
+		else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
 			pmap->pm_stats.wired_count--;
 		/*
@@ -3523,17 +3526,20 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 		}
 		/*
-		 * Has the mapping changed?
+		 * Has the physical page changed?
 		 */
+		opa = origpte & PG_FRAME;
 		if (opa == pa) {
 			/*
 			 * No, might be a protection or wiring change.
 			 */
 			if ((origpte & PG_MANAGED) != 0) {
 				newpte |= PG_MANAGED;
+				if ((newpte & PG_RW) != 0)
+					vm_page_aflag_set(m, PGA_WRITEABLE);
 				om = m;
 			}
-			if ((origpte & ~(PG_M | PG_A)) == newpte)
+			if (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0)
 				goto unchanged;
 			goto validate;
 		} else {
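The rewritten comparison in the hunk above is the usual mask-and-XOR idiom: two PTEs describe the same mapping if they differ only in the hardware-maintained PG_M and PG_A bits, so XORing them and masking those bits away yields zero exactly when nothing that matters changed. The old form, (origpte & ~(PG_M | PG_A)) == newpte, relied on newpte not yet containing PG_A or PG_M; once those bits are pre-set, both sides have to be masked. A small self-contained illustration, again with stand-in constants and a hypothetical pte_unchanged() helper:

#include <assert.h>
#include <stdint.h>

/* Stand-in bit values, as in the earlier sketch. */
#define	PG_V	0x001ULL
#define	PG_RW	0x002ULL
#define	PG_A	0x020ULL
#define	PG_M	0x040ULL

typedef uint64_t pt_entry_t;

/*
 * True when the two PTEs map the same frame with the same attributes,
 * ignoring the referenced/dirty bits that the MMU may have set in origpte
 * or that were pre-set in newpte.
 */
static int
pte_unchanged(pt_entry_t origpte, pt_entry_t newpte)
{
	return (((origpte ^ newpte) & ~(PG_M | PG_A)) == 0);
}

int
main(void)
{
	pt_entry_t origpte, newpte;

	origpte = 0x123456000ULL | PG_V | PG_RW | PG_A | PG_M;
	newpte = 0x123456000ULL | PG_V | PG_RW | PG_A;

	assert(pte_unchanged(origpte, newpte));			/* only PG_M differs */
	assert(!pte_unchanged(origpte, newpte & ~PG_RW));	/* a protection change */
	return (0);
}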
@@ -3547,7 +3553,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 		/*
 		 * Increment the counters.
 		 */
-		if (wired)
+		if ((newpte & PG_W) != 0)
 			pmap->pm_stats.wired_count++;
 		pmap_resident_count_inc(pmap, 1);
 	}
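With PG_W carried in newpte, the wiring bookkeeping in the hunks above no longer needs to consult the separate wired argument: comparing the PG_W bit of the old and new PTEs says whether pm_stats.wired_count goes up, down, or stays put. A stand-alone sketch of that accounting, using the same illustrative constants and a hypothetical account_wiring() helper:

#include <stdint.h>
#include <stdio.h>

#define	PG_V	0x001ULL
#define	PG_W	0x200ULL	/* stand-in for the software "wired" bit */

typedef uint64_t pt_entry_t;

struct pmap_stats {
	long	wired_count;	/* number of wired mappings in the pmap */
};

/* Adjust the wired-mapping count from the old and new PTEs alone. */
static void
account_wiring(struct pmap_stats *stats, pt_entry_t origpte, pt_entry_t newpte)
{
	if ((newpte & PG_W) != 0 && (origpte & PG_W) == 0)
		stats->wired_count++;
	else if ((newpte & PG_W) == 0 && (origpte & PG_W) != 0)
		stats->wired_count--;
}

int
main(void)
{
	struct pmap_stats stats = { 0 };

	account_wiring(&stats, 0, PG_V | PG_W);		/* new wired mapping */
	account_wiring(&stats, PG_V | PG_W, PG_V);	/* wiring dropped */
	printf("wired_count = %ld\n", stats.wired_count);
	return (0);
}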
@@ -3561,21 +3567,18 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 		pv->pv_va = va;
 		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+		if ((newpte & PG_RW) != 0)
+			vm_page_aflag_set(m, PGA_WRITEABLE);
 	}
-validate:
 	/*
 	 * Update the PTE.
 	 */
-	newpte |= PG_A;
-	if ((access & VM_PROT_WRITE) != 0)
-		newpte |= PG_M;
-	if ((newpte & (PG_MANAGED | PG_RW)) == (PG_MANAGED | PG_RW))
-		vm_page_aflag_set(m, PGA_WRITEABLE);
 	if ((origpte & PG_V) != 0) {
+validate:
 		invlva = FALSE;
 		origpte = pte_load_store(pte, newpte);
 		opa = origpte & PG_FRAME;
 		if ((origpte & PG_A) != 0 && (opa != pa ||
 		    ((origpte & PG_NX) == 0 && (newpte & PG_NX) != 0)))
 			invlva = TRUE;