diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 737dc632e3ce..94ad7d1d856a 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -5453,8 +5453,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
 {
 	struct spglist free;
-	pt_entry_t *pte, PG_V;
-	vm_paddr_t pa;
+	pt_entry_t newpte, *pte, PG_V;
 
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
@@ -5544,17 +5543,15 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	 */
 	pmap_resident_count_inc(pmap, 1);
 
-	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 0);
+	newpte = VM_PAGE_TO_PHYS(m) | PG_V |
+	    pmap_cache_bits(pmap, m->md.pat_mode, 0);
+	if ((m->oflags & VPO_UNMANAGED) == 0)
+		newpte |= PG_MANAGED;
 	if ((prot & VM_PROT_EXECUTE) == 0)
-		pa |= pg_nx;
-
-	/*
-	 * Now validate mapping with RO protection
-	 */
-	if ((m->oflags & VPO_UNMANAGED) != 0)
-		pte_store(pte, pa | PG_V | PG_U);
-	else
-		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
+		newpte |= pg_nx;
+	if (va < VM_MAXUSER_ADDRESS)
+		newpte |= PG_U;
+	pte_store(pte, newpte);
 
 	return (mpte);
 }
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 42d3a9b24bd9..e1067d9c0957 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -4095,8 +4095,7 @@ static vm_page_t
 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_prot_t prot, vm_page_t mpte)
 {
-	pt_entry_t *pte;
-	vm_paddr_t pa;
+	pt_entry_t newpte, *pte;
 	struct spglist free;
 
 	KASSERT(pmap != kernel_pmap || va < kmi.clean_sva ||
@@ -4179,19 +4178,17 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	 */
 	pmap->pm_stats.resident_count++;
 
-	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 0);
+	newpte = VM_PAGE_TO_PHYS(m) | PG_V |
+	    pmap_cache_bits(pmap, m->md.pat_mode, 0);
+	if ((m->oflags & VPO_UNMANAGED) == 0)
+		newpte |= PG_MANAGED;
 #if defined(PAE) || defined(PAE_TABLES)
 	if ((prot & VM_PROT_EXECUTE) == 0)
-		pa |= pg_nx;
+		newpte |= pg_nx;
 #endif
-
-	/*
-	 * Now validate mapping with RO protection
-	 */
-	if ((m->oflags & VPO_UNMANAGED) != 0)
-		pte_store(pte, pa | PG_V | PG_U);
-	else
-		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
+	if (pmap != kernel_pmap)
+		newpte |= PG_U;
+	pte_store(pte, newpte);
 	sched_unpin();
 	return (mpte);
 }
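
For reference, below is a minimal userland sketch of the pattern both hunks converge on: accumulate every attribute in a local newpte, then set PG_U only when the address belongs to user space, rather than unconditionally as the removed pte_store() calls did. PG_V, PG_U, and the NX bit use the real x86 PTE encodings, but the PG_MANAGED value, VM_MAXUSER_ADDRESS, and the make_pte() helper itself are illustrative stand-ins, not the kernel's definitions.

/*
 * Sketch of the newpte-accumulation pattern from the patch above.
 * PG_MANAGED's value, VM_MAXUSER_ADDRESS, and make_pte() are
 * illustrative stand-ins; the hardware-defined bits are accurate.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pt_entry_t;

#define	PG_V		0x0000000000000001ULL	/* P: valid/present */
#define	PG_U		0x0000000000000004ULL	/* U/S: user-accessible */
#define	PG_MANAGED	0x0000000000000400ULL	/* sw bit; value illustrative */
#define	PG_NX		0x8000000000000000ULL	/* XD: no-execute */

#define	VM_MAXUSER_ADDRESS	0x0000800000000000ULL	/* illustrative */

static pt_entry_t
make_pte(uint64_t pa, uint64_t va, bool executable, bool managed)
{
	pt_entry_t newpte;

	newpte = pa | PG_V;		/* start from frame address + valid */
	if (managed)
		newpte |= PG_MANAGED;	/* page has pv entries to track */
	if (!executable)
		newpte |= PG_NX;	/* deny instruction fetch */
	if (va < VM_MAXUSER_ADDRESS)
		newpte |= PG_U;		/* only user VAs get the U/S bit */
	return (newpte);
}

int
main(void)
{
	/* User-space mapping: PG_U is set. */
	printf("user pte:   0x%016jx\n",
	    (uintmax_t)make_pte(0x1234000, 0x00400000, false, true));
	/* Kernel mapping: PG_U stays clear. */
	printf("kernel pte: 0x%016jx\n",
	    (uintmax_t)make_pte(0x1234000, 0xffffffff80000000ULL, false,
	    false));
	return (0);
}

The two files phrase the user-mapping test differently, amd64 checks va < VM_MAXUSER_ADDRESS while i386 checks pmap != kernel_pmap, but both express the same condition, so kernel mappings created through pmap_enter_quick_locked() no longer carry PG_U.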