Avoid setting PG_U unconditionally in pmap_enter_quick_locked().

This KPI may in principle be used to create kernel mappings, in which
case we certainly should not be setting PG_U.  In any case, PG_U must be
set at every level of the page tables to grant user-mode access, and we
were setting it only on leaf entries.  Thus, this change should have no
functional impact.

Reviewed by:	kib
MFC after:	1 week
Sponsored by:	The FreeBSD Foundation
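A minimal sketch, for illustration only and not part of the commit, of the hardware rule the message relies on: the x86 MMU grants user-mode access only when the U/S bit (PG_U) is set at every level of the paging hierarchy, so the effective permission is the AND of those bits and a PG_U set solely on the leaf PTE grants nothing.  The typedef, the PG_U value, and the helper name below are local stand-ins, not kernel code.

#include <stdbool.h>
#include <stdint.h>

/* Local stand-ins for the kernel definitions; illustrative only. */
typedef uint64_t pt_entry_t;
#define	PG_U	0x004		/* x86 user/supervisor bit (bit 2) */

/*
 * On amd64's 4-level paging, a 4KB mapping is user-accessible only when
 * U/S is set in the PML4E, PDPE, PDE and PTE that translate the address;
 * the CPU effectively ANDs the bits, so PG_U on the leaf entry alone
 * grants nothing.  (i386 applies the same rule across its levels.)
 */
static bool
user_accessible(pt_entry_t pml4e, pt_entry_t pdpe, pt_entry_t pde,
    pt_entry_t pte)
{
	return ((pml4e & pdpe & pde & pte & PG_U) != 0);
}

Under that rule, the new checks in the diffs below (va < VM_MAXUSER_ADDRESS on amd64, pmap != kernel_pmap on i386) only decide whether the leaf entry advertises user access; the upper levels of the page tables are populated elsewhere.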
Mark Johnston	2019-01-02 15:36:35 +00:00
commit 9bfc7fa41d
parent 1dbb72e9e8
Notes (svn2git, 2020-12-20 02:59:44 +00:00):	svn path=/head/; revision=342686
2 changed files with 18 additions and 24 deletions


@@ -5453,8 +5453,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
 {
 	struct spglist free;
-	pt_entry_t *pte, PG_V;
-	vm_paddr_t pa;
+	pt_entry_t newpte, *pte, PG_V;
 
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
@@ -5544,17 +5543,15 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	 */
 	pmap_resident_count_inc(pmap, 1);
 
-	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 0);
+	newpte = VM_PAGE_TO_PHYS(m) | PG_V |
+	    pmap_cache_bits(pmap, m->md.pat_mode, 0);
+	if ((m->oflags & VPO_UNMANAGED) == 0)
+		newpte |= PG_MANAGED;
 	if ((prot & VM_PROT_EXECUTE) == 0)
-		pa |= pg_nx;
-
-	/*
-	 * Now validate mapping with RO protection
-	 */
-	if ((m->oflags & VPO_UNMANAGED) != 0)
-		pte_store(pte, pa | PG_V | PG_U);
-	else
-		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
+		newpte |= pg_nx;
+	if (va < VM_MAXUSER_ADDRESS)
+		newpte |= PG_U;
+	pte_store(pte, newpte);
 	return (mpte);
 }


@@ -4095,8 +4095,7 @@ static vm_page_t
 pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_prot_t prot, vm_page_t mpte)
 {
-	pt_entry_t *pte;
-	vm_paddr_t pa;
+	pt_entry_t newpte, *pte;
 	struct spglist free;
 
 	KASSERT(pmap != kernel_pmap || va < kmi.clean_sva ||
@@ -4179,19 +4178,17 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	 */
 	pmap->pm_stats.resident_count++;
 
-	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 0);
+	newpte = VM_PAGE_TO_PHYS(m) | PG_V |
+	    pmap_cache_bits(pmap, m->md.pat_mode, 0);
+	if ((m->oflags & VPO_UNMANAGED) == 0)
+		newpte |= PG_MANAGED;
 #if defined(PAE) || defined(PAE_TABLES)
 	if ((prot & VM_PROT_EXECUTE) == 0)
-		pa |= pg_nx;
+		newpte |= pg_nx;
 #endif
-
-	/*
-	 * Now validate mapping with RO protection
-	 */
-	if ((m->oflags & VPO_UNMANAGED) != 0)
-		pte_store(pte, pa | PG_V | PG_U);
-	else
-		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
+	if (pmap != kernel_pmap)
+		newpte |= PG_U;
+	pte_store(pte, newpte);
 	sched_unpin();
 	return (mpte);
 }