When we protect PTEs (as opposed to PDEs), we only call vm_page_dirty() when, in fact, we are write protecting the page and the PTE has PG_M set.
However, pmap_protect_pde() was always calling vm_page_dirty() when the PDE
has PG_M set.  So, adding PG_NX to a writable PDE could result in
unnecessary (but harmless) calls to vm_page_dirty().

Simplify the loop calling vm_page_dirty() in pmap_protect_pde().

Reviewed by:	kib, markj
MFC after:	1 week
Differential Revision:	https://reviews.freebsd.org/D20793
This commit is contained in:
Alan Cox 2019-06-28 22:40:34 +00:00
parent f48c41accd
commit c134ef742f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=349526
2 changed files with 18 additions and 20 deletions

View File

@ -5202,8 +5202,7 @@ static boolean_t
pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
{
pd_entry_t newpde, oldpde;
vm_offset_t eva, va;
vm_page_t m;
vm_page_t m, mt;
boolean_t anychanged;
pt_entry_t PG_G, PG_M, PG_RW;
@ -5217,15 +5216,15 @@ pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
anychanged = FALSE;
retry:
oldpde = newpde = *pde;
if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
(PG_MANAGED | PG_M | PG_RW)) {
eva = sva + NBPDR;
for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
va < eva; va += PAGE_SIZE, m++)
vm_page_dirty(m);
}
if ((prot & VM_PROT_WRITE) == 0)
if ((prot & VM_PROT_WRITE) == 0) {
if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
(PG_MANAGED | PG_M | PG_RW)) {
m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
vm_page_dirty(mt);
}
newpde &= ~(PG_RW | PG_M);
}
if ((prot & VM_PROT_EXECUTE) == 0)
newpde |= pg_nx;
if (newpde != oldpde) {

View File

@ -3251,8 +3251,7 @@ static boolean_t
pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
{
pd_entry_t newpde, oldpde;
vm_offset_t eva, va;
vm_page_t m;
vm_page_t m, mt;
boolean_t anychanged;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@ -3261,15 +3260,15 @@ pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
anychanged = FALSE;
retry:
oldpde = newpde = *pde;
if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
(PG_MANAGED | PG_M | PG_RW)) {
eva = sva + NBPDR;
for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
va < eva; va += PAGE_SIZE, m++)
vm_page_dirty(m);
}
if ((prot & VM_PROT_WRITE) == 0)
if ((prot & VM_PROT_WRITE) == 0) {
if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
(PG_MANAGED | PG_M | PG_RW)) {
m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
vm_page_dirty(mt);
}
newpde &= ~(PG_RW | PG_M);
}
#ifdef PMAP_PAE_COMP
if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec)
newpde |= pg_nx;