From c134ef742fea6bbb5d44d00f7dd1eff1ed5c4552 Mon Sep 17 00:00:00 2001
From: Alan Cox
Date: Fri, 28 Jun 2019 22:40:34 +0000
Subject: [PATCH] When we protect PTEs (as opposed to PDEs), we only call
 vm_page_dirty() when, in fact, we are write protecting the page and the PTE
 has PG_M set. However, pmap_protect_pde() was always calling vm_page_dirty()
 when the PDE has PG_M set. So, adding PG_NX to a writeable PDE could result
 in unnecessary (but harmless) calls to vm_page_dirty().

Simplify the loop calling vm_page_dirty() in pmap_protect_pde().

Reviewed by:	kib, markj
MFC after:	1 week
Differential Revision:	https://reviews.freebsd.org/D20793
---
 sys/amd64/amd64/pmap.c | 19 +++++++++----------
 sys/i386/i386/pmap.c   | 19 +++++++++----------
 2 files changed, 18 insertions(+), 20 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index a0a2eb0baa6b..58878e79c07c 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -5202,8 +5202,7 @@ static boolean_t
 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
 {
 	pd_entry_t newpde, oldpde;
-	vm_offset_t eva, va;
-	vm_page_t m;
+	vm_page_t m, mt;
 	boolean_t anychanged;
 	pt_entry_t PG_G, PG_M, PG_RW;
 
@@ -5217,15 +5216,15 @@ pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
 	anychanged = FALSE;
 retry:
 	oldpde = newpde = *pde;
-	if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
-	    (PG_MANAGED | PG_M | PG_RW)) {
-		eva = sva + NBPDR;
-		for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
-		    va < eva; va += PAGE_SIZE, m++)
-			vm_page_dirty(m);
-	}
-	if ((prot & VM_PROT_WRITE) == 0)
+	if ((prot & VM_PROT_WRITE) == 0) {
+		if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
+		    (PG_MANAGED | PG_M | PG_RW)) {
+			m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
+			for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
+				vm_page_dirty(mt);
+		}
 		newpde &= ~(PG_RW | PG_M);
+	}
 	if ((prot & VM_PROT_EXECUTE) == 0)
 		newpde |= pg_nx;
 	if (newpde != oldpde) {
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index e312549f3f15..48f52f7c0406 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -3251,8 +3251,7 @@ static boolean_t
 pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
 {
 	pd_entry_t newpde, oldpde;
-	vm_offset_t eva, va;
-	vm_page_t m;
+	vm_page_t m, mt;
 	boolean_t anychanged;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -3261,15 +3260,15 @@ pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
 	anychanged = FALSE;
 retry:
 	oldpde = newpde = *pde;
-	if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
-	    (PG_MANAGED | PG_M | PG_RW)) {
-		eva = sva + NBPDR;
-		for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
-		    va < eva; va += PAGE_SIZE, m++)
-			vm_page_dirty(m);
-	}
-	if ((prot & VM_PROT_WRITE) == 0)
+	if ((prot & VM_PROT_WRITE) == 0) {
+		if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
+		    (PG_MANAGED | PG_M | PG_RW)) {
+			m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
+			for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
+				vm_page_dirty(mt);
+		}
 		newpde &= ~(PG_RW | PG_M);
+	}
 #ifdef PMAP_PAE_COMP
 	if ((prot & VM_PROT_EXECUTE) == 0 && !i386_read_exec)
 		newpde |= pg_nx;