Merge r349526 from amd64.  When we protect an L3 entry, we only call
vm_page_dirty() when, in fact, we are write protecting the page and the L3
entry has PTE_D set.  However, pmap_protect() was always calling
vm_page_dirty() when an L2 entry has PTE_D set.  Handle L2 entries the
same as L3 entries so that we won't perform unnecessary calls to
vm_page_dirty().

Simplify the loop calling vm_page_dirty() on L2 entries.
Alan Cox 2019-07-05 05:23:23 +00:00
parent 5d47236b18
commit 7fe5c13c05
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=349760

@@ -2298,9 +2298,9 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 {
 	pd_entry_t *l1, *l2, l2e;
 	pt_entry_t *l3, l3e, mask;
-	vm_page_t m;
+	vm_page_t m, mt;
 	vm_paddr_t pa;
-	vm_offset_t va, va_next;
+	vm_offset_t va_next;
 	bool anychanged, pv_lists_locked;
 
 	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
@@ -2340,12 +2340,13 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 		if ((l2e & PTE_RWX) != 0) {
 			if (sva + L2_SIZE == va_next && eva >= va_next) {
 retryl2:
-				if ((l2e & (PTE_SW_MANAGED | PTE_D)) ==
+				if ((prot & VM_PROT_WRITE) == 0 &&
+				    (l2e & (PTE_SW_MANAGED | PTE_D)) ==
 				    (PTE_SW_MANAGED | PTE_D)) {
 					pa = PTE_TO_PHYS(l2e);
-					for (va = sva, m = PHYS_TO_VM_PAGE(pa);
-					    va < va_next; m++, va += PAGE_SIZE)
-						vm_page_dirty(m);
+					m = PHYS_TO_VM_PAGE(pa);
+					for (mt = m; mt < &m[Ln_ENTRIES]; mt++)
+						vm_page_dirty(mt);
 				}
 				if (!atomic_fcmpset_long(l2, &l2e, l2e & ~mask))
 					goto retryl2;
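
For illustration, here is a minimal userspace sketch of the rule the diff applies
on the L2 (superpage) path: vm_page_dirty() is called on the constituent base
pages only when write permission is actually being removed and the entry is both
managed and dirty, and the simplified loop walks Ln_ENTRIES vm_page structures
instead of stepping a virtual address.  The constants, the stub vm_page, and
l2_propagate_dirty() below are illustrative stand-ins, not the real riscv pte.h
or vm definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; not the kernel's actual values. */
#define	PTE_D		(1ul << 7)	/* hardware dirty bit */
#define	PTE_SW_MANAGED	(1ul << 9)	/* software "managed mapping" bit */
#define	VM_PROT_WRITE	0x02
#define	Ln_ENTRIES	512		/* 4K base pages per 2M superpage */

struct vm_page {			/* stub for the kernel's vm_page */
	bool dirty;
};

static void
vm_page_dirty(struct vm_page *m)
{
	m->dirty = true;
}

/*
 * Dirty-propagation rule from the diff above: only when write access is
 * being revoked and the L2 entry is both managed and dirty do we mark
 * every base page of the superpage dirty, using the simplified loop over
 * the vm_page array rather than advancing a virtual address.
 */
static void
l2_propagate_dirty(struct vm_page *m, uint64_t l2e, int prot)
{
	struct vm_page *mt;

	if ((prot & VM_PROT_WRITE) == 0 &&
	    (l2e & (PTE_SW_MANAGED | PTE_D)) == (PTE_SW_MANAGED | PTE_D)) {
		for (mt = m; mt < &m[Ln_ENTRIES]; mt++)
			vm_page_dirty(mt);
	}
}

int
main(void)
{
	static struct vm_page pages[Ln_ENTRIES];

	/* Write-protecting a managed, dirty superpage: pages are dirtied. */
	l2_propagate_dirty(pages, PTE_SW_MANAGED | PTE_D, 0);

	/* If VM_PROT_WRITE were retained, the loop would be skipped. */
	printf("first page dirty: %d\n", pages[0].dirty);
	return (0);
}

The guard matters because PTE_D alone does not mean the machine-independent
layer needs to hear about the dirty state here; unless write access is being
taken away, the protection change leaves the dirty bit intact, so calling
vm_page_dirty() would be the unnecessary work the commit message describes.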