Clearing a page table entry's accessed bit (PG_A) and setting the page's
PG_REFERENCED flag in pmap_protect() can't really be justified.  In contrast
to pmap_remove() or pmap_remove_all(), the mapping is not being destroyed,
so the notion that the page was accessed is not lost.  Moreover, clearing
the page table entry's accessed bit and setting the page's PG_REFERENCED
flag can throw off the page daemon's activity count calculation.  Finally,
in my tests, I found that 15% of the atomic memory operations being
performed by pmap_protect() were only to clear PG_A, and not change
protection.  This could, by itself, be fixed, but I don't see the point
given the above argument.

Remove a comment from pmap_protect_pde() that is no longer meaningful after
the above change.
parent 6dbd88581d
commit 0d2e1c3e39
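Before the diff, a rough illustration of the behavioural difference in pmap_protect()'s small-page loop may help. The following is a hypothetical, self-contained user-space sketch, not the kernel code: the flag values, the pte_t type, and the stub functions are invented stand-ins. The old path transfers PG_A into the page's PG_REFERENCED flag and clears it on every call, even when no protection change is needed; the new path only does the dirty-bit bookkeeping, and only when write access is actually being removed.

/*
 * Hypothetical user-space model of the change to pmap_protect()'s
 * small-page loop.  Flag values, the pte_t type, and the *_stub()
 * functions are stand-ins, not the kernel's definitions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	PG_V		0x001	/* valid */
#define	PG_RW		0x002	/* writable */
#define	PG_A		0x020	/* accessed */
#define	PG_M		0x040	/* modified (dirty) */
#define	PG_MANAGED	0x400	/* stand-in for the managed-page flag */

typedef uint64_t pte_t;

static void vm_page_dirty_stub(void)          { puts("vm_page_dirty()"); }
static void vm_page_set_referenced_stub(void) { puts("PG_REFERENCED set"); }

/* Old behaviour: PG_A is always transferred to PG_REFERENCED and cleared. */
static pte_t
protect_pte_old(pte_t pbits, bool removing_write)
{
	if (pbits & PG_MANAGED) {
		if (pbits & PG_A) {
			vm_page_set_referenced_stub();
			pbits &= ~PG_A;	/* the extra atomic update in the real pmap */
		}
		if ((pbits & (PG_M | PG_RW)) == (PG_M | PG_RW))
			vm_page_dirty_stub();
	}
	if (removing_write)
		pbits &= ~(PG_RW | PG_M);
	return (pbits);
}

/*
 * New behaviour: PG_A and PG_REFERENCED are left alone; the dirty bit is
 * only saved when write permission is actually being removed.
 */
static pte_t
protect_pte_new(pte_t pbits, bool removing_write)
{
	if (removing_write) {
		if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
		    (PG_MANAGED | PG_M | PG_RW))
			vm_page_dirty_stub();
		pbits &= ~(PG_RW | PG_M);
	}
	return (pbits);
}

int
main(void)
{
	pte_t pte = PG_V | PG_RW | PG_A | PG_M | PG_MANAGED;

	printf("old: %#llx\n", (unsigned long long)protect_pte_old(pte, true));
	printf("new: %#llx\n", (unsigned long long)protect_pte_new(pte, true));
	return (0);
}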
sys/amd64/amd64/pmap.c
@@ -2833,18 +2833,9 @@ retry:
 	if (oldpde & PG_MANAGED) {
 		eva = sva + NBPDR;
 		for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
-		    va < eva; va += PAGE_SIZE, m++) {
-			/*
-			 * In contrast to the analogous operation on a 4KB page
-			 * mapping, the mapping's PG_A flag is not cleared and
-			 * the page's PG_REFERENCED flag is not set.  The
-			 * reason is that pmap_demote_pde() expects that a 2MB
-			 * page mapping with a stored page table page has PG_A
-			 * set.
-			 */
+		    va < eva; va += PAGE_SIZE, m++)
 			if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 				vm_page_dirty(m);
-		}
 	}
 	if ((prot & VM_PROT_WRITE) == 0)
 		newpde &= ~(PG_RW | PG_M);
@@ -2953,23 +2944,15 @@ retry:
 			obits = pbits = *pte;
 			if ((pbits & PG_V) == 0)
 				continue;
-			if (pbits & PG_MANAGED) {
-				m = NULL;
-				if (pbits & PG_A) {
-					m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
-					vm_page_flag_set(m, PG_REFERENCED);
-					pbits &= ~PG_A;
-				}
-				if ((pbits & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
-					if (m == NULL)
-						m = PHYS_TO_VM_PAGE(pbits &
-						    PG_FRAME);
-					vm_page_dirty(m);
-				}
-			}
 
-			if ((prot & VM_PROT_WRITE) == 0)
+			if ((prot & VM_PROT_WRITE) == 0) {
+				if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
+				    (PG_MANAGED | PG_M | PG_RW)) {
+					m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
+					vm_page_dirty(m);
+				}
 				pbits &= ~(PG_RW | PG_M);
+			}
 			if ((prot & VM_PROT_EXECUTE) == 0)
 				pbits |= pg_nx;
 
sys/i386/i386/pmap.c
@@ -2955,18 +2955,9 @@ retry:
 	if (oldpde & PG_MANAGED) {
 		eva = sva + NBPDR;
 		for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
-		    va < eva; va += PAGE_SIZE, m++) {
-			/*
-			 * In contrast to the analogous operation on a 4KB page
-			 * mapping, the mapping's PG_A flag is not cleared and
-			 * the page's PG_REFERENCED flag is not set.  The
-			 * reason is that pmap_demote_pde() expects that a 2/4MB
-			 * page mapping with a stored page table page has PG_A
-			 * set.
-			 */
+		    va < eva; va += PAGE_SIZE, m++)
 			if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
 				vm_page_dirty(m);
-		}
 	}
 	if ((prot & VM_PROT_WRITE) == 0)
 		newpde &= ~(PG_RW | PG_M);
@@ -3074,22 +3065,15 @@ retry:
 			obits = pbits = *pte;
 			if ((pbits & PG_V) == 0)
 				continue;
-			if (pbits & PG_MANAGED) {
-				m = NULL;
-				if (pbits & PG_A) {
-					m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
-					vm_page_flag_set(m, PG_REFERENCED);
-					pbits &= ~PG_A;
-				}
-				if ((pbits & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
-					if (m == NULL)
-						m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
-					vm_page_dirty(m);
-				}
-			}
 
-			if ((prot & VM_PROT_WRITE) == 0)
+			if ((prot & VM_PROT_WRITE) == 0) {
+				if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
+				    (PG_MANAGED | PG_M | PG_RW)) {
+					m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
+					vm_page_dirty(m);
+				}
 				pbits &= ~(PG_RW | PG_M);
+			}
 #ifdef PAE
 			if ((prot & VM_PROT_EXECUTE) == 0)
 				pbits |= pg_nx;