Rework setting PTE_D for kernel mappings.
Rather than unconditionally setting PTE_D for all writeable kernel mappings,
set PTE_D for writable mappings of unmanaged pages (whether user or kernel).
This matches what amd64 does and also matches what the RISC-V spec suggests
(preset the A and D bits on mappings where the OS doesn't care about the
state).

Suggested by:	alc
Reviewed by:	alc, markj
Sponsored by:	DARPA
commit ff9738d954
parent 6806504da4
@@ -2098,13 +2098,20 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		new_l3 |= PTE_W;
 	if ((va >> 63) == 0)
 		new_l3 |= PTE_U;
-	else if (prot & VM_PROT_WRITE)
-		new_l3 |= PTE_D;
 
 	new_l3 |= (pn << PTE_PPN0_S);
 	if ((flags & PMAP_ENTER_WIRED) != 0)
 		new_l3 |= PTE_SW_WIRED;
-	if ((m->oflags & VPO_UNMANAGED) == 0)
-		new_l3 |= PTE_SW_MANAGED;
+
+	/*
+	 * Set modified bit gratuitously for writeable mappings if
+	 * the page is unmanaged. We do not want to take a fault
+	 * to do the dirty bit accounting for these mappings.
+	 */
+	if ((m->oflags & VPO_UNMANAGED) != 0) {
+		if (prot & VM_PROT_WRITE)
+			new_l3 |= PTE_D;
+	} else
+		new_l3 |= PTE_SW_MANAGED;
 
 	CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
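For illustration, here is a minimal, standalone sketch of the policy this change
adopts: preset the dirty bit on writable mappings of unmanaged pages, and leave
managed pages clean so the first write still faults and can be accounted. The
bit positions follow the RISC-V privileged spec's leaf-PTE layout; the helper
name make_leaf_pte(), its parameters, and the "managed" flag are hypothetical
and are not part of the committed pmap code.

#include <stdbool.h>
#include <stdint.h>

/* RISC-V Sv39/Sv48 leaf-PTE bits, per the privileged spec. */
#define	PTE_V	(1ul << 0)	/* valid */
#define	PTE_R	(1ul << 1)	/* readable */
#define	PTE_W	(1ul << 2)	/* writable */
#define	PTE_U	(1ul << 4)	/* user accessible */
#define	PTE_D	(1ul << 7)	/* dirty */

/*
 * Hypothetical helper, not pmap_enter() itself: build a leaf PTE and
 * preset PTE_D only for writable mappings of unmanaged pages, so no
 * fault is ever taken just to do dirty-bit accounting for them.
 */
static uint64_t
make_leaf_pte(uint64_t ppn_bits, bool user, bool writable, bool managed)
{
	uint64_t pte;

	pte = PTE_V | PTE_R | ppn_bits;
	if (writable)
		pte |= PTE_W;
	if (user)
		pte |= PTE_U;
	if (!managed && writable)
		pte |= PTE_D;	/* dirty state is never tracked; skip the fault */
	return (pte);
}

The RISC-V spec allows an implementation either to update A/D in hardware or to
raise a page fault when a clear bit would need to be set, so presetting D here
removes that fault for mappings whose dirty state the kernel never inspects;
managed pages keep the clean PTE precisely so that the first write can be
caught and accounted.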