diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 2e347a1df681..0052614cb9f8 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -883,7 +883,7 @@ readrest:
 	}
 	vm_page_lock_queues();
 	vm_page_flag_clear(fs.m, PG_ZERO);
-	vm_page_flag_set(fs.m, PG_MAPPED|PG_REFERENCED);
+	vm_page_flag_set(fs.m, PG_REFERENCED);
 
 	/*
 	 * If the page is not wired down, then put it where the pageout daemon
@@ -1077,7 +1077,7 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
 		vm_page_flag_clear(dst_m, PG_ZERO);
 		pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
 		vm_page_lock_queues();
-		vm_page_flag_set(dst_m, PG_WRITEABLE|PG_MAPPED);
+		vm_page_flag_set(dst_m, PG_WRITEABLE);
 
 		/*
 		 * Mark it no longer busy, and put it on the active list.
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 5fe1b1e62c94..f6fa55a8b91a 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -429,7 +429,7 @@ retry:
 		 * Because this is kernel_pmap, this call will not block.
 		 */
 		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
-		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
+		vm_page_flag_set(m, PG_WRITEABLE | PG_REFERENCED);
 	}
 	vm_map_unlock(map);
 
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 5075fcec0707..ba1809b11405 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -447,7 +447,7 @@ vm_page_protect(vm_page_t mem, int prot)
 	if (prot == VM_PROT_NONE) {
 		if (pmap_page_is_mapped(mem) || (mem->flags & PG_WRITEABLE)) {
 			pmap_page_protect(mem, VM_PROT_NONE);
-			vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
+			vm_page_flag_clear(mem, PG_WRITEABLE);
 		}
 	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
 		pmap_page_protect(mem, VM_PROT_READ);
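
The common thread in these hunks is that PG_MAPPED is no longer set or cleared alongside the other software attribute bits; the vm_page.c hunk instead asks the pmap layer directly via pmap_page_is_mapped(). The sketch below is a minimal, user-space model of that pattern, not kernel code: struct toy_page, toy_page_is_mapped(), and toy_page_protect_none() are hypothetical stand-ins for vm_page/pmap structures, and PG_WRITEABLE here is just a toy bit that mirrors the real flag's name.

```c
#include <stdbool.h>
#include <stdio.h>

#define PG_WRITEABLE 0x0001	/* toy flag bit, name borrowed for clarity */

struct toy_page {
	int flags;		/* software attribute bits (PG_WRITEABLE, ...) */
	int mapping_count;	/* stand-in for the pmap's own mapping state */
};

/* Stand-in for pmap_page_is_mapped(): query the pmap layer, not a cached flag. */
static bool
toy_page_is_mapped(const struct toy_page *m)
{
	return (m->mapping_count > 0);
}

/* Mirrors the VM_PROT_NONE branch of vm_page_protect() in the diff above. */
static void
toy_page_protect_none(struct toy_page *m)
{
	if (toy_page_is_mapped(m) || (m->flags & PG_WRITEABLE)) {
		/* pmap_page_protect(m, VM_PROT_NONE) would go here. */
		m->mapping_count = 0;
		m->flags &= ~PG_WRITEABLE;	/* no PG_MAPPED bit to clear anymore */
	}
}

int
main(void)
{
	struct toy_page m = { .flags = PG_WRITEABLE, .mapping_count = 2 };

	toy_page_protect_none(&m);
	printf("mapped=%d writeable=%d\n",
	    toy_page_is_mapped(&m), (m.flags & PG_WRITEABLE) != 0);
	return (0);
}
```

The point of the model: once "is this page mapped?" is answered by the pmap layer itself, a separately maintained PG_MAPPED flag is redundant state that can drift out of sync, which is why the flag updates disappear from vm_fault.c, vm_kern.c, and vm_page.c above.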