Complete the transition from pmap_page_protect() to pmap_remove_write().

Originally, I had adopted sparc64's name, pmap_clear_write(), for the
function that is now pmap_remove_write().  However, this function is more
like pmap_remove_all() than like pmap_clear_modify() or
pmap_clear_reference(); hence the name change.

The higher-level rationale behind this change is described in
src/sys/amd64/amd64/pmap.c revision 1.567.  The short version is that I'm
trying to clean up and fix our support for execute access.

Reviewed by: marcel@ (ia64)
Author: Alan Cox
Date:   2006-08-01 19:06:06 +00:00
Parent: 0d024885b9
Commit: 78985e424a
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=160889

13 changed files with 153 additions and 296 deletions
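
As a quick orientation before the per-file diffs, here is a minimal caller-side sketch of what the rename means for machine-independent code.  It is not taken from this commit, and the wrapper function below is hypothetical; only vm_page_t, boolean_t, pmap_remove_write(), and pmap_remove_all() are existing kernel interfaces.  Where a caller previously used pmap_page_protect(m, VM_PROT_READ) to make a page read-only, it now calls pmap_remove_write(m); removing every mapping of a page is still pmap_remove_all(m).

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_page.h>

/*
 * Hypothetical helper, for illustration only: downgrade all mappings of
 * the page "m".  With drop_all_mappings false, write permission is
 * revoked and the hardware modified bits are cleared (the dirty state is
 * transferred to the vm_page), but read/execute mappings survive.  With
 * it true, every mapping of the page is removed.
 */
static void
example_downgrade_page(vm_page_t m, boolean_t drop_all_mappings)
{

	if (drop_all_mappings)
		pmap_remove_all(m);
	else
		pmap_remove_write(m);
}

Callers that previously passed VM_PROT_READ or VM_PROT_READ | VM_PROT_EXECUTE to pmap_page_protect(), as in vm_object.c, vm_page.c, and vm_pageout.c below, now state the write-removal intent directly.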

View File

@ -2972,7 +2972,7 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
* Clear the write and modified bits in each of the given page's mappings.
*/
void
pmap_clear_write(vm_page_t m)
pmap_remove_write(vm_page_t m)
{
pv_entry_t pv;
pmap_t pmap;
@ -3001,23 +3001,6 @@ pmap_clear_write(vm_page_t m)
vm_page_flag_clear(m, PG_WRITEABLE);
}
/*
* pmap_page_protect:
*
* Lower the permission for all mappings to a given page.
*/
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
if ((prot & VM_PROT_WRITE) == 0) {
if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
pmap_clear_write(m);
} else {
pmap_remove_all(m);
}
}
}
/*
* pmap_ts_referenced:
*

View File

@ -2758,32 +2758,6 @@ pmap_growkernel(vm_offset_t addr)
}
/*
* pmap_page_protect:
*
* Lower the permission for all mappings to a given page.
*/
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
switch(prot) {
case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
case VM_PROT_READ|VM_PROT_WRITE:
return;
case VM_PROT_READ:
case VM_PROT_READ|VM_PROT_EXECUTE:
pmap_clearbit(m, PVF_WRITE);
break;
default:
pmap_remove_all(m);
break;
}
}
/*
* Remove all pages from specified address space
* this aids process exit speeds. Also, this code
@ -4464,7 +4438,7 @@ pmap_clear_reference(vm_page_t m)
* Clear the write and modified bits in each of the given page's mappings.
*/
void
pmap_clear_write(vm_page_t m)
pmap_remove_write(vm_page_t m)
{
if (m->md.pvh_attrs & PVF_WRITE)

View File

@ -3066,7 +3066,7 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
* Clear the write and modified bits in each of the given page's mappings.
*/
void
pmap_clear_write(vm_page_t m)
pmap_remove_write(vm_page_t m)
{
pv_entry_t pv;
pmap_t pmap;
@ -3102,23 +3102,6 @@ pmap_clear_write(vm_page_t m)
sched_unpin();
}
/*
* pmap_page_protect:
*
* Lower the permission for all mappings to a given page.
*/
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
if ((prot & VM_PROT_WRITE) == 0) {
if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
pmap_clear_write(m);
} else {
pmap_remove_all(m);
}
}
}
/*
* pmap_ts_referenced:
*

View File

@ -1938,40 +1938,6 @@ pmap_remove_pages(pmap_t pmap)
vm_page_unlock_queues();
}
/*
* pmap_page_protect:
*
* Lower the permission for all mappings to a given page.
*/
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
struct ia64_lpte *pte;
pmap_t oldpmap, pmap;
pv_entry_t pv;
if ((prot & VM_PROT_WRITE) != 0)
return;
if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
if ((m->flags & PG_WRITEABLE) == 0)
return;
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = pv->pv_pmap;
PMAP_LOCK(pmap);
oldpmap = pmap_install(pmap);
pte = pmap_find_vhpt(pv->pv_va);
KASSERT(pte != NULL, ("pte"));
pmap_pte_prot(pmap, pte, prot);
pmap_invalidate_page(pmap, pv->pv_va);
pmap_install(oldpmap);
PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
} else {
pmap_remove_all(m);
}
}
/*
* pmap_ts_referenced:
*
@ -2118,6 +2084,43 @@ pmap_clear_reference(vm_page_t m)
}
}
/*
* Clear the write and modified bits in each of the given page's mappings.
*/
void
pmap_remove_write(vm_page_t m)
{
struct ia64_lpte *pte;
pmap_t oldpmap, pmap;
pv_entry_t pv;
vm_prot_t prot;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & PG_FICTITIOUS) != 0 ||
(m->flags & PG_WRITEABLE) == 0)
return;
TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
pmap = pv->pv_pmap;
PMAP_LOCK(pmap);
oldpmap = pmap_install(pmap);
pte = pmap_find_vhpt(pv->pv_va);
KASSERT(pte != NULL, ("pte"));
prot = pmap_prot(pte);
if ((prot & VM_PROT_WRITE) != 0) {
if (pmap_dirty(pte)) {
vm_page_dirty(m);
pmap_clear_dirty(pte);
}
prot &= ~VM_PROT_WRITE;
pmap_pte_prot(pmap, pte, prot);
pmap_invalidate_page(pmap, pv->pv_va);
}
pmap_install(oldpmap);
PMAP_UNLOCK(pmap);
}
vm_page_flag_clear(m, PG_WRITEABLE);
}
/*
* Map a set of physical memory pages into the kernel virtual
* address space. Return a pointer to where it is mapped. This

View File

@ -322,7 +322,6 @@ boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
void moea_page_protect(mmu_t, vm_page_t, vm_prot_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
@ -331,6 +330,7 @@ void moea_qremove(mmu_t, vm_offset_t, int);
void moea_release(mmu_t, pmap_t);
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(mmu_t, vm_page_t);
void moea_remove_write(mmu_t, vm_page_t);
void moea_zero_page(mmu_t, vm_page_t);
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
void moea_zero_page_idle(mmu_t, vm_page_t);
@ -358,7 +358,6 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_ts_referenced, moea_ts_referenced),
MMUMETHOD(mmu_map, moea_map),
MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
MMUMETHOD(mmu_page_protect, moea_page_protect),
MMUMETHOD(mmu_pinit, moea_pinit),
MMUMETHOD(mmu_pinit0, moea_pinit0),
MMUMETHOD(mmu_protect, moea_protect),
@ -367,6 +366,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_release, moea_release),
MMUMETHOD(mmu_remove, moea_remove),
MMUMETHOD(mmu_remove_all, moea_remove_all),
MMUMETHOD(mmu_remove_write, moea_remove_write),
MMUMETHOD(mmu_zero_page, moea_zero_page),
MMUMETHOD(mmu_zero_page_area, moea_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle),
@ -1292,6 +1292,48 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
moea_clear_bit(m, PTE_CHG, NULL);
}
/*
* Clear the write and modified bits in each of the given page's mappings.
*/
void
moea_remove_write(mmu_t mmu, vm_page_t m)
{
struct pvo_entry *pvo;
struct pte *pt;
pmap_t pmap;
u_int lo;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
(m->flags & PG_WRITEABLE) == 0)
return;
lo = moea_attr_fetch(m);
SYNC();
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
pmap = pvo->pvo_pmap;
PMAP_LOCK(pmap);
if ((pvo->pvo_pte.pte_lo & PTE_PP) != PTE_BR) {
pt = moea_pvo_to_pte(pvo, -1);
pvo->pvo_pte.pte_lo &= ~PTE_PP;
pvo->pvo_pte.pte_lo |= PTE_BR;
if (pt != NULL) {
moea_pte_synch(pt, &pvo->pvo_pte);
lo |= pvo->pvo_pte.pte_lo;
pvo->pvo_pte.pte_lo &= ~PTE_CHG;
moea_pte_change(pt, &pvo->pvo_pte,
pvo->pvo_vaddr);
mtx_unlock(&moea_table_mutex);
}
}
PMAP_UNLOCK(pmap);
}
if ((lo & PTE_CHG) != 0) {
moea_attr_clear(m, PTE_CHG);
vm_page_dirty(m);
}
vm_page_flag_clear(m, PG_WRITEABLE);
}
/*
* moea_ts_referenced:
*
@ -1419,81 +1461,6 @@ moea_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
return (sva);
}
/*
* Lower the permission for all mappings to a given page.
*/
void
moea_page_protect(mmu_t mmu, vm_page_t m, vm_prot_t prot)
{
struct pvo_head *pvo_head;
struct pvo_entry *pvo, *next_pvo;
struct pte *pt;
pmap_t pmap;
/*
* Since the routine only downgrades protection, if the
* maximal protection is desired, there isn't any change
* to be made.
*/
if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
(VM_PROT_READ|VM_PROT_WRITE))
return;
pvo_head = vm_page_to_pvoh(m);
for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
next_pvo = LIST_NEXT(pvo, pvo_vlink);
MOEA_PVO_CHECK(pvo); /* sanity check */
pmap = pvo->pvo_pmap;
PMAP_LOCK(pmap);
/*
* Downgrading to no mapping at all, we just remove the entry.
*/
if ((prot & VM_PROT_READ) == 0) {
moea_pvo_remove(pvo, -1);
PMAP_UNLOCK(pmap);
continue;
}
/*
* If EXEC permission is being revoked, just clear the flag
* in the PVO.
*/
if ((prot & VM_PROT_EXECUTE) == 0)
pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
/*
* If this entry is already RO, don't diddle with the page
* table.
*/
if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
PMAP_UNLOCK(pmap);
MOEA_PVO_CHECK(pvo);
continue;
}
/*
* Grab the PTE before we diddle the bits so pvo_to_pte can
* verify the pte contents are as expected.
*/
pt = moea_pvo_to_pte(pvo, -1);
pvo->pvo_pte.pte_lo &= ~PTE_PP;
pvo->pvo_pte.pte_lo |= PTE_BR;
if (pt != NULL) {
moea_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
mtx_unlock(&moea_table_mutex);
}
PMAP_UNLOCK(pmap);
MOEA_PVO_CHECK(pvo); /* sanity check */
}
/*
* Downgrading from writeable: clear the VM page flag
*/
if ((prot & VM_PROT_WRITE) != VM_PROT_WRITE)
vm_page_flag_clear(m, PG_WRITEABLE);
}
/*
* Returns true if the pmap's pv is one of the first
* 16 pvs linked to from this page. This count may

View File

@ -146,6 +146,18 @@ METHOD void clear_reference {
};
/**
* @brief Clear the write and modified bits in each of the given
* physical page's mappings
*
* @param _pg physical page
*/
METHOD void remove_write {
mmu_t _mmu;
vm_page_t _pg;
};
/**
* @brief Copy the address range given by the source physical map, virtual
* address and length to the destination physical map and virtual address.
@ -418,20 +430,6 @@ METHOD void page_init {
} DEFAULT mmu_null_page_init;
/**
* @brief Lower the protection to the given value for all mappings of the
* given physical page.
*
* @param _pg physical page
* @param _prot updated page protection
*/
METHOD void page_protect {
mmu_t _mmu;
vm_page_t _pg;
vm_prot_t _prot;
};
/**
* @brief Initialise a physical map data structure
*

View File

@ -322,7 +322,6 @@ boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
void moea_page_protect(mmu_t, vm_page_t, vm_prot_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
@ -331,6 +330,7 @@ void moea_qremove(mmu_t, vm_offset_t, int);
void moea_release(mmu_t, pmap_t);
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(mmu_t, vm_page_t);
void moea_remove_write(mmu_t, vm_page_t);
void moea_zero_page(mmu_t, vm_page_t);
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
void moea_zero_page_idle(mmu_t, vm_page_t);
@ -358,7 +358,6 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_ts_referenced, moea_ts_referenced),
MMUMETHOD(mmu_map, moea_map),
MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
MMUMETHOD(mmu_page_protect, moea_page_protect),
MMUMETHOD(mmu_pinit, moea_pinit),
MMUMETHOD(mmu_pinit0, moea_pinit0),
MMUMETHOD(mmu_protect, moea_protect),
@ -367,6 +366,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_release, moea_release),
MMUMETHOD(mmu_remove, moea_remove),
MMUMETHOD(mmu_remove_all, moea_remove_all),
MMUMETHOD(mmu_remove_write, moea_remove_write),
MMUMETHOD(mmu_zero_page, moea_zero_page),
MMUMETHOD(mmu_zero_page_area, moea_zero_page_area),
MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle),
@ -1292,6 +1292,48 @@ moea_clear_modify(mmu_t mmu, vm_page_t m)
moea_clear_bit(m, PTE_CHG, NULL);
}
/*
* Clear the write and modified bits in each of the given page's mappings.
*/
void
moea_remove_write(mmu_t mmu, vm_page_t m)
{
struct pvo_entry *pvo;
struct pte *pt;
pmap_t pmap;
u_int lo;
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
(m->flags & PG_WRITEABLE) == 0)
return;
lo = moea_attr_fetch(m);
SYNC();
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
pmap = pvo->pvo_pmap;
PMAP_LOCK(pmap);
if ((pvo->pvo_pte.pte_lo & PTE_PP) != PTE_BR) {
pt = moea_pvo_to_pte(pvo, -1);
pvo->pvo_pte.pte_lo &= ~PTE_PP;
pvo->pvo_pte.pte_lo |= PTE_BR;
if (pt != NULL) {
moea_pte_synch(pt, &pvo->pvo_pte);
lo |= pvo->pvo_pte.pte_lo;
pvo->pvo_pte.pte_lo &= ~PTE_CHG;
moea_pte_change(pt, &pvo->pvo_pte,
pvo->pvo_vaddr);
mtx_unlock(&moea_table_mutex);
}
}
PMAP_UNLOCK(pmap);
}
if ((lo & PTE_CHG) != 0) {
moea_attr_clear(m, PTE_CHG);
vm_page_dirty(m);
}
vm_page_flag_clear(m, PG_WRITEABLE);
}
/*
* moea_ts_referenced:
*
@ -1419,81 +1461,6 @@ moea_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
return (sva);
}
/*
* Lower the permission for all mappings to a given page.
*/
void
moea_page_protect(mmu_t mmu, vm_page_t m, vm_prot_t prot)
{
struct pvo_head *pvo_head;
struct pvo_entry *pvo, *next_pvo;
struct pte *pt;
pmap_t pmap;
/*
* Since the routine only downgrades protection, if the
* maximal protection is desired, there isn't any change
* to be made.
*/
if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
(VM_PROT_READ|VM_PROT_WRITE))
return;
pvo_head = vm_page_to_pvoh(m);
for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
next_pvo = LIST_NEXT(pvo, pvo_vlink);
MOEA_PVO_CHECK(pvo); /* sanity check */
pmap = pvo->pvo_pmap;
PMAP_LOCK(pmap);
/*
* Downgrading to no mapping at all, we just remove the entry.
*/
if ((prot & VM_PROT_READ) == 0) {
moea_pvo_remove(pvo, -1);
PMAP_UNLOCK(pmap);
continue;
}
/*
* If EXEC permission is being revoked, just clear the flag
* in the PVO.
*/
if ((prot & VM_PROT_EXECUTE) == 0)
pvo->pvo_vaddr &= ~PVO_EXECUTABLE;
/*
* If this entry is already RO, don't diddle with the page
* table.
*/
if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
PMAP_UNLOCK(pmap);
MOEA_PVO_CHECK(pvo);
continue;
}
/*
* Grab the PTE before we diddle the bits so pvo_to_pte can
* verify the pte contents are as expected.
*/
pt = moea_pvo_to_pte(pvo, -1);
pvo->pvo_pte.pte_lo &= ~PTE_PP;
pvo->pvo_pte.pte_lo |= PTE_BR;
if (pt != NULL) {
moea_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
mtx_unlock(&moea_table_mutex);
}
PMAP_UNLOCK(pmap);
MOEA_PVO_CHECK(pvo); /* sanity check */
}
/*
* Downgrading from writeable: clear the VM page flag
*/
if ((prot & VM_PROT_WRITE) != VM_PROT_WRITE)
vm_page_flag_clear(m, PG_WRITEABLE);
}
/*
* Returns true if the pmap's pv is one of the first
* 16 pvs linked to from this page. This count may

View File

@ -195,12 +195,6 @@ pmap_page_init(vm_page_t m)
MMU_PAGE_INIT(mmu_obj, m);
}
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
MMU_PAGE_PROTECT(mmu_obj, m, prot);
}
void
pmap_pinit(pmap_t pmap)
{
@ -255,6 +249,12 @@ pmap_remove_pages(pmap_t pmap)
MMU_REMOVE_PAGES(mmu_obj, pmap);
}
void
pmap_remove_write(vm_page_t m)
{
MMU_REMOVE_WRITE(mmu_obj, m);
}
void
pmap_zero_page(vm_page_t m)
{

View File

@ -1769,23 +1769,6 @@ pmap_page_is_mapped(vm_page_t m)
return (FALSE);
}
/*
* Lower the permission for all mappings to a given page.
*/
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
KASSERT((m->flags & PG_FICTITIOUS) == 0,
("pmap_page_protect: fake page"));
if ((prot & VM_PROT_WRITE) == 0) {
if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
pmap_clear_write(m);
else
pmap_remove_all(m);
}
}
/*
* pmap_ts_referenced:
*
@ -1895,7 +1878,7 @@ pmap_clear_reference(vm_page_t m)
}
void
pmap_clear_write(vm_page_t m)
pmap_remove_write(vm_page_t m)
{
struct tte *tp;
u_long data;

View File

@ -93,7 +93,6 @@ extern vm_offset_t kernel_vm_end;
void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t);
void pmap_clear_modify(vm_page_t m);
void pmap_clear_reference(vm_page_t m);
void pmap_clear_write(vm_page_t m);
void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
void pmap_copy_page(vm_page_t, vm_page_t);
void pmap_enter(pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
@ -115,7 +114,6 @@ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size);
boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
void pmap_page_init(vm_page_t m);
void pmap_page_protect(vm_page_t m, vm_prot_t prot);
void pmap_pinit(pmap_t);
void pmap_pinit0(pmap_t);
void pmap_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
@ -125,6 +123,7 @@ void pmap_release(pmap_t);
void pmap_remove(pmap_t, vm_offset_t, vm_offset_t);
void pmap_remove_all(vm_page_t m);
void pmap_remove_pages(pmap_t);
void pmap_remove_write(vm_page_t m);
void pmap_zero_page(vm_page_t);
void pmap_zero_page_area(vm_page_t, int off, int size);
void pmap_zero_page_idle(vm_page_t);

View File

@ -808,7 +808,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
clearobjflags = 0;
else
pmap_page_protect(p, VM_PROT_READ);
pmap_remove_write(p);
}
if (clearobjflags && (tstart == 0) && (tend == object->size)) {
@ -977,7 +977,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
vm_pageout_flush(ma, runlen, pagerflags);
for (i = 0; i < runlen; i++) {
if (ma[i]->valid & ma[i]->dirty) {
pmap_page_protect(ma[i], VM_PROT_READ);
pmap_remove_write(ma[i]);
vm_page_flag_set(ma[i], PG_CLEANCHK);
/*
@ -1829,7 +1829,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
goto again;
if (clean_only && p->valid) {
pmap_page_protect(p, VM_PROT_READ | VM_PROT_EXECUTE);
pmap_remove_write(p);
if (p->valid & p->dirty)
continue;
}

View File

@ -1786,7 +1786,7 @@ vm_page_cowsetup(vm_page_t m)
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
m->cow++;
pmap_page_protect(m, VM_PROT_READ);
pmap_remove_write(m);
}
#include "opt_ddb.h"

View File

@ -431,7 +431,7 @@ vm_pageout_flush(vm_page_t *mc, int count, int flags)
("vm_pageout_flush: partially invalid page %p index %d/%d",
mc[i], i, count));
vm_page_io_start(mc[i]);
pmap_page_protect(mc[i], VM_PROT_READ);
pmap_remove_write(mc[i]);
}
vm_page_unlock_queues();
vm_object_pip_add(object, count);