Remove vm_page_protect(). Instead, use pmap_page_protect() directly.
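
For callers the change is a one-for-one substitution in the VM_PROT_READ case. As a reference, the following standalone C sketch reproduces the dispatch logic of the wrapper being deleted (see the vm_page.c hunk below); the constant values, the stub bodies for the machine-dependent pmap routines, and the direct flag writes standing in for vm_page_flag_clear() are illustrative assumptions, not the kernel's real definitions:

#include <stdio.h>

#define	VM_PROT_NONE	0x00	/* illustrative values, not vm/vm.h's */
#define	VM_PROT_READ	0x01
#define	PG_WRITEABLE	0x0100	/* illustrative, not vm/vm_page.h's */

struct vm_page {
	int	flags;
};

/* Stubs standing in for the machine-dependent pmap routines. */
static int
pmap_page_is_mapped(struct vm_page *m)
{

	(void)m;
	return (1);
}

static void
pmap_remove_all(struct vm_page *m)
{

	(void)m;
	printf("pmap_remove_all: destroy every mapping of the page\n");
}

static void
pmap_page_protect(struct vm_page *m, int prot)
{

	(void)m; (void)prot;
	printf("pmap_page_protect: write-protect every mapping of the page\n");
}

/*
 * The deleted wrapper: it only ever lowered protection, so calling it
 * on a page already at VM_PROT_NONE was effectively a NOP.
 */
static void
vm_page_protect(struct vm_page *mem, int prot)
{

	if (prot == VM_PROT_NONE) {
		if (pmap_page_is_mapped(mem) || (mem->flags & PG_WRITEABLE)) {
			pmap_remove_all(mem);
			mem->flags &= ~PG_WRITEABLE;
		}
	} else if (prot == VM_PROT_READ && (mem->flags & PG_WRITEABLE)) {
		pmap_page_protect(mem, VM_PROT_READ);
		mem->flags &= ~PG_WRITEABLE;
	}
}

int
main(void)
{
	struct vm_page m = { PG_WRITEABLE };

	vm_page_protect(&m, VM_PROT_READ);	/* old call site */
	m.flags = PG_WRITEABLE;
	pmap_page_protect(&m, VM_PROT_READ);	/* what call sites do now */
	return (0);
}

The wrapper's only work beyond the pmap call was clearing PG_WRITEABLE; its removal suggests the pmap layer now maintains that flag itself.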

commit a12cc0e489 (parent 4ce7e0f43a)
Author: Alan Cox
Date:   2002-11-18 04:05:22 +00:00
Notes:  svn2git (2020-12-20 02:59:44 +00:00): svn path=/head/; revision=107039

5 changed files with 8 additions and 30 deletions

sys/vm/swap_pager.c

@@ -1597,7 +1597,7 @@ swp_pager_async_iodone(bp)
 			vm_page_undirty(m);
 			vm_page_io_finish(m);
 			if (!vm_page_count_severe() || !vm_page_try_to_cache(m))
-				vm_page_protect(m, VM_PROT_READ);
+				pmap_page_protect(m, VM_PROT_READ);
 		}
 	}
 	vm_page_unlock_queues();

sys/vm/vm_object.c

@@ -754,7 +754,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
 		if ((flags & OBJPC_NOSYNC) && (p->flags & PG_NOSYNC))
 			clearobjflags = 0;
 		else
-			vm_page_protect(p, VM_PROT_READ);
+			pmap_page_protect(p, VM_PROT_READ);
 	}
 
 	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
@@ -924,7 +924,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
 	vm_pageout_flush(ma, runlen, pagerflags);
 	for (i = 0; i < runlen; i++) {
 		if (ma[i]->valid & ma[i]->dirty) {
-			vm_page_protect(ma[i], VM_PROT_READ);
+			pmap_page_protect(ma[i], VM_PROT_READ);
 			vm_page_flag_set(ma[i], PG_CLEANCHK);
 
 			/*
@@ -950,7 +950,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
  * is converted to copy-on-write.
  *
  * NOTE: If the page is already at VM_PROT_NONE, calling
- * vm_page_protect will have no effect.
+ * pmap_page_protect will have no effect.
  */
 void
 vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
@@ -967,7 +967,7 @@ vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 		p = vm_page_lookup(object, idx);
 		if (p == NULL)
 			continue;
-		vm_page_protect(p, VM_PROT_READ);
+		pmap_page_protect(p, VM_PROT_READ);
 	}
 }
 #endif

sys/vm/vm_page.c

@@ -370,27 +370,6 @@ vm_page_unhold(vm_page_t mem)
 		vm_page_free_toq(mem);
 }
 
-/*
- * vm_page_protect:
- *
- *	Reduce the protection of a page.  This routine never raises the
- *	protection and therefore can be safely called if the page is already
- *	at VM_PROT_NONE (it will be a NOP effectively).
- */
-void
-vm_page_protect(vm_page_t mem, int prot)
-{
-	if (prot == VM_PROT_NONE) {
-		if (pmap_page_is_mapped(mem) || (mem->flags & PG_WRITEABLE)) {
-			pmap_remove_all(mem);
-			vm_page_flag_clear(mem, PG_WRITEABLE);
-		}
-	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
-		pmap_page_protect(mem, VM_PROT_READ);
-		vm_page_flag_clear(mem, PG_WRITEABLE);
-	}
-}
-
 /*
  * vm_page_copy:
  *
@@ -1822,7 +1801,7 @@ vm_page_cowsetup(vm_page_t m)
 {
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	m->cow++;
-	vm_page_protect(m, VM_PROT_READ);
+	pmap_page_protect(m, VM_PROT_READ);
 }
 
 #include "opt_ddb.h"

sys/vm/vm_page.h

@@ -327,7 +327,6 @@ void vm_page_io_start(vm_page_t m);
 void vm_page_io_finish(vm_page_t m);
 void vm_page_hold(vm_page_t mem);
 void vm_page_unhold(vm_page_t mem);
-void vm_page_protect(vm_page_t mem, int prot);
 void vm_page_copy(vm_page_t src_m, vm_page_t dest_m);
 void vm_page_free(vm_page_t m);
 void vm_page_free_zero(vm_page_t m);

sys/vm/vm_pageout.c

@@ -384,7 +384,7 @@ vm_pageout_flush(mc, count, flags)
 	for (i = 0; i < count; i++) {
 		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count));
 		vm_page_io_start(mc[i]);
-		vm_page_protect(mc[i], VM_PROT_READ);
+		pmap_page_protect(mc[i], VM_PROT_READ);
 	}
 	object = mc[0]->object;
 	vm_page_unlock_queues();
@@ -437,7 +437,7 @@ vm_pageout_flush(mc, count, flags)
 			vm_object_pip_wakeup(object);
 			vm_page_io_finish(mt);
 			if (!vm_page_count_severe() || !vm_page_try_to_cache(mt))
-				vm_page_protect(mt, VM_PROT_READ);
+				pmap_page_protect(mt, VM_PROT_READ);
 		}
 	}
 	return numpagedout;