When prot is VM_PROT_NONE, call pmap_page_protect() directly rather than
indirectly through vm_page_protect().

The one remaining page flag that is updated by vm_page_protect() is
already being updated by our various pmap implementations.

Note: A later commit will similarly change the VM_PROT_READ case and
eliminate vm_page_protect().
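For context, vm_page_protect() at this point is a thin inline wrapper over pmap_page_protect(). The following is a minimal sketch of that wrapper, assuming (per the message above) that PG_WRITEABLE is the one remaining flag it clears; this is an illustration of the call path being shortened, not the verbatim source:

static __inline void
vm_page_protect(vm_page_t mem, vm_prot_t prot)
{
	if (prot == VM_PROT_NONE) {
		/* Remove every mapping of the page ... */
		pmap_page_protect(mem, VM_PROT_NONE);
		/* ... and record that no mapping can write it anymore. */
		vm_page_flag_clear(mem, PG_WRITEABLE);
	} else if (prot == VM_PROT_READ && (mem->flags & PG_WRITEABLE)) {
		/* Downgrade writable mappings to read-only. */
		pmap_page_protect(mem, VM_PROT_READ);
		vm_page_flag_clear(mem, PG_WRITEABLE);
	}
}

Since the pmap implementations now clear PG_WRITEABLE themselves when the last mapping is removed, the VM_PROT_NONE arm of the wrapper reduces to a bare pmap_page_protect() call, which is exactly the substitution each hunk below makes.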
commit d154fb4fe6
parent f5464126bb
@@ -771,7 +771,7 @@ exec_map_first_page(imgp)
 	    (ma[0]->valid == 0)) {
 		if (ma[0]) {
 			vm_page_lock_queues();
-			vm_page_protect(ma[0], VM_PROT_NONE);
+			pmap_page_protect(ma[0], VM_PROT_NONE);
 			vm_page_free(ma[0]);
 			vm_page_unlock_queues();
 		}
@@ -1504,7 +1504,7 @@ vfs_vmio_release(bp)
 		if ((bp->b_flags & B_ASYNC) == 0 && !m->valid &&
 		    m->hold_count == 0) {
 			vm_page_busy(m);
-			vm_page_protect(m, VM_PROT_NONE);
+			pmap_page_protect(m, VM_PROT_NONE);
 			vm_page_free(m);
 		} else if (bp->b_flags & B_DIRECT) {
 			vm_page_try_to_free(m);
@@ -3268,7 +3268,7 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
 			 * It may not work properly with small-block devices.
 			 * We need to find a better way.
 			 */
-			vm_page_protect(m, VM_PROT_NONE);
+			pmap_page_protect(m, VM_PROT_NONE);
 			if (clear_modify)
 				vfs_page_set_valid(bp, foff, i, m);
 			else if (m->valid == VM_PAGE_BITS_ALL &&
@@ -474,7 +474,7 @@ RetryFault:;
 				if (mt->dirty == 0)
 					vm_page_test_dirty(mt);
 				if (mt->dirty) {
-					vm_page_protect(mt, VM_PROT_NONE);
+					pmap_page_protect(mt, VM_PROT_NONE);
 					vm_page_deactivate(mt);
 				} else {
 					vm_page_cache(mt);
@@ -700,7 +700,7 @@ RetryFault:;
 			 * get rid of the unnecessary page
 			 */
 			vm_page_lock_queues();
-			vm_page_protect(fs.first_m, VM_PROT_NONE);
+			pmap_page_protect(fs.first_m, VM_PROT_NONE);
 			vm_page_free(fs.first_m);
 			vm_page_unlock_queues();
 			fs.first_m = NULL;
@@ -990,7 +990,7 @@ vm_object_pmap_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 		return;
 	TAILQ_FOREACH(p, &object->memq, listq) {
 		if (p->pindex >= start && p->pindex < end)
-			vm_page_protect(p, VM_PROT_NONE);
+			pmap_page_protect(p, VM_PROT_NONE);
 	}
 	if ((start == 0) && (object->size == end))
 		vm_object_clear_flag(object, OBJ_WRITEABLE);
@@ -1439,7 +1439,7 @@ vm_object_backing_scan(vm_object_t object, int op)
 			 * can simply destroy it.
 			 */
 			vm_page_lock_queues();
-			vm_page_protect(p, VM_PROT_NONE);
+			pmap_page_protect(p, VM_PROT_NONE);
 			vm_page_free(p);
 			vm_page_unlock_queues();
 			p = next;
@@ -1459,7 +1459,7 @@ vm_object_backing_scan(vm_object_t object, int op)
 			 * Leave the parent's page alone
 			 */
 			vm_page_lock_queues();
-			vm_page_protect(p, VM_PROT_NONE);
+			pmap_page_protect(p, VM_PROT_NONE);
 			vm_page_free(p);
 			vm_page_unlock_queues();
 			p = next;
@@ -1746,7 +1746,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, bo
 		next = TAILQ_NEXT(p, listq);
 		if (all || ((start <= p->pindex) && (p->pindex < end))) {
 			if (p->wire_count != 0) {
-				vm_page_protect(p, VM_PROT_NONE);
+				pmap_page_protect(p, VM_PROT_NONE);
 				if (!clean_only)
 					p->valid = 0;
 				continue;
@@ -1765,7 +1765,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, bo
 					continue;
 				}
 			vm_page_busy(p);
-			vm_page_protect(p, VM_PROT_NONE);
+			pmap_page_protect(p, VM_PROT_NONE);
 			vm_page_free(p);
 		}
 	}
@@ -1773,7 +1773,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, bo
 		while (size > 0) {
 			if ((p = vm_page_lookup(object, start)) != NULL) {
 				if (p->wire_count != 0) {
-					vm_page_protect(p, VM_PROT_NONE);
+					pmap_page_protect(p, VM_PROT_NONE);
 					if (!clean_only)
 						p->valid = 0;
 					start += 1;
@@ -1797,7 +1797,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, bo
 				}
 			}
 			vm_page_busy(p);
-			vm_page_protect(p, VM_PROT_NONE);
+			pmap_page_protect(p, VM_PROT_NONE);
 			vm_page_free(p);
 		}
 		start += 1;
@@ -1968,7 +1968,7 @@ vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
 				vm_page_unlock_queues();
 			}
 
-			vm_page_protect(m_in, VM_PROT_NONE);
+			pmap_page_protect(m_in, VM_PROT_NONE);
 			pmap_copy_page(m_in, m_out);
 			m_out->valid = m_in->valid;
 			vm_page_dirty(m_out);
@@ -878,7 +878,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 			}
 			KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
 			vm_page_busy(m);
-			vm_page_protect(m, VM_PROT_NONE);
+			pmap_page_protect(m, VM_PROT_NONE);
 			vm_page_free(m);
 			vm_page_unlock_queues();
 			goto loop;
@@ -1384,7 +1384,7 @@ vm_page_try_to_free(vm_page_t m)
 	if (m->dirty)
 		return (0);
 	vm_page_busy(m);
-	vm_page_protect(m, VM_PROT_NONE);
+	pmap_page_protect(m, VM_PROT_NONE);
 	vm_page_free(m);
 	return (1);
 }
@@ -1413,7 +1413,7 @@ vm_page_cache(vm_page_t m)
 	 * Remove all pmaps and indicate that the page is not
 	 * writeable or mapped.
 	 */
-	vm_page_protect(m, VM_PROT_NONE);
+	pmap_page_protect(m, VM_PROT_NONE);
 	if (m->dirty != 0) {
 		panic("vm_page_cache: caching a dirty page, pindex: %ld",
 		    (long)m->pindex);
@@ -512,7 +512,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
 				if ((p->flags & PG_REFERENCED) == 0) {
 					p->act_count -= min(p->act_count, ACT_DECLINE);
 					if (!remove_mode && (vm_pageout_algorithm || (p->act_count == 0))) {
-						vm_page_protect(p, VM_PROT_NONE);
+						pmap_page_protect(p, VM_PROT_NONE);
 						vm_page_deactivate(p);
 					} else {
 						vm_pageq_requeue(p);
@@ -525,7 +525,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
 					vm_pageq_requeue(p);
 				}
 			} else if (p->queue == PQ_INACTIVE) {
-				vm_page_protect(p, VM_PROT_NONE);
+				pmap_page_protect(p, VM_PROT_NONE);
 			}
 			p = next;
 		}
@@ -618,7 +618,7 @@ vm_pageout_page_free(vm_page_t m) {
 	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
 		vm_object_reference(object);
 	vm_page_busy(m);
-	vm_page_protect(m, VM_PROT_NONE);
+	pmap_page_protect(m, VM_PROT_NONE);
 	vm_page_free(m);
 	cnt.v_dfree++;
 	if (type == OBJT_SWAP || type == OBJT_DEFAULT)
@@ -1043,7 +1043,7 @@ vm_pageout_scan(int pass)
 			    m->act_count == 0) {
 				page_shortage--;
 				if (m->object->ref_count == 0) {
-					vm_page_protect(m, VM_PROT_NONE);
+					pmap_page_protect(m, VM_PROT_NONE);
 					if (m->dirty == 0)
 						vm_page_cache(m);
 					else
@@ -1278,7 +1278,7 @@ vm_pageout_page_stats()
 			 * operations would be higher than the value
 			 * of doing the operation.
 			 */
-			vm_page_protect(m, VM_PROT_NONE);
+			pmap_page_protect(m, VM_PROT_NONE);
 			vm_page_deactivate(m);
 		} else {
 			m->act_count -= min(m->act_count, ACT_DECLINE);
|
@ -348,7 +348,7 @@ vnode_pager_setsize(vp, nsize)
|
||||
* XXX should vm_pager_unmap_page() have
|
||||
* dealt with this?
|
||||
*/
|
||||
vm_page_protect(m, VM_PROT_NONE);
|
||||
pmap_page_protect(m, VM_PROT_NONE);
|
||||
|
||||
/*
|
||||
* Clear out partial-page dirty bits. This
|
||||
|