Push down the page queues lock into vm_page_activate().

Alan Cox 2010-05-07 15:49:43 +00:00
parent dc510c105f
commit 03679e2334
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=207746
7 changed files with 53 additions and 48 deletions
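
The change is a classic lock push-down: before this commit, callers such as the getpages routines below took the global page queues lock around their whole page-handling loop; after it, they hold only the per-page lock, and vm_page_activate() acquires the page queues lock internally, just around the code that actually moves the page onto the active queue. Below is a minimal user-space sketch of the pattern using pthread mutexes; every name in it is invented for illustration and none of it is the kernel code.

/*
 * Toy model of the r207746 lock push-down.  The global queue mutex is
 * taken inside activate(), and only on the path that really touches a
 * queue, instead of being held by the caller for the whole loop.
 * All names here are made up for illustration.
 */
#include <pthread.h>
#include <stdio.h>

#define	PQ_NONE		0
#define	PQ_ACTIVE	1

struct page {
	pthread_mutex_t	lock;	/* per-page lock, like vm_page_lock() */
	int		queue;	/* which paging queue the page is on */
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * After the change: the caller holds only the page lock; the queue
 * lock is acquired here, around the queue manipulation alone.
 */
static void
activate(struct page *p)
{
	if (p->queue != PQ_ACTIVE) {
		pthread_mutex_lock(&queue_lock);
		p->queue = PQ_ACTIVE;	/* stands in for dequeue + enqueue */
		pthread_mutex_unlock(&queue_lock);
	}
}

int
main(void)
{
	static struct page p = { PTHREAD_MUTEX_INITIALIZER, PQ_NONE };

	pthread_mutex_lock(&p.lock);	/* what the callers below now do */
	activate(&p);
	pthread_mutex_unlock(&p.lock);
	printf("queue = %d\n", p.queue);
	return (0);
}

The payoff is that the heavily contended global lock is held for a few instructions at most, and not at all when the page is already active.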

sys/fs/nfsclient/nfs_clbio.c

@@ -196,9 +196,6 @@ ncl_getpages(struct vop_getpages_args *ap)
 		nextoff = toff + PAGE_SIZE;
 		m = pages[i];
 
-		vm_page_lock(m);
-		vm_page_lock_queues();
-
 		if (nextoff <= size) {
 			/*
 			 * Read operation filled an entire page
@@ -236,18 +233,22 @@ ncl_getpages(struct vop_getpages_args *ap)
 			 * now tell them that it is ok to use.
 			 */
 			if (!error) {
-				if (m->oflags & VPO_WANTED)
+				if (m->oflags & VPO_WANTED) {
+					vm_page_lock(m);
 					vm_page_activate(m);
-				else
+					vm_page_unlock(m);
+				} else {
+					vm_page_lock(m);
 					vm_page_deactivate(m);
+					vm_page_unlock(m);
+				}
 				vm_page_wakeup(m);
 			} else {
+				vm_page_lock(m);
 				vm_page_free(m);
+				vm_page_unlock(m);
 			}
 		}
-
-		vm_page_unlock_queues();
-		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(object);
 	return (0);

sys/fs/nwfs/nwfs_io.c

@@ -446,9 +446,6 @@ nwfs_getpages(ap)
 		nextoff = toff + PAGE_SIZE;
 		m = pages[i];
 
-		vm_page_lock(m);
-		vm_page_lock_queues();
-
 		if (nextoff <= size) {
 			m->valid = VM_PAGE_BITS_ALL;
 			KASSERT(m->dirty == 0,
@@ -474,18 +471,22 @@ nwfs_getpages(ap)
 			 * now tell them that it is ok to use.
 			 */
 			if (!error) {
-				if (m->oflags & VPO_WANTED)
+				if (m->oflags & VPO_WANTED) {
+					vm_page_lock(m);
 					vm_page_activate(m);
-				else
+					vm_page_unlock(m);
+				} else {
+					vm_page_lock(m);
 					vm_page_deactivate(m);
+					vm_page_unlock(m);
+				}
 				vm_page_wakeup(m);
 			} else {
+				vm_page_lock(m);
 				vm_page_free(m);
+				vm_page_unlock(m);
 			}
 		}
-
-		vm_page_unlock_queues();
-		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(object);
 	return 0;

sys/fs/smbfs/smbfs_io.c

@@ -497,9 +497,6 @@ smbfs_getpages(ap)
 		nextoff = toff + PAGE_SIZE;
 		m = pages[i];
 
-		vm_page_lock(m);
-		vm_page_lock_queues();
-
 		if (nextoff <= size) {
 			/*
 			 * Read operation filled an entire page
@@ -538,18 +535,22 @@ smbfs_getpages(ap)
 			 * now tell them that it is ok to use.
 			 */
 			if (!error) {
-				if (m->oflags & VPO_WANTED)
+				if (m->oflags & VPO_WANTED) {
+					vm_page_lock(m);
 					vm_page_activate(m);
-				else
+					vm_page_unlock(m);
+				} else {
+					vm_page_lock(m);
 					vm_page_deactivate(m);
+					vm_page_unlock(m);
+				}
 				vm_page_wakeup(m);
 			} else {
+				vm_page_lock(m);
 				vm_page_free(m);
+				vm_page_unlock(m);
 			}
 		}
-
-		vm_page_unlock_queues();
-		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(object);
 	return 0;

sys/nfsclient/nfs_bio.c

@@ -194,9 +194,6 @@ nfs_getpages(struct vop_getpages_args *ap)
 		nextoff = toff + PAGE_SIZE;
 		m = pages[i];
 
-		vm_page_lock(m);
-		vm_page_lock_queues();
-
 		if (nextoff <= size) {
 			/*
 			 * Read operation filled an entire page
@@ -234,18 +231,22 @@ nfs_getpages(struct vop_getpages_args *ap)
 			 * now tell them that it is ok to use.
 			 */
 			if (!error) {
-				if (m->oflags & VPO_WANTED)
+				if (m->oflags & VPO_WANTED) {
+					vm_page_lock(m);
 					vm_page_activate(m);
-				else
+					vm_page_unlock(m);
+				} else {
+					vm_page_lock(m);
 					vm_page_deactivate(m);
+					vm_page_unlock(m);
+				}
 				vm_page_wakeup(m);
 			} else {
+				vm_page_lock(m);
 				vm_page_free(m);
+				vm_page_unlock(m);
 			}
 		}
-
-		vm_page_unlock_queues();
-		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(object);
 	return (0);

sys/vm/vm_fault.c

@@ -937,7 +937,6 @@ RetryFault:;
 	vm_fault_prefault(fs.map->pmap, vaddr, fs.entry);
 	VM_OBJECT_LOCK(fs.object);
 	vm_page_lock(fs.m);
-	vm_page_lock_queues();
 
 	/*
 	 * If the page is not wired down, then put it where the pageout daemon
@@ -948,10 +947,8 @@ RetryFault:;
 		if (fs.wired)
 			vm_page_wire(fs.m);
 		else
 			vm_page_unwire(fs.m, 1);
-	} else {
+	} else
 		vm_page_activate(fs.m);
-	}
-	vm_page_unlock_queues();
 	vm_page_unlock(fs.m);
 	vm_page_wakeup(fs.m);
@@ -1267,9 +1264,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
 		vm_page_unlock(dst_m);
 	} else {
 		vm_page_lock(dst_m);
-		vm_page_lock_queues();
 		vm_page_activate(dst_m);
-		vm_page_unlock_queues();
 		vm_page_unlock(dst_m);
 	}
 	vm_page_wakeup(dst_m);

sys/vm/vm_page.c

@@ -1374,22 +1374,25 @@ vm_page_enqueue(int queue, vm_page_t m)
  *	Ensure that act_count is at least ACT_INIT but do not otherwise
  *	mess with it.
  *
- *	The page queues must be locked.
+ *	The page must be locked.
  *	This routine may not block.
  */
 void
 vm_page_activate(vm_page_t m)
 {
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	vm_page_lock_assert(m, MA_OWNED);
 	if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) {
-		vm_pageq_remove(m);
 		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
 			if (m->act_count < ACT_INIT)
 				m->act_count = ACT_INIT;
+			vm_page_lock_queues();
+			vm_pageq_remove(m);
 			vm_page_enqueue(PQ_ACTIVE, m);
-		}
+			vm_page_unlock_queues();
+		} else
+			KASSERT(m->queue == PQ_NONE,
+			    ("vm_page_activate: wired page %p is queued", m));
 	} else {
 		if (m->act_count < ACT_INIT)
 			m->act_count = ACT_INIT;
sys/vm/vnode_pager.c

@@ -948,8 +948,6 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 		nextoff = tfoff + PAGE_SIZE;
 		mt = m[i];
 
-		vm_page_lock(mt);
-		vm_page_lock_queues();
 		if (nextoff <= object->un_pager.vnp.vnp_size) {
 			/*
 			 * Read filled up entire page.
@@ -992,17 +990,22 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 			 * now tell them that it is ok to use
 			 */
 			if (!error) {
-				if (mt->oflags & VPO_WANTED)
+				if (mt->oflags & VPO_WANTED) {
+					vm_page_lock(mt);
 					vm_page_activate(mt);
-				else
+					vm_page_unlock(mt);
+				} else {
+					vm_page_lock(mt);
 					vm_page_deactivate(mt);
+					vm_page_unlock(mt);
+				}
 				vm_page_wakeup(mt);
 			} else {
+				vm_page_lock(mt);
 				vm_page_free(mt);
+				vm_page_unlock(mt);
 			}
 		}
-		vm_page_unlock_queues();
-		vm_page_unlock(mt);
 	}
 	VM_OBJECT_UNLOCK(object);
 	if (error) {