Eliminate page queues locking around most calls to vm_page_free().

Author: Alan Cox
Date:   2010-05-06 18:58:32 +00:00
commit eb00b276ab (parent 77dda2b96f)
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=207728
12 changed files with 2 additions and 60 deletions
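
The hunks below all make the same basic change: the global page queues lock is no longer taken and dropped around vm_page_free(); only the surrounding per-page lock (vm_page_lock()/vm_page_unlock()) remains held across the free. A minimal sketch of the before/after convention follows; the vm_page_*() calls are the FreeBSD VM KPIs that appear in the diff, while the helper function and the exact set of headers are illustrative assumptions only.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Hypothetical helper showing the calling convention this commit adopts.
 *
 * Before this revision the free was bracketed by the page queues lock:
 *
 *	vm_page_lock(m);
 *	vm_page_lock_queues();
 *	vm_page_free(m);
 *	vm_page_unlock_queues();
 *	vm_page_unlock(m);
 *
 * After it, only the per-page lock is held around vm_page_free():
 */
static void
discard_page(vm_page_t m)
{

	vm_page_lock(m);
	vm_page_free(m);
	vm_page_unlock(m);
}

Two hunks do slightly more than delete the queue locking: exec_map_first_page() also appears to respell its NULL check as "ma[0] != NULL", and the shadow-chain case in vm_fault() appears to move vm_page_lock() below the "get rid of the unnecessary page" comment; the locking change itself is the same everywhere.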

View File

@@ -134,9 +134,7 @@ ncl_getpages(struct vop_getpages_args *ap)
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
vm_page_lock(pages[i]);
vm_page_lock_queues();
vm_page_free(pages[i]);
vm_page_unlock_queues();
vm_page_unlock(pages[i]);
}
}
@@ -177,9 +175,7 @@ ncl_getpages(struct vop_getpages_args *ap)
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
vm_page_lock(pages[i]);
vm_page_lock_queues();
vm_page_free(pages[i]);
vm_page_unlock_queues();
vm_page_unlock(pages[i]);
}
}

View File

@@ -431,9 +431,7 @@ nwfs_getpages(ap)
for (i = 0; i < npages; i++) {
if (ap->a_reqpage != i) {
vm_page_lock(pages[i]);
vm_page_lock_queues();
vm_page_free(pages[i]);
vm_page_unlock_queues();
vm_page_unlock(pages[i]);
}
}

View File

@@ -443,9 +443,7 @@ smbfs_getpages(ap)
for (i = 0; i < npages; ++i) {
if (i != reqpage) {
vm_page_lock(pages[i]);
vm_page_lock_queues();
vm_page_free(pages[i]);
vm_page_unlock_queues();
vm_page_unlock(pages[i]);
}
}
@@ -484,9 +482,7 @@ smbfs_getpages(ap)
for (i = 0; i < npages; i++) {
if (reqpage != i) {
vm_page_lock(pages[i]);
vm_page_lock_queues();
vm_page_free(pages[i]);
vm_page_unlock_queues();
vm_page_unlock(pages[i]);
}
}

View File

@@ -948,11 +948,9 @@ exec_map_first_page(imgp)
rv = vm_pager_get_pages(object, ma, initial_pagein, 0);
ma[0] = vm_page_lookup(object, 0);
if ((rv != VM_PAGER_OK) || (ma[0] == NULL)) {
if (ma[0]) {
if (ma[0] != NULL) {
vm_page_lock(ma[0]);
vm_page_lock_queues();
vm_page_free(ma[0]);
vm_page_unlock_queues();
vm_page_unlock(ma[0]);
}
VM_OBJECT_UNLOCK(object);

View File

@@ -132,9 +132,7 @@ nfs_getpages(struct vop_getpages_args *ap)
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
vm_page_lock(pages[i]);
vm_page_lock_queues();
vm_page_free(pages[i]);
vm_page_unlock_queues();
vm_page_unlock(pages[i]);
}
}
@@ -175,9 +173,7 @@ nfs_getpages(struct vop_getpages_args *ap)
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage) {
vm_page_lock(pages[i]);
vm_page_lock_queues();
vm_page_free(pages[i]);
vm_page_unlock_queues();
vm_page_unlock(pages[i]);
}
}

View File

@@ -850,9 +850,7 @@ ffs_getpages(ap)
for (i = 0; i < pcount; i++) {
if (i != ap->a_reqpage) {
vm_page_lock(ap->a_m[i]);
vm_page_lock_queues();
vm_page_free(ap->a_m[i]);
vm_page_unlock_queues();
vm_page_unlock(ap->a_m[i]);
}
}

View File

@@ -255,9 +255,7 @@ dev_pager_getpages(object, m, count, reqpage)
for (i = 0; i < count; i++) {
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_lock_queues();
vm_page_free(m[i]);
vm_page_unlock_queues();
vm_page_unlock(m[i]);
}
}
@@ -272,9 +270,7 @@ dev_pager_getpages(object, m, count, reqpage)
TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist, page, pageq);
for (i = 0; i < count; i++) {
vm_page_lock(m[i]);
vm_page_lock_queues();
vm_page_free(m[i]);
vm_page_unlock_queues();
vm_page_unlock(m[i]);
}
vm_page_insert(page, object, offset);

View File

@@ -200,9 +200,7 @@ sg_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
/* Free the original pages and insert this fake page into the object. */
for (i = 0; i < count; i++) {
vm_page_lock(m[i]);
vm_page_lock_queues();
vm_page_free(m[i]);
vm_page_unlock_queues();
vm_page_unlock(m[i]);
}
vm_page_insert(page, object, offset);

View File

@@ -164,9 +164,7 @@ unlock_and_deallocate(struct faultstate *fs)
if (fs->object != fs->first_object) {
VM_OBJECT_LOCK(fs->first_object);
vm_page_lock(fs->first_m);
vm_page_lock_queues();
vm_page_free(fs->first_m);
vm_page_unlock_queues();
vm_page_unlock(fs->first_m);
vm_object_pip_wakeup(fs->first_object);
VM_OBJECT_UNLOCK(fs->first_object);
@@ -348,9 +346,7 @@ RetryFault:;
if (fs.object != fs.first_object) {
VM_OBJECT_LOCK(fs.first_object);
vm_page_lock(fs.first_m);
vm_page_lock_queues();
vm_page_free(fs.first_m);
vm_page_unlock_queues();
vm_page_unlock(fs.first_m);
vm_object_pip_wakeup(fs.first_object);
VM_OBJECT_UNLOCK(fs.first_object);
@@ -638,9 +634,7 @@ RetryFault:;
if (((fs.map != kernel_map) && (rv == VM_PAGER_ERROR)) ||
(rv == VM_PAGER_BAD)) {
vm_page_lock(fs.m);
vm_page_lock_queues();
vm_page_free(fs.m);
vm_page_unlock_queues();
vm_page_unlock(fs.m);
fs.m = NULL;
unlock_and_deallocate(&fs);
@@ -648,9 +642,7 @@ RetryFault:;
}
if (fs.object != fs.first_object) {
vm_page_lock(fs.m);
vm_page_lock_queues();
vm_page_free(fs.m);
vm_page_unlock_queues();
vm_page_unlock(fs.m);
fs.m = NULL;
/*
@@ -764,13 +756,11 @@ RetryFault:;
* We don't chase down the shadow chain
*/
fs.object == fs.first_object->backing_object) {
vm_page_lock(fs.first_m);
vm_page_lock_queues();
/*
* get rid of the unnecessary page
*/
vm_page_lock(fs.first_m);
vm_page_free(fs.first_m);
vm_page_unlock_queues();
vm_page_unlock(fs.first_m);
/*
* grab the page and put it into the

View File

@@ -258,9 +258,7 @@ vm_imgact_hold_page(vm_object_t object, vm_ooffset_t offset)
goto out;
if (rv != VM_PAGER_OK) {
vm_page_lock(m);
vm_page_lock_queues();
vm_page_free(m);
vm_page_unlock_queues();
vm_page_unlock(m);
m = NULL;
goto out;
@@ -437,10 +435,8 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
if (m == NULL)
panic("vm_thread_dispose: kstack already missing?");
vm_page_lock(m);
vm_page_lock_queues();
vm_page_unwire(m, 0);
vm_page_free(m);
vm_page_unlock_queues();
vm_page_unlock(m);
}
VM_OBJECT_UNLOCK(ksobj);

View File

@@ -2046,9 +2046,7 @@ vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
break;
if (rv != VM_PAGER_OK) {
vm_page_lock(m);
vm_page_lock_queues();
vm_page_free(m);
vm_page_unlock_queues();
vm_page_unlock(m);
break;
}

View File

@@ -725,9 +725,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
for (i = 0; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_lock_queues();
vm_page_free(m[i]);
vm_page_unlock_queues();
vm_page_unlock(m[i]);
}
PCPU_INC(cnt.v_vnodein);
@@ -740,9 +738,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
for (i = 0; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_lock_queues();
vm_page_free(m[i]);
vm_page_unlock_queues();
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
@@ -759,9 +755,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
for (i = 0; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_lock_queues();
vm_page_free(m[i]);
vm_page_unlock_queues();
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
@@ -780,9 +774,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
for (i = 0; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_lock_queues();
vm_page_free(m[i]);
vm_page_unlock_queues();
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
@@ -795,9 +787,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
for (i = 0; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_lock_queues();
vm_page_free(m[i]);
vm_page_unlock_queues();
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
@@ -821,9 +811,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
for (; i < count; i++)
if (i != reqpage) {
vm_page_lock(m[i]);
vm_page_lock_queues();
vm_page_free(m[i]);
vm_page_unlock_queues();
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);
@@ -840,9 +828,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
(uintmax_t)object->un_pager.vnp.vnp_size);
}
vm_page_lock(m[i]);
vm_page_lock_queues();
vm_page_free(m[i]);
vm_page_unlock_queues();
vm_page_unlock(m[i]);
VM_OBJECT_UNLOCK(object);
runend = i + 1;
@@ -854,9 +840,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
VM_OBJECT_LOCK(object);
for (j = i; j < runend; j++) {
vm_page_lock(m[j]);
vm_page_lock_queues();
vm_page_free(m[j]);
vm_page_unlock_queues();
vm_page_unlock(m[j]);
}
VM_OBJECT_UNLOCK(object);
@@ -865,9 +849,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
VM_OBJECT_LOCK(object);
for (i = first + runpg; i < count; i++) {
vm_page_lock(m[i]);
vm_page_lock_queues();
vm_page_free(m[i]);
vm_page_unlock_queues();
vm_page_unlock(m[i]);
}
VM_OBJECT_UNLOCK(object);