Acquire the page lock around all remaining calls to vm_page_free() on
managed pages that didn't already have that lock held.  (Freeing an
unmanaged page, such as the various pmaps use, doesn't require the page
lock.)
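
As a rough illustration (not a line from this commit), the freeing
pattern that the hunks below converge on looks like this sketch, where
"object" and "m" stand in for whatever page a caller is discarding:

        VM_OBJECT_LOCK(object);
        vm_page_lock(m);                /* new requirement: the page lock */
        vm_page_lock_queues();          /* queues lock still wraps the free itself */
        vm_page_free(m);
        vm_page_unlock_queues();
        vm_page_unlock(m);              /* releasing after the free matches the hunks below */
        VM_OBJECT_UNLOCK(object);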

This allows a change in vm_page_remove()'s locking requirements.  It now
expects the page lock to be held instead of the page queues lock.
Consequently, the page queues lock is no longer required at all by callers
to vm_page_rename().
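
Illustratively, a vm_page_rename() call site now reduces to the
following sketch (again not verbatim from any one file; both objects'
locks are assumed to be held, as before):

        /*
         * vm_page_rename() calls vm_page_remove(), which now asserts
         * the page lock rather than the page queues lock.
         */
        vm_page_lock(m);
        vm_page_rename(m, new_object, new_pindex);
        vm_page_unlock(m);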

Discussed with: kib
Author: Alan Cox
Date:   2010-05-05 18:16:06 +00:00
Commit: 5ac59343be (parent: b5f770bd86)
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=207669

11 changed files with 59 additions and 34 deletions

--- a/sys/fs/nfsclient/nfs_clbio.c
+++ b/sys/fs/nfsclient/nfs_clbio.c
@@ -131,12 +131,15 @@ ncl_getpages(struct vop_getpages_args *ap)
          */
         VM_OBJECT_LOCK(object);
         if (pages[ap->a_reqpage]->valid != 0) {
-                vm_page_lock_queues();
                 for (i = 0; i < npages; ++i) {
-                        if (i != ap->a_reqpage)
+                        if (i != ap->a_reqpage) {
+                                vm_page_lock(pages[i]);
+                                vm_page_lock_queues();
                                 vm_page_free(pages[i]);
+                                vm_page_unlock_queues();
+                                vm_page_unlock(pages[i]);
+                        }
                 }
-                vm_page_unlock_queues();
                 VM_OBJECT_UNLOCK(object);
                 return (0);
         }
@@ -171,12 +174,15 @@ ncl_getpages(struct vop_getpages_args *ap)
         if (error && (uio.uio_resid == count)) {
                 ncl_printf("nfs_getpages: error %d\n", error);
                 VM_OBJECT_LOCK(object);
-                vm_page_lock_queues();
                 for (i = 0; i < npages; ++i) {
-                        if (i != ap->a_reqpage)
+                        if (i != ap->a_reqpage) {
+                                vm_page_lock(pages[i]);
+                                vm_page_lock_queues();
                                 vm_page_free(pages[i]);
+                                vm_page_unlock_queues();
+                                vm_page_unlock(pages[i]);
+                        }
                 }
-                vm_page_unlock_queues();
                 VM_OBJECT_UNLOCK(object);
                 return (VM_PAGER_ERROR);
         }

--- a/sys/fs/nwfs/nwfs_io.c
+++ b/sys/fs/nwfs/nwfs_io.c
@@ -428,12 +428,15 @@ nwfs_getpages(ap)
         VM_OBJECT_LOCK(object);
         if (error && (uio.uio_resid == count)) {
                 printf("nwfs_getpages: error %d\n",error);
-                vm_page_lock_queues();
                 for (i = 0; i < npages; i++) {
-                        if (ap->a_reqpage != i)
+                        if (ap->a_reqpage != i) {
+                                vm_page_lock(pages[i]);
+                                vm_page_lock_queues();
                                 vm_page_free(pages[i]);
+                                vm_page_unlock_queues();
+                                vm_page_unlock(pages[i]);
+                        }
                 }
-                vm_page_unlock_queues();
                 VM_OBJECT_UNLOCK(object);
                 return VM_PAGER_ERROR;
         }

--- a/sys/fs/smbfs/smbfs_io.c
+++ b/sys/fs/smbfs/smbfs_io.c
@@ -440,12 +440,15 @@ smbfs_getpages(ap)
          */
         VM_OBJECT_LOCK(object);
         if (m->valid != 0) {
-                vm_page_lock_queues();
                 for (i = 0; i < npages; ++i) {
-                        if (i != reqpage)
+                        if (i != reqpage) {
+                                vm_page_lock(pages[i]);
+                                vm_page_lock_queues();
                                 vm_page_free(pages[i]);
+                                vm_page_unlock_queues();
+                                vm_page_unlock(pages[i]);
+                        }
                 }
-                vm_page_unlock_queues();
                 VM_OBJECT_UNLOCK(object);
                 return 0;
         }
@@ -478,12 +481,15 @@ smbfs_getpages(ap)
         VM_OBJECT_LOCK(object);
         if (error && (uio.uio_resid == count)) {
                 printf("smbfs_getpages: error %d\n",error);
-                vm_page_lock_queues();
                 for (i = 0; i < npages; i++) {
-                        if (reqpage != i)
+                        if (reqpage != i) {
+                                vm_page_lock(pages[i]);
+                                vm_page_lock_queues();
                                 vm_page_free(pages[i]);
+                                vm_page_unlock_queues();
+                                vm_page_unlock(pages[i]);
+                        }
                 }
-                vm_page_unlock_queues();
                 VM_OBJECT_UNLOCK(object);
                 return VM_PAGER_ERROR;
         }

--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -949,9 +949,11 @@ exec_map_first_page(imgp)
         ma[0] = vm_page_lookup(object, 0);
         if ((rv != VM_PAGER_OK) || (ma[0] == NULL)) {
                 if (ma[0]) {
+                        vm_page_lock(ma[0]);
                         vm_page_lock_queues();
                         vm_page_free(ma[0]);
                         vm_page_unlock_queues();
+                        vm_page_unlock(ma[0]);
                 }
                 VM_OBJECT_UNLOCK(object);
                 return (EIO);

--- a/sys/kern/kern_subr.c
+++ b/sys/kern/kern_subr.c
@@ -104,9 +104,11 @@ vm_pgmoveco(vm_map_t mapa, vm_offset_t kaddr, vm_offset_t uaddr)
         if ((user_pg = vm_page_lookup(uobject, upindex)) != NULL) {
                 if (vm_page_sleep_if_busy(user_pg, TRUE, "vm_pgmoveco"))
                         goto retry;
+                vm_page_lock(user_pg);
                 vm_page_lock_queues();
                 pmap_remove_all(user_pg);
                 vm_page_free(user_pg);
+                vm_page_unlock(user_pg);
         } else {
                 /*
                  * Even if a physical page does not exist in the

--- a/sys/nfsclient/nfs_bio.c
+++ b/sys/nfsclient/nfs_bio.c
@@ -129,12 +129,15 @@ nfs_getpages(struct vop_getpages_args *ap)
          */
         VM_OBJECT_LOCK(object);
         if (pages[ap->a_reqpage]->valid != 0) {
-                vm_page_lock_queues();
                 for (i = 0; i < npages; ++i) {
-                        if (i != ap->a_reqpage)
+                        if (i != ap->a_reqpage) {
+                                vm_page_lock(pages[i]);
+                                vm_page_lock_queues();
                                 vm_page_free(pages[i]);
+                                vm_page_unlock_queues();
+                                vm_page_unlock(pages[i]);
+                        }
                 }
-                vm_page_unlock_queues();
                 VM_OBJECT_UNLOCK(object);
                 return (0);
         }
@@ -169,12 +172,15 @@ nfs_getpages(struct vop_getpages_args *ap)
         if (error && (uio.uio_resid == count)) {
                 nfs_printf("nfs_getpages: error %d\n", error);
                 VM_OBJECT_LOCK(object);
-                vm_page_lock_queues();
                 for (i = 0; i < npages; ++i) {
-                        if (i != ap->a_reqpage)
+                        if (i != ap->a_reqpage) {
+                                vm_page_lock(pages[i]);
+                                vm_page_lock_queues();
                                 vm_page_free(pages[i]);
+                                vm_page_unlock_queues();
+                                vm_page_unlock(pages[i]);
+                        }
                 }
-                vm_page_unlock_queues();
                 VM_OBJECT_UNLOCK(object);
                 return (VM_PAGER_ERROR);
         }

--- a/sys/ufs/ffs/ffs_vnops.c
+++ b/sys/ufs/ffs/ffs_vnops.c
@@ -847,13 +847,15 @@ ffs_getpages(ap)
         if (mreq->valid) {
                 if (mreq->valid != VM_PAGE_BITS_ALL)
                         vm_page_zero_invalid(mreq, TRUE);
-                vm_page_lock_queues();
                 for (i = 0; i < pcount; i++) {
                         if (i != ap->a_reqpage) {
+                                vm_page_lock(ap->a_m[i]);
+                                vm_page_lock_queues();
                                 vm_page_free(ap->a_m[i]);
+                                vm_page_unlock_queues();
+                                vm_page_unlock(ap->a_m[i]);
                         }
                 }
-                vm_page_unlock_queues();
                 VM_OBJECT_UNLOCK(mreq->object);
                 return VM_PAGER_OK;
         }

--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -778,9 +778,7 @@ RetryFault:;
                          * automatically made dirty.
                          */
                        vm_page_lock(fs.m);
-                       vm_page_lock_queues();
                        vm_page_rename(fs.m, fs.first_object, fs.first_pindex);
-                       vm_page_unlock_queues();
                        vm_page_unlock(fs.m);
                        vm_page_busy(fs.m);
                        fs.first_m = fs.m;

--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1461,9 +1461,7 @@ vm_object_split(vm_map_entry_t entry)
                         goto retry;
                 }
                 vm_page_lock(m);
-                vm_page_lock_queues();
                 vm_page_rename(m, new_object, idx);
-                vm_page_unlock_queues();
                 vm_page_unlock(m);
                 /* page automatically made dirty by rename and cache handled */
                 vm_page_busy(m);
@@ -1691,9 +1689,7 @@ vm_object_backing_scan(vm_object_t object, int op)
                          * mapped through the rename.
                          */
                        vm_page_lock(p);
-                       vm_page_lock_queues();
                        vm_page_rename(p, object, new_pindex);
-                       vm_page_unlock_queues();
                        vm_page_unlock(p);
                        /* page automatically made dirty by rename */
                }

--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -791,7 +791,7 @@ vm_page_remove(vm_page_t m)
         vm_page_t root;
 
         if ((m->flags & PG_UNMANAGED) == 0)
-                mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+                vm_page_lock_assert(m, MA_OWNED);
         if ((object = m->object) == NULL)
                 return;
         VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
@@ -2234,11 +2234,11 @@ vm_page_cowfault(vm_page_t m)
 
 retry_alloc:
         pmap_remove_all(m);
+        vm_page_unlock_queues();
         vm_page_remove(m);
         mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
         if (mnew == NULL) {
                 vm_page_insert(m, object, pindex);
-                vm_page_unlock_queues();
                 vm_page_unlock(m);
                 VM_OBJECT_UNLOCK(object);
                 VM_WAIT;
@@ -2261,7 +2261,12 @@ vm_page_cowfault(vm_page_t m)
                  * waiting to allocate a page.  If so, put things back
                  * the way they were
                  */
+                vm_page_unlock(m);
+                vm_page_lock(mnew);
+                vm_page_lock_queues();
                 vm_page_free(mnew);
+                vm_page_unlock_queues();
+                vm_page_unlock(mnew);
                 vm_page_insert(m, object, pindex);
         } else { /* clear COW & copy page */
                 if (!so_zerocp_fullpage)
@@ -2270,9 +2275,8 @@ vm_page_cowfault(vm_page_t m)
                 vm_page_dirty(mnew);
                 mnew->wire_count = m->wire_count - m->cow;
                 m->wire_count = m->cow;
+                vm_page_unlock(m);
         }
-        vm_page_unlock_queues();
-        vm_page_unlock(m);
 }
 
 void

--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -103,7 +103,7 @@ struct vm_page {
         TAILQ_ENTRY(vm_page) listq;     /* pages in same object (O)   */
         struct vm_page *left;           /* splay tree link (O)        */
         struct vm_page *right;          /* splay tree link (O)        */
-        vm_object_t object;             /* which object am I in (O,Q)*/
+        vm_object_t object;             /* which object am I in (O,P)*/
         vm_pindex_t pindex;             /* offset into object (O,Q) */
         vm_paddr_t phys_addr;           /* physical address of page */
         struct md_page md;              /* machine dependant stuff */