Lock the page around vm_page_activate() and vm_page_deactivate() calls
where it was missed. The wrapped fragments now protect wire_count with
the page lock.

Reviewed by:	alc
Author: Konstantin Belousov
Date:   2010-05-03 20:31:13 +00:00
parent 5637a59143
commit fc0c3802f0
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=207584
5 changed files with 26 additions and 8 deletions

View File

@@ -665,11 +665,13 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
sf_buf_free(sf);
sched_unpin();
vm_page_wakeup(m);
vm_page_lock(m);
vm_page_lock_queues();
vm_page_activate(m);
if (bp->bio_cmd == BIO_WRITE)
vm_page_dirty(m);
vm_page_unlock_queues();
vm_page_unlock(m);
/* Actions on further pages start at offset 0 */
p += PAGE_SIZE - offs;

View File

@@ -192,12 +192,14 @@ ncl_getpages(struct vop_getpages_args *ap)
size = count - uio.uio_resid;
VM_OBJECT_LOCK(object);
vm_page_lock_queues();
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
m = pages[i];
vm_page_lock(m);
vm_page_lock_queues();
if (nextoff <= size) {
/*
* Read operation filled an entire page
@@ -244,8 +246,10 @@ ncl_getpages(struct vop_getpages_args *ap)
vm_page_free(m);
}
}
vm_page_unlock_queues();
vm_page_unlock(m);
}
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);
return (0);
}

View File

@@ -449,12 +449,14 @@ nwfs_getpages(ap)
size = count - uio.uio_resid;
vm_page_lock_queues();
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
m = pages[i];
vm_page_lock(m);
vm_page_lock_queues();
if (nextoff <= size) {
m->valid = VM_PAGE_BITS_ALL;
KASSERT(m->dirty == 0,
@@ -489,8 +491,10 @@ nwfs_getpages(ap)
vm_page_free(m);
}
}
vm_page_unlock_queues();
vm_page_unlock(m);
}
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);
return 0;
#endif /* NWFS_RWCACHE */

View File

@@ -500,12 +500,14 @@ smbfs_getpages(ap)
size = count - uio.uio_resid;
vm_page_lock_queues();
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
m = pages[i];
vm_page_lock(m);
vm_page_lock_queues();
if (nextoff <= size) {
/*
* Read operation filled an entire page
@@ -553,8 +555,10 @@ smbfs_getpages(ap)
vm_page_free(m);
}
}
vm_page_unlock_queues();
vm_page_unlock(m);
}
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);
return 0;
#endif /* SMBFS_RWGENERIC */

View File

@@ -189,12 +189,14 @@ nfs_getpages(struct vop_getpages_args *ap)
size = count - uio.uio_resid;
VM_OBJECT_LOCK(object);
vm_page_lock_queues();
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
m = pages[i];
vm_page_lock(m);
vm_page_lock_queues();
if (nextoff <= size) {
/*
* Read operation filled an entire page
@@ -241,8 +243,10 @@ nfs_getpages(struct vop_getpages_args *ap)
vm_page_free(m);
}
}
vm_page_unlock_queues();
vm_page_unlock(m);
}
vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);
return (0);
}