Introduce a field to struct vm_page for storing flags that are
synchronized by the lock on the object containing the page.

Transition PG_WANTED and PG_SWAPINPROG to use the new field,
eliminating the need for holding the page queues lock when setting
or clearing these flags.  Rename PG_WANTED and PG_SWAPINPROG to
VPO_WANTED and VPO_SWAPINPROG, respectively.

Eliminate the assertion that the page queues lock is held in
vm_page_io_finish().

Eliminate the acquisition and release of the page queues lock
around calls to vm_page_io_finish() in kern_sendfile() and
vfs_unbusy_pages().
commit 5786be7cc7
parent a359443290
Author: Alan Cox
Date:   2006-08-09 17:43:27 +00:00

10 changed files with 35 additions and 31 deletions

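In effect, the change converts the page's wait/wakeup handshake from a page-queues-lock protocol to an object-lock protocol: a sleeper sets VPO_WANTED in the new oflags field and sleeps on the page, while vm_page_flash() clears the flag and calls wakeup(), all under the same object lock. Below is a minimal userspace sketch of that protocol; a pthread mutex stands in for the object lock and a condition variable for the kernel's msleep()/wakeup(), and the struct and helper names are invented for illustration, not kernel code.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define	VPO_WANTED	0x0002		/* flag value from the patch */

struct vm_page_model {
	unsigned short oflags;		/* object-lock synchronized (O) */
	unsigned char busy;		/* busy count, also (O) */
};

struct vm_object_model {
	pthread_mutex_t mtx;		/* stands in for VM_OBJECT_LOCK */
	pthread_cond_t cv;		/* stands in for the sleep channel */
	struct vm_page_model page;
};

/* cf. vm_page_flash(): wake sleepers only if one declared interest. */
static void
page_flash(struct vm_object_model *obj, struct vm_page_model *m)
{
	/* caller holds obj->mtx, cf. VM_OBJECT_LOCK_ASSERT() */
	if (m->oflags & VPO_WANTED) {
		m->oflags &= ~VPO_WANTED;
		pthread_cond_broadcast(&obj->cv);	/* wakeup(m) */
	}
}

/* cf. vm_page_io_finish() after the patch: no page queues lock. */
static void *
io_finish(void *arg)
{
	struct vm_object_model *obj = arg;

	sleep(1);				/* model I/O latency */
	pthread_mutex_lock(&obj->mtx);
	if (--obj->page.busy == 0)
		page_flash(obj, &obj->page);
	pthread_mutex_unlock(&obj->mtx);
	return (NULL);
}

int
main(void)
{
	struct vm_object_model obj = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
		{ 0, 1 }			/* one I/O in progress */
	};
	pthread_t t;

	pthread_create(&t, NULL, io_finish, &obj);

	/* cf. vm_page_sleep_if_busy(): declare interest, then sleep. */
	pthread_mutex_lock(&obj.mtx);
	while (obj.page.busy != 0) {
		obj.page.oflags |= VPO_WANTED;
		pthread_cond_wait(&obj.cv, &obj.mtx);	/* msleep(m, ...) */
	}
	pthread_mutex_unlock(&obj.mtx);
	pthread_join(t, NULL);
	printf("page no longer busy\n");
	return (0);
}

VPO_WANTED is only a hint that a sleeper exists, so the common no-waiter completion path skips the wakeup entirely.
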
@@ -483,7 +483,7 @@ nwfs_getpages(ap)
 	 * now tell them that it is ok to use.
 	 */
 	if (!error) {
-		if (m->flags & PG_WANTED)
+		if (m->oflags & VPO_WANTED)
 			vm_page_activate(m);
 		else
 			vm_page_deactivate(m);

@@ -551,7 +551,7 @@ smbfs_getpages(ap)
 	 * now tell them that it is ok to use.
 	 */
 	if (!error) {
-		if (m->flags & PG_WANTED)
+		if (m->oflags & VPO_WANTED)
 			vm_page_activate(m);
 		else
 			vm_page_deactivate(m);

@@ -2086,9 +2086,7 @@ kern_sendfile(struct thread *td, struct sendfile_args *uap,
 			VOP_UNLOCK(vp, 0, td);
 			VFS_UNLOCK_GIANT(vfslocked);
 			VM_OBJECT_LOCK(obj);
-			vm_page_lock_queues();
 			vm_page_io_finish(pg);
-			vm_page_unlock_queues();
 			if (!error)
 				VM_OBJECT_UNLOCK(obj);
 			mbstat.sf_iocnt++;

@@ -3299,7 +3299,6 @@ vfs_unbusy_pages(struct buf *bp)
 	obj = bp->b_bufobj->bo_object;
 	VM_OBJECT_LOCK(obj);
-	vm_page_lock_queues();
 	for (i = 0; i < bp->b_npages; i++) {
 		m = bp->b_pages[i];
 		if (m == bogus_page) {
@@ -3313,7 +3312,6 @@ vfs_unbusy_pages(struct buf *bp)
 		vm_object_pip_subtract(obj, 1);
 		vm_page_io_finish(m);
 	}
-	vm_page_unlock_queues();
 	vm_object_pip_wakeupn(obj, 0);
 	VM_OBJECT_UNLOCK(obj);
 }

@@ -240,7 +240,7 @@ nfs_getpages(struct vop_getpages_args *ap)
 	 * now tell them that it is ok to use.
 	 */
 	if (!error) {
-		if (m->flags & PG_WANTED)
+		if (m->oflags & VPO_WANTED)
 			vm_page_activate(m);
 		else
 			vm_page_deactivate(m);

@@ -1056,16 +1056,14 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 		bp->b_pager.pg_reqpage = reqpage - i;
 
 		VM_OBJECT_LOCK(object);
-		vm_page_lock_queues();
 		{
 			int k;
 
 			for (k = i; k < j; ++k) {
 				bp->b_pages[k - i] = m[k];
-				vm_page_flag_set(m[k], PG_SWAPINPROG);
+				m[k]->oflags |= VPO_SWAPINPROG;
 			}
 		}
-		vm_page_unlock_queues();
 		bp->b_npages = j - i;
 
 		cnt.v_swapin++;
@@ -1093,14 +1091,15 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
 	swp_pager_strategy(bp);
 
 	/*
-	 * wait for the page we want to complete. PG_SWAPINPROG is always
+	 * wait for the page we want to complete. VPO_SWAPINPROG is always
 	 * cleared on completion. If an I/O error occurs, SWAPBLK_NONE
 	 * is set in the meta-data.
 	 */
 	VM_OBJECT_LOCK(object);
-	while ((mreq->flags & PG_SWAPINPROG) != 0) {
+	while ((mreq->oflags & VPO_SWAPINPROG) != 0) {
+		mreq->oflags |= VPO_WANTED;
 		vm_page_lock_queues();
-		vm_page_flag_set(mreq, PG_WANTED | PG_REFERENCED);
+		vm_page_flag_set(mreq, PG_REFERENCED);
 		vm_page_unlock_queues();
 		cnt.v_intrans++;
 		if (msleep(mreq, VM_OBJECT_MTX(object), PSWP, "swread", hz*20)) {
@@ -1282,9 +1281,7 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count,
 			vm_page_dirty(mreq);
 			rtvals[i+j] = VM_PAGER_OK;
 
-			vm_page_lock_queues();
-			vm_page_flag_set(mreq, PG_SWAPINPROG);
-			vm_page_unlock_queues();
+			mreq->oflags |= VPO_SWAPINPROG;
 			bp->b_pages[j] = mreq;
 		}
 		VM_OBJECT_UNLOCK(object);
@@ -1399,7 +1396,7 @@ swp_pager_async_iodone(struct buf *bp)
 	for (i = 0; i < bp->b_npages; ++i) {
 		vm_page_t m = bp->b_pages[i];
 
-		vm_page_flag_clear(m, PG_SWAPINPROG);
+		m->oflags &= ~VPO_SWAPINPROG;
 
 		if (bp->b_ioflags & BIO_ERROR) {
 			/*
@@ -1418,7 +1415,7 @@ swp_pager_async_iodone(struct buf *bp)
 			 * not match anything ).
 			 *
 			 * We have to wake specifically requested pages
-			 * up too because we cleared PG_SWAPINPROG and
+			 * up too because we cleared VPO_SWAPINPROG and
 			 * someone may be waiting for that.
 			 *
 			 * NOTE: for reads, m->dirty will probably
@@ -1472,7 +1469,7 @@ swp_pager_async_iodone(struct buf *bp)
 
 			/*
 			 * We have to wake specifically requested pages
-			 * up too because we cleared PG_SWAPINPROG and
+			 * up too because we cleared VPO_SWAPINPROG and
 			 * could be waiting for it in getpages. However,
 			 * be sure to not unbusy getpages specifically
 			 * requested page - getpages expects it to be

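The swap pager hunks above apply the same handshake to I/O completion: swap_pager_getpages() marks each page VPO_SWAPINPROG before issuing the read, then sleeps in the "swread" loop until swp_pager_async_iodone() clears the flag. Setting VPO_WANTED under the object lock before sleeping is what makes the wakeup race-free, because msleep() releases that same lock atomically. A hedged userspace model of the two halves follows; pthread primitives again stand in for msleep()/wakeup(), and every name other than the two flag values is invented.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define	VPO_WANTED	0x0002
#define	VPO_SWAPINPROG	0x0200

static pthread_mutex_t object_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t page_cv = PTHREAD_COND_INITIALIZER;
static unsigned short oflags;		/* models mreq->oflags */

/* Completion side, cf. swp_pager_async_iodone(). */
static void *
swap_iodone(void *arg)
{
	(void)arg;
	sleep(1);			/* model device latency */
	pthread_mutex_lock(&object_mtx);
	oflags &= ~VPO_SWAPINPROG;	/* always cleared on completion */
	if (oflags & VPO_WANTED) {	/* cf. vm_page_flash() */
		oflags &= ~VPO_WANTED;
		pthread_cond_broadcast(&page_cv);	/* wakeup(m) */
	}
	pthread_mutex_unlock(&object_mtx);
	return (NULL);
}

int
main(void)
{
	pthread_t t;

	pthread_mutex_lock(&object_mtx);
	oflags |= VPO_SWAPINPROG;	/* before issuing the read */
	pthread_mutex_unlock(&object_mtx);
	pthread_create(&t, NULL, swap_iodone, NULL);

	/* Consumer side, cf. the "swread" loop in swap_pager_getpages(). */
	pthread_mutex_lock(&object_mtx);
	while (oflags & VPO_SWAPINPROG) {
		oflags |= VPO_WANTED;	/* set before sleeping: no lost wakeup */
		pthread_cond_wait(&page_cv, &object_mtx);
	}
	pthread_mutex_unlock(&object_mtx);
	pthread_join(t, NULL);
	printf("swap-in complete\n");
	return (0);
}

The real loop additionally counts cnt.v_intrans and bounds each msleep() at 20 seconds to report stuck swap devices; the model omits both.
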
@@ -1153,10 +1153,11 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, int count, int advise)
 			goto unlock_tobject;
 		}
 		if ((m->flags & PG_BUSY) || m->busy) {
-			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
+			vm_page_flag_set(m, PG_REFERENCED);
 			vm_page_unlock_queues();
 			if (object != tobject)
 				VM_OBJECT_UNLOCK(object);
+			m->oflags |= VPO_WANTED;
 			msleep(m, VM_OBJECT_MTX(tobject), PDROP | PVM, "madvpo", 0);
 			VM_OBJECT_LOCK(object);
 			goto relookup;
@@ -1341,9 +1342,10 @@ vm_object_split(vm_map_entry_t entry)
 		 * not be changed by this operation.
 		 */
 		if ((m->flags & PG_BUSY) || m->busy) {
-			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
+			vm_page_flag_set(m, PG_REFERENCED);
 			vm_page_unlock_queues();
 			VM_OBJECT_UNLOCK(new_object);
+			m->oflags |= VPO_WANTED;
 			msleep(m, VM_OBJECT_MTX(orig_object), PDROP | PVM, "spltwt", 0);
 			VM_OBJECT_LOCK(new_object);
 			VM_OBJECT_LOCK(orig_object);
@@ -1476,10 +1478,10 @@ vm_object_backing_scan(vm_object_t object, int op)
 			} else if (op & OBSC_COLLAPSE_WAIT) {
 				if ((p->flags & PG_BUSY) || p->busy) {
 					vm_page_lock_queues();
-					vm_page_flag_set(p,
-					    PG_WANTED | PG_REFERENCED);
+					vm_page_flag_set(p, PG_REFERENCED);
 					vm_page_unlock_queues();
 					VM_OBJECT_UNLOCK(object);
+					p->oflags |= VPO_WANTED;
 					msleep(p, VM_OBJECT_MTX(backing_object),
 					    PDROP | PVM, "vmocol", 0);
 					VM_OBJECT_LOCK(object);

@@ -387,8 +387,8 @@ vm_page_flash(vm_page_t m)
 {
 
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
-	if (m->flags & PG_WANTED) {
-		vm_page_flag_clear(m, PG_WANTED);
+	if (m->oflags & VPO_WANTED) {
+		m->oflags &= ~VPO_WANTED;
 		wakeup(m);
 	}
 }
@@ -423,7 +423,6 @@ vm_page_io_finish(vm_page_t m)
 {
 
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	m->busy--;
 	if (m->busy == 0)
 		vm_page_flash(m);
@@ -500,7 +499,7 @@ vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
 	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
 		if (!mtx_owned(&vm_page_queue_mtx))
 			vm_page_lock_queues();
-		vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
+		vm_page_flag_set(m, PG_REFERENCED);
 		vm_page_unlock_queues();
 
 		/*
@@ -510,6 +509,7 @@ vm_page_sleep_if_busy(vm_page_t m, int also_m_busy, const char *msg)
 		 * such that even if m->object changes, we can re-lock
 		 * it.
 		 */
+		m->oflags |= VPO_WANTED;
 		msleep(m, VM_OBJECT_MTX(m->object), PVM, msg, 0);
 		return (TRUE);
 	}
@@ -1480,8 +1480,9 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 	if ((m = vm_page_lookup(object, pindex)) != NULL) {
 		vm_page_lock_queues();
 		if (m->busy || (m->flags & PG_BUSY)) {
-			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
+			vm_page_flag_set(m, PG_REFERENCED);
 			vm_page_unlock_queues();
+			m->oflags |= VPO_WANTED;
 			msleep(m, VM_OBJECT_MTX(m->object), PVM, "pgrbwt", 0);
 			if ((allocflags & VM_ALLOC_RETRY) == 0)
 				return (NULL);

@@ -116,6 +116,7 @@ struct vm_page {
 	u_short	wire_count;		/* wired down maps refs (P) */
 	u_int	cow;			/* page cow mapping count */
 	short	hold_count;		/* page hold count */
+	u_short	oflags;			/* page flags (O) */
 	u_char	act_count;		/* page usage count */
 	u_char	busy;			/* page busy count (O) */
 	/* NOTE that these must support one bit per DEV_BSIZE in a page!!! */
@@ -135,6 +136,15 @@
 #endif
 };
 
+/*
+ * Page flags stored in oflags:
+ *
+ * Access to these page flags is synchronized by the lock on the object
+ * containing the page (O).
+ */
+#define	VPO_WANTED	0x0002	/* someone is waiting for page */
+#define	VPO_SWAPINPROG	0x0200	/* swap I/O in progress on page */
+
 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
 #if PAGE_SIZE == 32768
 #ifdef CTASSERT
@@ -210,14 +220,12 @@ extern struct pq_coloring page_queue_coloring;
  * the object, and such pages are also not on any PQ queue.
  */
 #define	PG_BUSY		0x0001	/* page is in transit (O) */
-#define	PG_WANTED	0x0002	/* someone is waiting for page (O) */
 #define	PG_WINATCFLS	0x0004	/* flush dirty page on inactive q */
 #define	PG_FICTITIOUS	0x0008	/* physical page doesn't exist (O) */
 #define	PG_WRITEABLE	0x0010	/* page is mapped writeable */
 #define	PG_ZERO		0x0040	/* page is zeroed */
 #define	PG_REFERENCED	0x0080	/* page has been referenced */
 #define	PG_CLEANCHK	0x0100	/* page will be checked for cleaning */
-#define	PG_SWAPINPROG	0x0200	/* swap I/O in progress on page */
 #define	PG_NOSYNC	0x0400	/* do not collect for syncer */
 #define	PG_UNMANAGED	0x0800	/* No PV management for page */
 #define	PG_MARKER	0x1000	/* special queue marker page */

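The header hunks above are the crux of the change: m->flags keeps its page-queues-lock discipline, while the new m->oflags word is written only under the object lock. Keeping the two namespaces in separate words matters because |= and &= are read-modify-write sequences: if object-locked and queue-locked bits shared one word, two writers each holding "their" lock could still lose updates. A small sketch of the resulting discipline, with illustrative lock and helper names:

#include <pthread.h>

#define	PG_REFERENCED	0x0080		/* m->flags: page queues lock */
#define	VPO_WANTED	0x0002		/* m->oflags: object lock */

struct vm_page_model {
	unsigned short flags;		/* (Q) page queues lock */
	unsigned short oflags;		/* (O) object lock */
};

static pthread_mutex_t queue_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t object_mtx = PTHREAD_MUTEX_INITIALIZER;

/* cf. vm_page_flag_set(): still requires the page queues lock. */
static void
page_flag_set(struct vm_page_model *m, unsigned short bits)
{
	pthread_mutex_lock(&queue_mtx);
	m->flags |= bits;	/* RMW safe: all flags writers hold queue_mtx */
	pthread_mutex_unlock(&queue_mtx);
}

/* oflags updates need only the object lock, as in the diffs above. */
static void
page_oflag_set(struct vm_page_model *m, unsigned short bits)
{
	pthread_mutex_lock(&object_mtx);
	m->oflags |= bits;	/* RMW safe: all oflags writers hold object_mtx */
	pthread_mutex_unlock(&object_mtx);
}

int
main(void)
{
	struct vm_page_model m = { 0, 0 };

	page_flag_set(&m, PG_REFERENCED);
	page_oflag_set(&m, VPO_WANTED);
	return (0);
}
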
@@ -956,7 +956,7 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 	 * now tell them that it is ok to use
 	 */
 	if (!error) {
-		if (mt->flags & PG_WANTED)
+		if (mt->oflags & VPO_WANTED)
 			vm_page_activate(mt);
 		else
 			vm_page_deactivate(mt);