 - Increase the object lock's scope in vm_contig_launder() so that access
   to the object's type field and the call to vm_pageout_flush() are
   synchronized.
 - The above change allows for the elimination of the last parameter
   to vm_pageout_flush().
 - Synchronize access to the page's valid field in vm_pageout_flush()
   using the containing object's lock.
alc 2003-10-18 21:09:21 +00:00
parent 39bde695bf
commit bccf1d15ab
4 changed files with 18 additions and 17 deletions
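The net effect is a simpler locking contract: a caller of vm_pageout_flush() must now hold the containing object's lock across both the object->type check and the flush itself, rather than passing a flag saying whether the lock is held. The following user-space sketch models that pattern with a pthread mutex; it is an illustrative analogy only, and the object/flush/launder names are hypothetical, not the kernel API:

/*
 * Analogy of the new locking contract (hypothetical names, not the
 * kernel API).  The caller widens the lock's scope to cover both the
 * type check and the flush, so flush() no longer needs an
 * "is_object_locked" parameter.
 */
#include <pthread.h>
#include <stdio.h>

struct object {
	pthread_mutex_t	lock;	/* stands in for VM_OBJECT_LOCK */
	int		type;	/* stands in for object->type */
	int		dirty;	/* stands in for the page state */
};

#define	OBJT_EXAMPLE	1

/* Runs with obj->lock held by the caller; returns pages "flushed". */
static int
flush(struct object *obj)
{

	obj->dirty = 0;
	return (1);
}

static int
launder(struct object *obj)
{
	int flushed;

	flushed = 0;
	pthread_mutex_lock(&obj->lock);		/* widened scope ... */
	if (obj->type == OBJT_EXAMPLE && obj->dirty)
		flushed = flush(obj);		/* ... covers the call too */
	pthread_mutex_unlock(&obj->lock);
	return (flushed);
}

int
main(void)
{
	struct object obj = { PTHREAD_MUTEX_INITIALIZER, OBJT_EXAMPLE, 1 };

	printf("flushed %d page(s)\n", launder(&obj));
	return (0);
}

With the conditional locking gone, the invariant "object locked on entry" holds on every path, which is exactly what the commit's diffs below establish.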

--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -91,12 +91,16 @@ vm_contig_launder(int queue)
 {
 	vm_object_t object;
 	vm_page_t m, m_tmp, next;
+	struct vnode *vp;
 
 	for (m = TAILQ_FIRST(&vm_page_queues[queue].pl); m != NULL; m = next) {
 		next = TAILQ_NEXT(m, pageq);
 		KASSERT(m->queue == queue,
 		    ("vm_contig_launder: page %p's queue is not %d", m, queue));
+		if (!VM_OBJECT_TRYLOCK(m->object))
+			continue;
 		if (vm_page_sleep_if_busy(m, TRUE, "vpctw0")) {
+			VM_OBJECT_UNLOCK(m->object);
 			vm_page_lock_queues();
 			return (TRUE);
 		}
@@ -105,22 +109,25 @@ vm_contig_launder(int queue)
 			object = m->object;
 			if (object->type == OBJT_VNODE) {
 				vm_page_unlock_queues();
-				vn_lock(object->handle,
-				    LK_EXCLUSIVE | LK_RETRY, curthread);
+				vp = object->handle;
+				VM_OBJECT_UNLOCK(object);
+				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
 				VM_OBJECT_LOCK(object);
 				vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 				VM_OBJECT_UNLOCK(object);
-				VOP_UNLOCK(object->handle, 0, curthread);
+				VOP_UNLOCK(vp, 0, curthread);
 				vm_page_lock_queues();
 				return (TRUE);
 			} else if (object->type == OBJT_SWAP ||
 				   object->type == OBJT_DEFAULT) {
 				m_tmp = m;
-				vm_pageout_flush(&m_tmp, 1, 0, FALSE);
+				vm_pageout_flush(&m_tmp, 1, 0);
+				VM_OBJECT_UNLOCK(object);
 				return (TRUE);
 			}
 		} else if (m->busy == 0 && m->hold_count == 0)
 			vm_page_cache(m);
+		VM_OBJECT_UNLOCK(m->object);
 	}
 	return (FALSE);
 }

--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -926,7 +926,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
 	runlen = maxb + maxf + 1;
 	splx(s);
-	vm_pageout_flush(ma, runlen, pagerflags, TRUE);
+	vm_pageout_flush(ma, runlen, pagerflags);
 	for (i = 0; i < runlen; i++) {
 		if (ma[i]->valid & ma[i]->dirty) {
 			pmap_page_protect(ma[i], VM_PROT_READ);

--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -348,7 +348,7 @@ vm_pageout_clean(m)
 	/*
 	 * we allow reads during pageouts...
 	 */
-	return (vm_pageout_flush(&mc[page_base], pageout_count, 0, TRUE));
+	return (vm_pageout_flush(&mc[page_base], pageout_count, 0));
 }
 
 /*
@@ -361,11 +361,7 @@ vm_pageout_clean(m)
  * the ordering.
  */
 int
-vm_pageout_flush(mc, count, flags, is_object_locked)
-	vm_page_t *mc;
-	int count;
-	int flags;
-	int is_object_locked;
+vm_pageout_flush(vm_page_t *mc, int count, int flags)
 {
 	vm_object_t object;
 	int pageout_status[count];
@@ -384,14 +380,14 @@ vm_pageout_flush(mc, count, flags, is_object_locked)
 	 * edge case with file fragments.
 	 */
 	for (i = 0; i < count; i++) {
-		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL, ("vm_pageout_flush page %p index %d/%d: partially invalid page", mc[i], i, count));
+		KASSERT(mc[i]->valid == VM_PAGE_BITS_ALL,
+		    ("vm_pageout_flush: partially invalid page %p index %d/%d",
+		    mc[i], i, count));
 		vm_page_io_start(mc[i]);
 		pmap_page_protect(mc[i], VM_PROT_READ);
 	}
 	object = mc[0]->object;
 	vm_page_unlock_queues();
-	if (!is_object_locked)
-		VM_OBJECT_LOCK(object);
 	vm_object_pip_add(object, count);
 	VM_OBJECT_UNLOCK(object);
 
@@ -444,8 +440,6 @@ vm_pageout_flush(mc, count, flags, is_object_locked)
 			pmap_page_protect(mt, VM_PROT_READ);
 		}
 	}
-	if (!is_object_locked)
-		VM_OBJECT_UNLOCK(object);
 	return numpagedout;
 }

--- a/sys/vm/vm_pageout.h
+++ b/sys/vm/vm_pageout.h
@@ -108,6 +108,6 @@ void vm_proc_swapin_all(struct swdevt *);
 #endif /* !NO_SWAPPING */
 
 #ifdef _KERNEL
-int vm_pageout_flush(vm_page_t *, int, int, int is_object_locked);
+int vm_pageout_flush(vm_page_t *, int, int);
 #endif
 #endif /* _VM_VM_PAGEOUT_H_ */