o Require that the page queues lock is held on entry to vm_pageout_clean()
  and vm_pageout_flush().
o Acquire the page queues lock before calling vm_pageout_clean()
  or vm_pageout_flush().
alc 2002-07-27 23:20:32 +00:00
parent 57f793c52f
commit 5805f56cf3
3 changed files with 9 additions and 5 deletions
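The practical effect is a caller-side locking convention: vm_pageout_clean() and vm_pageout_flush() now assert that the page queues lock (vm_page_queue_mtx) is held on entry, so every caller must take the lock before the call and release it afterwards, exactly as the vm_contig_launder() and vm_pageout_scan() hunks below do. A minimal sketch of that convention follows; the pageout_one_page() wrapper is hypothetical and exists only to illustrate the pattern, it is not part of this commit.

/*
 * Hypothetical kernel-only helper illustrating the convention introduced
 * by this commit: the page queues lock must be held across the call to
 * vm_pageout_flush(), and the caller releases it afterwards.
 */
static int
pageout_one_page(vm_page_t m)
{
	vm_page_t m_tmp;
	int numpagedout;

	m_tmp = m;
	vm_page_lock_queues();		/* required before the call */
	numpagedout = vm_pageout_flush(&m_tmp, 1, 0);
	vm_page_unlock_queues();	/* caller drops the lock */
	return (numpagedout);
}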

@@ -108,7 +108,9 @@ vm_contig_launder(int queue)
 			} else if (object->type == OBJT_SWAP ||
 			    object->type == OBJT_DEFAULT) {
 				m_tmp = m;
+				vm_page_lock_queues();
 				vm_pageout_flush(&m_tmp, 1, 0);
+				vm_page_unlock_queues();
 				return (TRUE);
 			}
 		} else if (m->busy == 0 && m->hold_count == 0) {

@@ -850,7 +850,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
 			return(0);
 		}
 	}
+	vm_page_lock_queues();
 	maxf = 0;
 	for(i = 1; i < vm_pageout_page_count; i++) {
 		vm_page_t tp;
@@ -934,6 +934,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
 				maxf = i - maxb - 1;
 			}
 		}
+	vm_page_unlock_queues();
 	return(maxf + 1);
 }

@@ -229,7 +229,7 @@ vm_pageout_clean(m)
 	int ib, is, page_base;
 	vm_pindex_t pindex = m->pindex;
-	GIANT_REQUIRED;
+	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	object = m->object;
@@ -368,7 +368,7 @@ vm_pageout_flush(mc, count, flags)
 	int numpagedout = 0;
 	int i;
-	GIANT_REQUIRED;
+	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	/*
 	 * Initiate I/O. Bump the vm_page_t->busy counter and
 	 * mark the pages read-only.
@@ -384,8 +384,8 @@ vm_pageout_flush(mc, count, flags)
 		vm_page_io_start(mc[i]);
 		vm_page_protect(mc[i], VM_PROT_READ);
 	}
 	object = mc[0]->object;
+	vm_page_unlock_queues();
 	vm_object_pip_add(object, count);
 	vm_pager_put_pages(object, mc, count,
@@ -438,7 +438,6 @@ vm_pageout_flush(mc, count, flags)
 				vm_page_protect(mt, VM_PROT_READ);
 		}
 	}
-	vm_page_unlock_queues();
 	return numpagedout;
 }
@@ -938,6 +937,7 @@ vm_pageout_scan(int pass)
 			 * the (future) cleaned page. Otherwise we could wind
 			 * up laundering or cleaning too many pages.
 			 */
+			vm_page_lock_queues();
 			s = splvm();
 			TAILQ_INSERT_AFTER(&vm_page_queues[PQ_INACTIVE].pl, m, &marker, pageq);
 			splx(s);
@@ -949,6 +949,7 @@ vm_pageout_scan(int pass)
 			next = TAILQ_NEXT(&marker, pageq);
 			TAILQ_REMOVE(&vm_page_queues[PQ_INACTIVE].pl, &marker, pageq);
 			splx(s);
+			vm_page_unlock_queues();
 			if (vp) {
 				vput(vp);
 				vn_finished_write(mp);