- Modify swap_pager_copy() and its callers such that the source and
  destination objects are locked on entry and exit.  Add comments to
  the callers noting that the locks can be released by swap_pager_copy().
- Remove several instances of GIANT_REQUIRED.
alc 2003-11-01 08:57:26 +00:00
parent 6c7fafd896
commit 404767be51
2 changed files with 18 additions and 32 deletions
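
For orientation, a minimal caller-side sketch of the contract this change establishes (kernel-style C; example_caller() is hypothetical and not part of the commit): both object locks are held across the call, and swap_pager_copy() itself may drop and reacquire them whenever it has to sleep.

/*
 * Hypothetical caller illustrating the post-commit contract: the source
 * and destination objects are locked on entry to and exit from
 * swap_pager_copy(), which may release and reacquire the locks
 * internally around sleep points.  Lock-ordering concerns are elided.
 */
static void
example_caller(vm_object_t srcobject, vm_object_t dstobject, vm_pindex_t offset)
{
	VM_OBJECT_LOCK(srcobject);
	VM_OBJECT_LOCK(dstobject);
	/*
	 * Both locks are asserted inside swap_pager_copy() and are held
	 * again on return, but state read before the call may be stale
	 * because the locks can be dropped while it sleeps.
	 */
	swap_pager_copy(srcobject, dstobject, offset, 0);
	VM_OBJECT_UNLOCK(dstobject);
	VM_OBJECT_UNLOCK(srcobject);
}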

sys/vm/swap_pager.c

@@ -527,8 +527,6 @@ swap_pager_dealloc(vm_object_t object)
 {
 	int s;
 
-	GIANT_REQUIRED;
-
 	/*
 	 * Remove from list right away so lookups will fail if we block for
 	 * pageout completion.
@@ -788,7 +786,8 @@ swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
 	vm_pindex_t i;
 	int s;
 
-	GIANT_REQUIRED;
+	VM_OBJECT_LOCK_ASSERT(srcobject, MA_OWNED);
+	VM_OBJECT_LOCK_ASSERT(dstobject, MA_OWNED);
 
 	s = splvm();
 	/*
@@ -841,9 +840,16 @@ swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
 			);
 
 			if (srcaddr != SWAPBLK_NONE) {
-				VM_OBJECT_LOCK(dstobject);
+				/*
+				 * swp_pager_meta_build() can sleep.
+				 */
+				vm_object_pip_add(srcobject, 1);
+				VM_OBJECT_UNLOCK(srcobject);
+				vm_object_pip_add(dstobject, 1);
 				swp_pager_meta_build(dstobject, i, srcaddr);
-				VM_OBJECT_UNLOCK(dstobject);
+				vm_object_pip_wakeup(dstobject);
+				VM_OBJECT_LOCK(srcobject);
+				vm_object_pip_wakeup(srcobject);
 			}
 		} else {
 			/*
@@ -862,9 +868,7 @@ swap_pager_copy(vm_object_t srcobject, vm_object_t dstobject,
 	 * double-remove the object from the swap queues.
 	 */
 	if (destroysource) {
-		VM_OBJECT_LOCK(srcobject);
 		swp_pager_meta_free_all(srcobject);
-		VM_OBJECT_UNLOCK(srcobject);
 		/*
 		 * Reverting the type is not necessary, the caller is going
 		 * to destroy srcobject directly, but I'm doing it here
@@ -1800,7 +1804,6 @@ swp_pager_meta_build(vm_object_t object, vm_pindex_t pindex, daddr_t swapblk)
 	struct swblock **pswap;
 	int idx;
 
-	GIANT_REQUIRED;
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	/*
 	 * Convert default object to swap object if necessary
@@ -2008,7 +2011,7 @@ swp_pager_meta_ctl(vm_object_t object, vm_pindex_t pindex, int flags)
 	daddr_t r1;
 	int idx;
 
-	GIANT_REQUIRED;
+	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	/*
 	 * The meta data only exists of the object is OBJT_SWAP
 	 * and even then might not be allocated yet.
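
The block added in the hunk around line 840 above is the usual paging-in-progress (pip) pattern around a sleep point: each object gets a pip reference so it cannot be reclaimed, the source object's lock is dropped before the potentially sleeping call, and that lock is reacquired before the pip references are released. A hedged sketch of that shape follows; do_sleepable_work() is a hypothetical stand-in for swp_pager_meta_build(), and pip_hold_example() is not code from this commit.

static void do_sleepable_work(vm_object_t dstobject);

/*
 * Sketch of the pip-hold pattern: a paging-in-progress reference keeps
 * a vm_object alive while its lock is temporarily released around code
 * that may sleep.  Entered with both object locks held.
 */
static void
pip_hold_example(vm_object_t srcobject, vm_object_t dstobject)
{
	vm_object_pip_add(srcobject, 1);	/* pin srcobject ... */
	VM_OBJECT_UNLOCK(srcobject);		/* ... then let go of its lock */
	vm_object_pip_add(dstobject, 1);	/* pin dstobject as well */
	do_sleepable_work(dstobject);		/* may drop and retake dstobject's lock */
	vm_object_pip_wakeup(dstobject);	/* drop dstobject's pip reference */
	VM_OBJECT_LOCK(srcobject);		/* relock srcobject ... */
	vm_object_pip_wakeup(srcobject);	/* ... before dropping its reference */
}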

sys/vm/vm_object.c

@@ -1227,24 +1227,19 @@ vm_object_split(vm_map_entry_t entry)
 		vm_page_busy(m);
 		vm_page_unlock_queues();
 	}
-	VM_OBJECT_UNLOCK(new_object);
 	if (orig_object->type == OBJT_SWAP) {
-		vm_object_pip_add(orig_object, 1);
-		VM_OBJECT_UNLOCK(orig_object);
 		/*
-		 * copy orig_object pages into new_object
-		 * and destroy unneeded pages in
-		 * shadow object.
+		 * swap_pager_copy() can sleep, in which case the orig_object's
+		 * and new_object's locks are released and reacquired.
 		 */
 		swap_pager_copy(orig_object, new_object, offidxstart, 0);
-		VM_OBJECT_LOCK(orig_object);
-		vm_object_pip_wakeup(orig_object);
 	}
 	VM_OBJECT_UNLOCK(orig_object);
 	vm_page_lock_queues();
 	TAILQ_FOREACH(m, &new_object->memq, listq)
 		vm_page_wakeup(m);
 	vm_page_unlock_queues();
+	VM_OBJECT_UNLOCK(new_object);
 	entry->object.vm_object = new_object;
 	entry->offset = 0LL;
 	vm_object_deallocate(orig_object);
@@ -1547,27 +1542,15 @@ vm_object_collapse(vm_object_t object)
 			 * Move the pager from backing_object to object.
 			 */
 			if (backing_object->type == OBJT_SWAP) {
-				vm_object_pip_add(backing_object, 1);
-				VM_OBJECT_UNLOCK(backing_object);
 				/*
-				 * scrap the paging_offset junk and do a
-				 * discrete copy. This also removes major
-				 * assumptions about how the swap-pager
-				 * works from where it doesn't belong. The
-				 * new swapper is able to optimize the
-				 * destroy-source case.
+				 * swap_pager_copy() can sleep, in which case
+				 * the backing_object's and object's locks are
+				 * released and reacquired.
 				 */
-				vm_object_pip_add(object, 1);
-				VM_OBJECT_UNLOCK(object);
 				swap_pager_copy(
 				    backing_object,
 				    object,
 				    OFF_TO_IDX(object->backing_object_offset), TRUE);
-				VM_OBJECT_LOCK(object);
-				vm_object_pip_wakeup(object);
-				VM_OBJECT_LOCK(backing_object);
-				vm_object_pip_wakeup(backing_object);
 			}
 			/*
 			 * Object now shadows whatever backing_object did.
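
One practical consequence of the comments added in the two hunks above: because swap_pager_copy() can release and reacquire both locks while it sleeps, a caller must treat anything it read from either object before the call as potentially stale afterwards. A small illustrative sketch, not code from this commit (collapse_style_caller() is hypothetical):

/*
 * Illustrative only: the caller enters with both locks held; after
 * swap_pager_copy() returns the locks are held again, but any state
 * cached from either object beforehand may have changed while the
 * locks were dropped inside the call.
 */
static void
collapse_style_caller(vm_object_t object, vm_object_t backing_object)
{
	if (backing_object->type == OBJT_SWAP)
		swap_pager_copy(backing_object, object,
		    OFF_TO_IDX(object->backing_object_offset), TRUE);
	/* Revalidate anything derived from object or backing_object here. */
}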