Acquire the page lock around vm_page_unwire(). For consistency, extend the
scope of the object lock in agp_i810.c.  (In this specific case, the scope
of the object lock shouldn't matter, but I don't want to create a bad
example that might be copied to a case where it did matter.)

Reviewed by:	kib
This commit is contained in:
Alan Cox 2010-05-03 16:55:50 +00:00
parent c5a648516e
commit 5fdd0a335f
3 changed files with 9 additions and 1 deletion

View File

@@ -623,9 +623,11 @@ bad:
m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k));
if (k >= i)
vm_page_wakeup(m);
vm_page_lock(m);
vm_page_lock_queues();
vm_page_unwire(m, 0);
vm_page_unlock_queues();
vm_page_unlock(m);
}
VM_OBJECT_UNLOCK(mem->am_obj);
@@ -657,9 +659,11 @@ agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
VM_OBJECT_LOCK(mem->am_obj);
for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
m = vm_page_lookup(mem->am_obj, atop(i));
vm_page_lock(m);
vm_page_lock_queues();
vm_page_unwire(m, 0);
vm_page_unlock_queues();
vm_page_unlock(m);
}
VM_OBJECT_UNLOCK(mem->am_obj);

View File

@@ -1010,10 +1010,12 @@ agp_i810_free_memory(device_t dev, struct agp_memory *mem)
VM_OBJECT_LOCK(mem->am_obj);
m = vm_page_lookup(mem->am_obj, 0);
VM_OBJECT_UNLOCK(mem->am_obj);
vm_page_lock(m);
vm_page_lock_queues();
vm_page_unwire(m, 0);
vm_page_unlock_queues();
vm_page_unlock(m);
VM_OBJECT_UNLOCK(mem->am_obj);
} else {
contigfree(sc->argb_cursor, mem->am_size, M_AGP);
sc->argb_cursor = NULL;

View File

@@ -178,9 +178,11 @@ via_free_sg_info(drm_via_sg_info_t *vsg)
case dr_via_pages_locked:
for (i=0; i < vsg->num_pages; ++i) {
if ( NULL != (page = vsg->pages[i])) {
vm_page_lock(page);
vm_page_lock_queues();
vm_page_unwire(page, 0);
vm_page_unlock_queues();
vm_page_unlock(page);
}
}
case dr_via_pages_alloc: