Don't busy the page unless we are likely to release the object lock.

Reviewed by:	kib
Sponsored by:	EMC / Isilon Storage Division
Author:	Alan Cox
Date:	2013-06-06 06:17:20 +00:00
parent ba39d89bc9
commit 27a18d6a23
4 changed files with 19 additions and 11 deletions
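
The change is mechanical across all four files (the i915 GEM and TTM drivers, tmpfs, and exec_map_first_page()). Before this commit, each site grabbed the page busied and only called vm_page_wakeup() after wiring or holding it, so the page stayed busy even in the common case where it was already fully valid. With VM_ALLOC_NOBUSY the page comes back unbusied, and it is busied only on the slow path, where pager I/O may temporarily release the object lock. Condensed into one place, the new pattern looks roughly like the sketch below; grab_valid_page() is a hypothetical stand-in, and each caller's real error handling differs:

/*
 * Sketch only, not code from this commit: a condensed form of the
 * pattern the hunks below apply at each call site.
 */
static vm_page_t
grab_valid_page(vm_object_t obj, vm_pindex_t pidx)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_WLOCKED(obj);
	/* Grab the page without busying it (VM_ALLOC_NOBUSY). */
	m = vm_page_grab(obj, pidx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
	    VM_ALLOC_RETRY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		/*
		 * Slow path: the pager may drop the object lock while
		 * doing I/O, so the page must be busied to keep other
		 * threads from operating on it in the meantime.
		 */
		vm_page_busy(m);
		if (vm_pager_has_page(obj, pidx, NULL, NULL)) {
			if (vm_pager_get_pages(obj, &m, 1, 0) != VM_PAGER_OK)
				return (NULL);	/* error handling elided */
			m = vm_page_lookup(obj, pidx);
		} else
			vm_page_zero_invalid(m, TRUE);
		vm_page_wakeup(m);	/* unbusy before returning */
	}
	/* Fast path: the page was never busied, so no wakeup is needed. */
	return (m);
}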

@@ -2487,8 +2487,10 @@ i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex)
 	int rv;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
-	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
+	    VM_ALLOC_RETRY);
 	if (m->valid != VM_PAGE_BITS_ALL) {
+		vm_page_busy(m);
 		if (vm_pager_has_page(object, pindex, NULL, NULL)) {
 			rv = vm_pager_get_pages(object, &m, 1, 0);
 			m = vm_page_lookup(object, pindex);
@@ -2505,11 +2507,11 @@ i915_gem_wire_page(vm_object_t object, vm_pindex_t pindex)
 			m->valid = VM_PAGE_BITS_ALL;
 			m->dirty = 0;
 		}
+		vm_page_wakeup(m);
 	}
 	vm_page_lock(m);
 	vm_page_wire(m);
 	vm_page_unlock(m);
-	vm_page_wakeup(m);
 	atomic_add_long(&i915_gem_wired_pages_cnt, 1);
 	return (m);
 }

@@ -288,8 +288,10 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 	VM_OBJECT_WLOCK(obj);
 	vm_object_pip_add(obj, 1);
 	for (i = 0; i < ttm->num_pages; ++i) {
-		from_page = vm_page_grab(obj, i, VM_ALLOC_RETRY);
+		from_page = vm_page_grab(obj, i, VM_ALLOC_NOBUSY |
+		    VM_ALLOC_RETRY);
 		if (from_page->valid != VM_PAGE_BITS_ALL) {
+			vm_page_busy(from_page);
 			if (vm_pager_has_page(obj, i, NULL, NULL)) {
 				rv = vm_pager_get_pages(obj, &from_page, 1, 0);
 				if (rv != VM_PAGER_OK) {
@@ -301,15 +303,14 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 				}
 			} else
 				vm_page_zero_invalid(from_page, TRUE);
+			vm_page_wakeup(from_page);
 		}
 		to_page = ttm->pages[i];
 		if (unlikely(to_page == NULL)) {
-			vm_page_wakeup(from_page);
 			ret = -ENOMEM;
 			goto err_ret;
 		}
 		pmap_copy_page(from_page, to_page);
-		vm_page_wakeup(from_page);
 	}
 	vm_object_pip_wakeup(obj);
 	VM_OBJECT_WUNLOCK(obj);

@@ -460,8 +460,9 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
 	 * type object.
 	 */
 	m = vm_page_grab(tobj, idx, VM_ALLOC_NORMAL | VM_ALLOC_RETRY |
-	    VM_ALLOC_IGN_SBUSY);
+	    VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOBUSY);
 	if (m->valid != VM_PAGE_BITS_ALL) {
+		vm_page_busy(m);
 		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
 			rv = vm_pager_get_pages(tobj, &m, 1, 0);
 			m = vm_page_lookup(tobj, idx);
@@ -483,10 +484,10 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
 			}
 		} else
 			vm_page_zero_invalid(m, TRUE);
+		vm_page_wakeup(m);
 	}
 	vm_page_lock(m);
 	vm_page_hold(m);
-	vm_page_wakeup(m);
 	vm_page_unlock(m);
 	VM_OBJECT_WUNLOCK(tobj);
 	error = uiomove_fromphys(&m, offset, tlen, uio);
@@ -574,8 +575,10 @@ tmpfs_mappedwrite(vm_object_t tobj, size_t len, struct uio *uio)
 	tlen = MIN(PAGE_SIZE - offset, len);
 
 	VM_OBJECT_WLOCK(tobj);
-	tpg = vm_page_grab(tobj, idx, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+	tpg = vm_page_grab(tobj, idx, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
+	    VM_ALLOC_RETRY);
 	if (tpg->valid != VM_PAGE_BITS_ALL) {
+		vm_page_busy(tpg);
 		if (vm_pager_has_page(tobj, idx, NULL, NULL)) {
 			rv = vm_pager_get_pages(tobj, &tpg, 1, 0);
 			tpg = vm_page_lookup(tobj, idx);
@@ -597,10 +600,10 @@ tmpfs_mappedwrite(vm_object_t tobj, size_t len, struct uio *uio)
 			}
 		} else
 			vm_page_zero_invalid(tpg, TRUE);
+		vm_page_wakeup(tpg);
 	}
 	vm_page_lock(tpg);
 	vm_page_hold(tpg);
-	vm_page_wakeup(tpg);
 	vm_page_unlock(tpg);
 	VM_OBJECT_WUNLOCK(tobj);
 	error = uiomove_fromphys(&tpg, offset, tlen, uio);

@@ -937,8 +937,10 @@ exec_map_first_page(imgp)
 		object->pg_color = 0;
 	}
 #endif
-	ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
+	ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
+	    VM_ALLOC_RETRY);
 	if (ma[0]->valid != VM_PAGE_BITS_ALL) {
+		vm_page_busy(ma[0]);
 		initial_pagein = VM_INITIAL_PAGEIN;
 		if (initial_pagein > object->size)
 			initial_pagein = object->size;
@@ -968,11 +970,11 @@ exec_map_first_page(imgp)
 			VM_OBJECT_WUNLOCK(object);
 			return (EIO);
 		}
+		vm_page_wakeup(ma[0]);
 	}
 	vm_page_lock(ma[0]);
 	vm_page_hold(ma[0]);
 	vm_page_unlock(ma[0]);
-	vm_page_wakeup(ma[0]);
 	VM_OBJECT_WUNLOCK(object);
 
 	imgp->firstpage = sf_buf_alloc(ma[0], 0);
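
For contrast, the pattern being removed looked roughly like this (again a sketch, not tree code): the page came back busied from vm_page_grab() and stayed busy across the wire/hold even when no pager I/O ever happened, although the busy state added nothing there, since the object lock is held for the page's entire use.

	m = vm_page_grab(obj, pidx, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (m->valid != VM_PAGE_BITS_ALL) {
		/* ...pager I/O as above... */
	}
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_unlock(m);
	vm_page_wakeup(m);	/* page was busy this whole time */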