Handle the driver KPI change from r292373.  Ensure that managed device
pagers' fault routines always return with a result page, be it the
proper and valid result page or the initially passed, freshly allocated
placeholder.  Do not free the passed-in page until we are able to
provide the replacement, and do not assign NULL to *mres.

Reported and tested by:	dumbbell
Reviewed by:	royger (who also verified that Xen code is safe)
Sponsored by:	The FreeBSD Foundation
Author:	kib
Date:	2016-03-24 09:56:53 +00:00
Commit:	c91e1d14af
Parent:	2683d49bfb

2 changed files with 23 additions and 28 deletions
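
For context, the rule the commit enforces can be shown with a minimal sketch of a
hypothetical managed-device (OBJT_MGTDEVICE) cdev pager fault routine.  The names
mydrv_pg_fault and mydrv_get_page are placeholders invented for illustration, not
part of the drm2 code touched here; the point is the ordering: the placeholder in
*mres is detached from the object early, but it is only freed, and *mres only
updated, once the real result page is in hand, so the routine never returns with
*mres set to NULL or pointing to a freed page.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

/*
 * Hypothetical helper: resolves the driver page backing "offset", inserts
 * it into vm_obj at the right pindex and returns it valid and
 * exclusive-busied, or NULL on failure.  Placeholder only.
 */
static vm_page_t mydrv_get_page(void *handle, vm_ooffset_t offset, int prot);

static int
mydrv_pg_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	vm_page_t page;

	/*
	 * Detach the placeholder page passed in by the fault code from
	 * the object, but do not free it and do not clear *mres yet:
	 * if we bail out early, the caller must still get a page back.
	 */
	if (*mres != NULL) {
		vm_page_lock(*mres);
		vm_page_remove(*mres);
		vm_page_unlock(*mres);
	}

	page = mydrv_get_page(vm_obj->handle, offset, prot);
	if (page == NULL)
		return (VM_PAGER_ERROR);	/* *mres still holds a page */

	/*
	 * Only now that the replacement exists may the placeholder be
	 * freed and *mres updated.
	 */
	if (*mres != NULL) {
		KASSERT(*mres != page, ("freeing the page being returned"));
		vm_page_lock(*mres);
		vm_page_free(*mres);
		vm_page_unlock(*mres);
	}
	*mres = page;
	return (VM_PAGER_OK);
}

Both diffs below apply exactly this reordering to the drm2 i915 and TTM fault handlers.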


@@ -1481,7 +1481,7 @@ i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	vm_page_t page, oldpage;
+	vm_page_t page;
 	int ret = 0;
 #ifdef FREEBSD_WIP
 	bool write = (prot & VM_PROT_WRITE) != 0;
@@ -1504,13 +1504,10 @@ i915_gem_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
 	 * progress.
 	 */
 	if (*mres != NULL) {
-		oldpage = *mres;
-		vm_page_lock(oldpage);
-		vm_page_remove(oldpage);
-		vm_page_unlock(oldpage);
-		*mres = NULL;
-	} else
-		oldpage = NULL;
+		vm_page_lock(*mres);
+		vm_page_remove(*mres);
+		vm_page_unlock(*mres);
+	}
 	VM_OBJECT_WUNLOCK(vm_obj);
 retry:
 	ret = 0;
@@ -1590,7 +1587,6 @@ retry:
 	}
 	page->valid = VM_PAGE_BITS_ALL;
 have_page:
-	*mres = page;
 	vm_page_xbusy(page);
 	CTR4(KTR_DRM, "fault %p %jx %x phys %x", gem_obj, offset, prot,
@@ -1603,11 +1599,13 @@ have_page:
 		i915_gem_object_unpin(obj);
 	}
 	DRM_UNLOCK(dev);
-	if (oldpage != NULL) {
-		vm_page_lock(oldpage);
-		vm_page_free(oldpage);
-		vm_page_unlock(oldpage);
+	if (*mres != NULL) {
+		KASSERT(*mres != page, ("loosing %p %p", *mres, page));
+		vm_page_lock(*mres);
+		vm_page_free(*mres);
+		vm_page_unlock(*mres);
 	}
+	*mres = page;
 	vm_object_pip_wakeup(vm_obj);
 	return (VM_PAGER_OK);


@@ -106,21 +106,18 @@ ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
 	struct ttm_buffer_object *bo = vm_obj->handle;
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_tt *ttm = NULL;
-	vm_page_t m, m1, oldm;
+	vm_page_t m, m1;
 	int ret;
 	int retval = VM_PAGER_OK;
 	struct ttm_mem_type_manager *man =
 	    &bdev->man[bo->mem.mem_type];
 	vm_object_pip_add(vm_obj, 1);
-	oldm = *mres;
-	if (oldm != NULL) {
-		vm_page_lock(oldm);
-		vm_page_remove(oldm);
-		vm_page_unlock(oldm);
-		*mres = NULL;
-	} else
-		oldm = NULL;
+	if (*mres != NULL) {
+		vm_page_lock(*mres);
+		vm_page_remove(*mres);
+		vm_page_unlock(*mres);
+	}
 retry:
 	VM_OBJECT_WUNLOCK(vm_obj);
 	m = NULL;
@@ -261,14 +258,14 @@ reserve:
 		    bo, m, m1, (uintmax_t)offset));
 	}
 	m->valid = VM_PAGE_BITS_ALL;
-	*mres = m;
 	vm_page_xbusy(m);
-	if (oldm != NULL) {
-		vm_page_lock(oldm);
-		vm_page_free(oldm);
-		vm_page_unlock(oldm);
+	if (*mres != NULL) {
+		KASSERT(*mres != m, ("loosing %p %p", *mres, m));
+		vm_page_lock(*mres);
+		vm_page_free(*mres);
+		vm_page_unlock(*mres);
 	}
+	*mres = m;
 out_io_unlock1:
 	ttm_mem_io_unlock(man);