Fix a few places that free a page from an object without busy held. This is
tightening constraints on busy as a precursor to lockless page lookup and
should largely be a NOP for these cases.

Reviewed by:	alc, kib, markj
Differential Revision:	https://reviews.freebsd.org/D22611
This commit is contained in:
Jeff Roberson 2019-12-02 22:42:05 +00:00
parent 4504268a1b
commit 0f9e06e18b
5 changed files with 20 additions and 22 deletions

View File

@ -389,14 +389,16 @@ sgx_enclave_remove(struct sgx_softc *sc,
* First remove all the pages except SECS,
* then remove SECS page.
*/
p_secs = NULL;
restart:
TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
if (p->pindex == SGX_SECS_VM_OBJECT_INDEX) {
p_secs = p;
if (p->pindex == SGX_SECS_VM_OBJECT_INDEX)
continue;
}
if (vm_page_busy_acquire(p, VM_ALLOC_WAITFAIL) == 0)
goto restart;
sgx_page_remove(sc, p);
}
p_secs = vm_page_grab(object, SGX_SECS_VM_OBJECT_INDEX,
VM_ALLOC_NOCREAT);
/* Now remove SECS page */
if (p_secs != NULL)
sgx_page_remove(sc, p_secs);
@ -723,8 +725,9 @@ sgx_ioctl_create(struct sgx_softc *sc, struct sgx_enclave_create *param)
if ((sc->state & SGX_STATE_RUNNING) == 0) {
mtx_unlock(&sc->mtx);
/* Remove VA page that was just created for SECS page. */
p = vm_page_lookup(enclave->object,
- SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX);
p = vm_page_grab(enclave->object,
- SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX,
VM_ALLOC_NOCREAT);
sgx_page_remove(sc, p);
VM_OBJECT_WUNLOCK(object);
goto error;
@ -736,8 +739,9 @@ sgx_ioctl_create(struct sgx_softc *sc, struct sgx_enclave_create *param)
dprintf("%s: gp fault\n", __func__);
mtx_unlock(&sc->mtx);
/* Remove VA page that was just created for SECS page. */
p = vm_page_lookup(enclave->object,
- SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX);
p = vm_page_grab(enclave->object,
- SGX_VA_PAGES_OFFS - SGX_SECS_VM_OBJECT_INDEX,
VM_ALLOC_NOCREAT);
sgx_page_remove(sc, p);
VM_OBJECT_WUNLOCK(object);
goto error;

View File

@ -64,6 +64,7 @@ tegra_bo_destruct(struct tegra_bo *bo)
VM_OBJECT_WLOCK(bo->cdev_pager);
for (i = 0; i < bo->npages; i++) {
m = bo->m[i];
vm_page_busy_acquire(m, 0);
cdev_pager_free_page(bo->cdev_pager, m);
m->flags &= ~PG_FICTITIOUS;
vm_page_unwire_noq(m);

View File

@ -1024,14 +1024,6 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
return (error);
}
static void
md_swap_page_free(vm_page_t m)
{
vm_page_xunbusy(m);
vm_page_free(m);
}
static int
mdstart_swap(struct md_s *sc, struct bio *bp)
{
@ -1080,7 +1072,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
rv = vm_pager_get_pages(sc->object, &m, 1,
NULL, NULL);
if (rv == VM_PAGER_ERROR) {
md_swap_page_free(m);
vm_page_free(m);
break;
} else if (rv == VM_PAGER_FAIL) {
/*
@ -1110,7 +1102,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
rv = vm_pager_get_pages(sc->object, &m, 1,
NULL, NULL);
if (rv == VM_PAGER_ERROR) {
md_swap_page_free(m);
vm_page_free(m);
break;
} else if (rv == VM_PAGER_FAIL)
pmap_zero_page(m);
@ -1137,10 +1129,10 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
rv = vm_pager_get_pages(sc->object, &m, 1,
NULL, NULL);
if (rv == VM_PAGER_ERROR) {
md_swap_page_free(m);
vm_page_free(m);
break;
} else if (rv == VM_PAGER_FAIL) {
md_swap_page_free(m);
vm_page_free(m);
m = NULL;
} else {
/* Page is valid. */
@ -1152,7 +1144,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
}
} else {
vm_pager_page_unswapped(m);
md_swap_page_free(m);
vm_page_free(m);
m = NULL;
}
}

View File

@ -363,6 +363,7 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("%s: kstack already missing?", __func__);
vm_page_busy_acquire(m, 0);
vm_page_unwire_noq(m);
vm_page_free(m);
}

View File

@ -298,7 +298,7 @@ dmar_pgfree(vm_object_t obj, vm_pindex_t idx, int flags)
if ((flags & DMAR_PGF_OBJL) == 0)
VM_OBJECT_WLOCK(obj);
m = vm_page_lookup(obj, idx);
m = vm_page_grab(obj, idx, VM_ALLOC_NOCREAT);
if (m != NULL) {
vm_page_free(m);
atomic_subtract_int(&dmar_tbl_pagecnt, 1);