Add a return value to vm_page_remove().

Use it to indicate whether the page may be safely freed following
its removal from the object.  Also change vm_page_remove() to assume
that the page's object pointer is non-NULL, and have callers perform
this check instead.

This is a step towards an implementation of an atomic reference counter
for each physical page structure.

Reviewed by:	alc, dougm, kib
MFC after:	1 week
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D20758
commit 0fd977b3fa (parent 926c3367c8)
Author:	Mark Johnston
Date:	2019-06-26 17:37:51 +00:00

7 changed files with 16 additions and 17 deletions
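
The practical effect on callers is that both the NULL-object check and the
decision to free move out of vm_page_remove().  Below is a minimal sketch of
the new calling convention; the function name is hypothetical, and it assumes
the caller already holds the object write lock and, for a managed page, the
page lock, as vm_page_remove() requires:

/*
 * Hypothetical caller illustrating the new vm_page_remove() convention.
 * The include list is illustrative; a real consumer's set will differ.
 */
#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

static void
example_remove_and_free(vm_page_t m)
{

	vm_page_lock(m);
	/*
	 * The caller, not vm_page_remove(), now checks for a NULL object
	 * pointer; a true return means the page is unwired and may be freed.
	 */
	if (m->object != NULL && vm_page_remove(m))
		vm_page_free(m);
	vm_page_unlock(m);
}

This mirrors the hunks below: vm_object_collapse_scan() replaces its
vm_page_wired() test with the return value, and vm_page_free_prep() takes
over the NULL-object check.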

sys/amd64/sgx/sgx.c

@@ -358,7 +358,7 @@ sgx_page_remove(struct sgx_softc *sc, vm_page_t p)
 	uint64_t offs;
 
 	vm_page_lock(p);
-	vm_page_remove(p);
+	(void)vm_page_remove(p);
 	vm_page_unlock(p);
 
 	dprintf("%s: p->pidx %ld\n", __func__, p->pindex);

sys/dev/drm2/ttm/ttm_bo_vm.c

@@ -115,7 +115,7 @@ ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
 	vm_object_pip_add(vm_obj, 1);
 	if (*mres != NULL) {
 		vm_page_lock(*mres);
-		vm_page_remove(*mres);
+		(void)vm_page_remove(*mres);
 		vm_page_unlock(*mres);
 	}
 retry:

sys/vm/device_pager.c

@@ -236,7 +236,7 @@ cdev_pager_free_page(vm_object_t object, vm_page_t m)
 		KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("unmanaged %p", m));
 		pmap_remove_all(m);
 		vm_page_lock(m);
-		vm_page_remove(m);
+		(void)vm_page_remove(m);
 		vm_page_unlock(m);
 	} else if (object->type == OBJT_DEVICE)
 		dev_pager_free_page(object, m);

sys/vm/vm_fault.c

@@ -1144,7 +1144,7 @@ RetryFault:;
 			    fs.object == fs.first_object->backing_object) {
 				vm_page_lock(fs.m);
 				vm_page_dequeue(fs.m);
-				vm_page_remove(fs.m);
+				(void)vm_page_remove(fs.m);
 				vm_page_unlock(fs.m);
 				vm_page_lock(fs.first_m);
 				vm_page_replace_checked(fs.m, fs.first_object,

sys/vm/vm_object.c

@@ -1595,10 +1595,8 @@ vm_object_collapse_scan(vm_object_t object, int op)
 			vm_page_lock(p);
 			KASSERT(!pmap_page_is_mapped(p),
 			    ("freeing mapped page %p", p));
-			if (!vm_page_wired(p))
+			if (vm_page_remove(p))
 				vm_page_free(p);
-			else
-				vm_page_remove(p);
 			vm_page_unlock(p);
 			continue;
 		}
@@ -1639,10 +1637,8 @@ vm_object_collapse_scan(vm_object_t object, int op)
 			vm_page_lock(p);
 			KASSERT(!pmap_page_is_mapped(p),
 			    ("freeing mapped page %p", p));
-			if (!vm_page_wired(p))
+			if (vm_page_remove(p))
 				vm_page_free(p);
-			else
-				vm_page_remove(p);
 			vm_page_unlock(p);
 			continue;
 		}

sys/vm/vm_page.c

@@ -1458,20 +1458,21 @@ vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
  *	vm_page_remove:
  *
  *	Removes the specified page from its containing object, but does not
- *	invalidate any backing storage.
+ *	invalidate any backing storage.  Return true if the page may be safely
+ *	freed and false otherwise.
  *
  *	The object must be locked.  The page must be locked if it is managed.
  */
-void
+bool
 vm_page_remove(vm_page_t m)
 {
 	vm_object_t object;
 	vm_page_t mrem;
 
+	object = m->object;
+
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		vm_page_assert_locked(m);
-	if ((object = m->object) == NULL)
-		return;
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	if (vm_page_xbusied(m))
 		vm_page_xunbusy_maybelocked(m);
@@ -1495,6 +1496,7 @@ vm_page_remove(vm_page_t m)
 		vdrop(object->handle);
 
 	m->object = NULL;
+	return (!vm_page_wired(m));
 }
 
 /*
@@ -1665,7 +1667,7 @@ vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
 	 */
 	m->pindex = opidx;
 	vm_page_lock(m);
-	vm_page_remove(m);
+	(void)vm_page_remove(m);
 
 	/* Return back to the new pindex to complete vm_page_insert(). */
 	m->pindex = new_pindex;
@@ -3436,7 +3438,8 @@ vm_page_free_prep(vm_page_t m)
 	if (vm_page_sbusied(m))
 		panic("vm_page_free_prep: freeing busy page %p", m);
 
-	vm_page_remove(m);
+	if (m->object != NULL)
+		(void)vm_page_remove(m);
 
 	/*
 	 * If fictitious remove object association and

sys/vm/vm_page.h

@@ -561,7 +561,7 @@ bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
 bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
     vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
 void vm_page_reference(vm_page_t m);
-void vm_page_remove (vm_page_t);
+bool vm_page_remove(vm_page_t);
 int vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
 vm_page_t vm_page_replace(vm_page_t mnew, vm_object_t object,
     vm_pindex_t pindex);