Avoid page lookups in the top-level object in vm_object_madvise().

We can iterate over consecutive resident pages in the top-level object
using the object's page list rather than by performing lookups in the
object radix tree. This extends one of the optimizations in r312208 to the
case where a shadow chain is present.

Suggested by:	alc
Reviewed by:	alc, kib (previous version)
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D9282
Mark Johnston 2017-01-30 18:51:43 +00:00
parent fab7084f12
commit aa3650ea36
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=312994
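The shape of the change is easier to see outside the kernel. The sketch below is a hypothetical, simplified userspace model (the struct object, struct page, find_least() and apply_advice() names are invented for illustration and are not kernel interfaces); it only mirrors the iteration strategy described in the commit message: one initial lookup for the first resident page, then a walk along the object's sorted resident-page list, with each run of non-resident indices skipped in a single step, which is where the kernel releases swap space for MADV_FREE.

/*
 * Simplified illustration only: hypothetical types and helpers, not the
 * kernel's code.  An "object" keeps its resident pages on a list sorted by
 * pindex; apply_advice() walks that list instead of looking up every index,
 * and skips each non-resident run in a single step.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct page {
	unsigned long pindex;		/* index of the page in the object */
	TAILQ_ENTRY(page) listq;	/* linkage in the resident-page list */
};

struct object {
	TAILQ_HEAD(, page) memq;	/* resident pages, sorted by pindex */
};

/*
 * Return the first resident page at or after idx.  This linear scan stands
 * in for vm_page_find_least(), which the kernel implements with a single
 * radix-tree lookup.
 */
static struct page *
find_least(struct object *obj, unsigned long idx)
{
	struct page *m;

	TAILQ_FOREACH(m, &obj->memq, listq)
		if (m->pindex >= idx)
			return (m);
	return (NULL);
}

/* Visit every index in [start, end) the way the new loop does. */
static void
apply_advice(struct object *obj, unsigned long start, unsigned long end)
{
	struct page *m;
	unsigned long pindex, next;

	for (m = find_least(obj, start), pindex = start; pindex < end;
	    pindex++) {
		if (m == NULL || pindex < m->pindex) {
			/*
			 * The indices in [pindex, next) are not resident;
			 * skip them all at once (this is where the kernel
			 * frees their swap space for MADV_FREE).
			 */
			next = (m != NULL && m->pindex < end) ? m->pindex : end;
			printf("skipping non-resident range [%lu, %lu)\n",
			    pindex, next);
			if ((pindex = next) == end)
				break;
		}
		/* m is now the resident page at pindex; apply the advice. */
		printf("advising resident page %lu\n", m->pindex);
		m = TAILQ_NEXT(m, listq);
	}
}

int
main(void)
{
	static const unsigned long resident[] = { 2, 3, 7, 8, 9 };
	struct object obj;
	struct page *m;
	size_t i;

	TAILQ_INIT(&obj.memq);
	for (i = 0; i < sizeof(resident) / sizeof(resident[0]); i++) {
		m = malloc(sizeof(*m));
		m->pindex = resident[i];
		TAILQ_INSERT_TAIL(&obj.memq, m, listq);
	}
	apply_advice(&obj, 0, 12);
	return (0);
}

In the kernel the same walk happens under the object write lock, vm_page_find_least() performs the one radix-tree lookup, and the shadow-chain case (not modeled in this sketch) still falls back to per-index lookups in the backing objects, as the second hunk of the diff below shows.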


@@ -1074,6 +1074,33 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
 	return (res);
 }
 
+/*
+ * Determine whether the given advice can be applied to the object. Advice is
+ * not applied to unmanaged pages since they never belong to page queues, and
+ * since MADV_FREE is destructive, it can apply only to anonymous pages that
+ * have been mapped at most once.
+ */
+static bool
+vm_object_advice_applies(vm_object_t object, int advice)
+{
+
+	if ((object->flags & OBJ_UNMANAGED) != 0)
+		return (false);
+	if (advice != MADV_FREE)
+		return (true);
+	return ((object->type == OBJT_DEFAULT || object->type == OBJT_SWAP) &&
+	    (object->flags & OBJ_ONEMAPPING) != 0);
+}
+
+static void
+vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex,
+    vm_size_t size)
+{
+
+	if (advice == MADV_FREE && object->type == OBJT_SWAP)
+		swap_pager_freespace(object, pindex, size);
+}
+
 /*
  *	vm_object_madvise:
  *
@@ -1101,100 +1128,102 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
 {
 	vm_pindex_t tpindex;
 	vm_object_t backing_object, tobject;
-	vm_page_t m;
+	vm_page_t m, tm;
 
 	if (object == NULL)
 		return;
 
-	VM_OBJECT_WLOCK(object);
-	for (m = NULL; pindex < end; pindex++) {
 relookup:
+	VM_OBJECT_WLOCK(object);
+	if (!vm_object_advice_applies(object, advice)) {
+		VM_OBJECT_WUNLOCK(object);
+		return;
+	}
+	for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) {
 		tobject = object;
-		tpindex = pindex;
-shadowlookup:
-		/*
-		 * MADV_FREE only operates on OBJT_DEFAULT or OBJT_SWAP pages
-		 * and those pages must be OBJ_ONEMAPPING.
-		 */
-		if (advice == MADV_FREE) {
-			if ((tobject->type != OBJT_DEFAULT &&
-			     tobject->type != OBJT_SWAP) ||
-			    (tobject->flags & OBJ_ONEMAPPING) == 0) {
-				goto unlock_tobject;
-			}
-		} else if ((tobject->flags & OBJ_UNMANAGED) != 0)
-			goto unlock_tobject;
 
 		/*
-		 * In the common case where the object has no backing object, we
-		 * can avoid performing lookups at each pindex.  In either case,
-		 * when applying MADV_FREE we take care to release any swap
-		 * space used to store non-resident pages.
+		 * If the next page isn't resident in the top-level object, we
+		 * need to search the shadow chain.  When applying MADV_FREE, we
+		 * take care to release any swap space used to store
+		 * non-resident pages.
 		 */
-		if (object->backing_object == NULL) {
-			m = (m != NULL) ? TAILQ_NEXT(m, listq) :
-			    vm_page_find_least(object, pindex);
-			tpindex = (m != NULL && m->pindex < end) ?
-			    m->pindex : end;
-			if (advice == MADV_FREE && object->type == OBJT_SWAP &&
-			    tpindex > pindex)
-				swap_pager_freespace(object, pindex,
-				    tpindex - pindex);
-			if ((pindex = tpindex) == end)
-				break;
-		} else if ((m = vm_page_lookup(tobject, tpindex)) == NULL) {
-			if (advice == MADV_FREE && tobject->type == OBJT_SWAP)
-				swap_pager_freespace(tobject, tpindex, 1);
+		if (m == NULL || pindex < m->pindex) {
 			/*
-			 * Prepare to search the next object in the chain.
+			 * Optimize a common case: if the top-level object has
+			 * no backing object, we can skip over the non-resident
+			 * range in constant time.
 			 */
-			backing_object = tobject->backing_object;
-			if (backing_object == NULL)
-				goto unlock_tobject;
-			VM_OBJECT_WLOCK(backing_object);
-			tpindex += OFF_TO_IDX(tobject->backing_object_offset);
-			if (tobject != object)
-				VM_OBJECT_WUNLOCK(tobject);
-			tobject = backing_object;
-			goto shadowlookup;
+			if (object->backing_object == NULL) {
+				tpindex = (m != NULL && m->pindex < end) ?
+				    m->pindex : end;
+				vm_object_madvise_freespace(object, advice,
+				    pindex, tpindex - pindex);
+				if ((pindex = tpindex) == end)
+					break;
+				goto next_page;
+			}
+
+			tpindex = pindex;
+			do {
+				vm_object_madvise_freespace(tobject, advice,
+				    tpindex, 1);
+				/*
+				 * Prepare to search the next object in the
+				 * chain.
+				 */
+				backing_object = tobject->backing_object;
+				if (backing_object == NULL)
+					goto next_pindex;
+				VM_OBJECT_WLOCK(backing_object);
+				tpindex +=
+				    OFF_TO_IDX(tobject->backing_object_offset);
+				if (tobject != object)
+					VM_OBJECT_WUNLOCK(tobject);
+				tobject = backing_object;
+				if (!vm_object_advice_applies(tobject, advice))
+					goto next_pindex;
+			} while ((tm = vm_page_lookup(tobject, tpindex)) ==
+			    NULL);
+		} else {
+next_page:
+			tm = m;
+			m = TAILQ_NEXT(m, listq);
 		}
 
 		/*
 		 * If the page is not in a normal state, skip it.
 		 */
-		if (m->valid != VM_PAGE_BITS_ALL)
-			goto unlock_tobject;
-		vm_page_lock(m);
-		if (m->hold_count != 0 || m->wire_count != 0) {
-			vm_page_unlock(m);
-			goto unlock_tobject;
+		if (tm->valid != VM_PAGE_BITS_ALL)
+			goto next_pindex;
+		vm_page_lock(tm);
+		if (tm->hold_count != 0 || tm->wire_count != 0) {
+			vm_page_unlock(tm);
+			goto next_pindex;
 		}
-		KASSERT((m->flags & PG_FICTITIOUS) == 0,
-		    ("vm_object_madvise: page %p is fictitious", m));
-		KASSERT((m->oflags & VPO_UNMANAGED) == 0,
-		    ("vm_object_madvise: page %p is not managed", m));
-		if (vm_page_busied(m)) {
+		KASSERT((tm->flags & PG_FICTITIOUS) == 0,
+		    ("vm_object_madvise: page %p is fictitious", tm));
+		KASSERT((tm->oflags & VPO_UNMANAGED) == 0,
+		    ("vm_object_madvise: page %p is not managed", tm));
+		if (vm_page_busied(tm)) {
+			if (object != tobject)
+				VM_OBJECT_WUNLOCK(tobject);
+			VM_OBJECT_WUNLOCK(object);
 			if (advice == MADV_WILLNEED) {
 				/*
 				 * Reference the page before unlocking and
 				 * sleeping so that the page daemon is less
-				 * likely to reclaim it. 
+				 * likely to reclaim it.
 				 */
-				vm_page_aflag_set(m, PGA_REFERENCED);
+				vm_page_aflag_set(tm, PGA_REFERENCED);
 			}
-			if (object != tobject)
-				VM_OBJECT_WUNLOCK(object);
-			VM_OBJECT_WUNLOCK(tobject);
-			vm_page_busy_sleep(m, "madvpo", false);
-			m = NULL;
-			VM_OBJECT_WLOCK(object);
+			vm_page_busy_sleep(tm, "madvpo", false);
 			goto relookup;
 		}
-		vm_page_advise(m, advice);
-		vm_page_unlock(m);
-		if (advice == MADV_FREE && tobject->type == OBJT_SWAP)
-			swap_pager_freespace(tobject, tpindex, 1);
-unlock_tobject:
+		vm_page_advise(tm, advice);
+		vm_page_unlock(tm);
+		vm_object_madvise_freespace(tobject, advice, tm->pindex, 1);
+next_pindex:
 		if (tobject != object)
 			VM_OBJECT_WUNLOCK(tobject);
 	}