Remove most of the code for implementing PG_CACHED pages. (This change does
not remove user-space visible fields from vm_cnt or all of the references to
cached pages from comments. Those changes will come later.)

Reviewed by:	kib, markj
Tested by:	pho
Sponsored by:	Dell EMC Isilon
Differential Revision:	https://reviews.freebsd.org/D8497
commit 7667839a7e
parent 08dc89a621
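For context, the caller-visible effect is that the VM_ALLOC_IFCACHED and VM_ALLOC_IFNOTCACHED request flags disappear from vm_page_alloc(). A minimal sketch of the pattern that repeats throughout the diff below (kernel context assumed; object, pindex and p stand in for a caller's own variables, so this is illustrative rather than buildable on its own):

	/* Before: skip the allocation when a cached copy of the page exists. */
	p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED);

	/* After: there is no page cache to consult, so always request a page. */
	p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
	if (p == NULL)
		return (NULL);	/* allocation can still fail under memory pressure */

Callers that used VM_ALLOC_IFCACHED only to opportunistically reactivate a cached copy, such as page_busy() and vm_object_madvise(), simply lose that branch.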
@@ -75,8 +75,7 @@ vn_is_readonly(vnode_t *vp)
 #define vn_mountedvfs(vp) ((vp)->v_mountedhere)
 #define vn_has_cached_data(vp) \
 ((vp)->v_object != NULL && \
-((vp)->v_object->resident_page_count > 0 || \
-!vm_object_cache_is_empty((vp)->v_object)))
+(vp)->v_object->resident_page_count > 0)
 #define vn_exists(vp) do { } while (0)
 #define vn_invalid(vp) do { } while (0)
 #define vn_renamepath(tdvp, svp, tnm, lentnm) do { } while (0)
@@ -426,10 +426,6 @@ page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
 continue;
 }
 vm_page_sbusy(pp);
-} else if (pp == NULL) {
-pp = vm_page_alloc(obj, OFF_TO_IDX(start),
-VM_ALLOC_SYSTEM | VM_ALLOC_IFCACHED |
-VM_ALLOC_SBUSY);
 } else {
 ASSERT(pp != NULL && !pp->valid);
 pp = NULL;
@@ -1372,12 +1372,9 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
 VM_WAIT;
 VM_OBJECT_WLOCK(uobj);
 goto retry;
-} else if (m->valid != VM_PAGE_BITS_ALL)
-rv = vm_pager_get_pages(uobj, &m, 1,
-NULL, NULL);
-else
-/* A cached page was reactivated. */
-rv = VM_PAGER_OK;
+}
+rv = vm_pager_get_pages(uobj, &m, 1, NULL,
+NULL);
 vm_page_lock(m);
 if (rv == VM_PAGER_OK) {
 vm_page_deactivate(m);
@@ -1006,7 +1006,7 @@ exec_map_first_page(imgp)
 break;
 } else {
 ma[i] = vm_page_alloc(object, i,
-VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED);
+VM_ALLOC_NORMAL);
 if (ma[i] == NULL)
 break;
 }
@@ -455,12 +455,9 @@ shm_dotruncate(struct shmfd *shmfd, off_t length)
 VM_WAIT;
 VM_OBJECT_WLOCK(object);
 goto retry;
-} else if (m->valid != VM_PAGE_BITS_ALL)
-rv = vm_pager_get_pages(object, &m, 1,
-NULL, NULL);
-else
-/* A cached page was reactivated. */
-rv = VM_PAGER_OK;
+}
+rv = vm_pager_get_pages(object, &m, 1, NULL,
+NULL);
 vm_page_lock(m);
 if (rv == VM_PAGER_OK) {
 vm_page_deactivate(m);
@@ -1126,7 +1126,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
 if (shift != 0) {
 for (i = 1; i <= shift; i++) {
 p = vm_page_alloc(object, m[0]->pindex - i,
-VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED);
+VM_ALLOC_NORMAL);
 if (p == NULL) {
 /* Shift allocated pages to the left. */
 for (j = 0; j < i - 1; j++)
@@ -1144,8 +1144,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
 if (rahead != NULL) {
 for (i = 0; i < *rahead; i++) {
 p = vm_page_alloc(object,
-m[reqcount - 1]->pindex + i + 1,
-VM_ALLOC_NORMAL | VM_ALLOC_IFNOTCACHED);
+m[reqcount - 1]->pindex + i + 1, VM_ALLOC_NORMAL);
 if (p == NULL)
 break;
 bp->b_pages[shift + reqcount + i] = p;
@@ -559,8 +559,7 @@ RetryFault:;
 unlock_and_deallocate(&fs);
 VM_WAITPFAULT;
 goto RetryFault;
-} else if (fs.m->valid == VM_PAGE_BITS_ALL)
-break;
+}
 }
 
 readrest:
@@ -877,9 +877,6 @@ sys_mincore(td, uap)
 pindex = OFF_TO_IDX(current->offset +
 (addr - current->start));
 m = vm_page_lookup(object, pindex);
-if (m == NULL &&
-vm_page_is_cached(object, pindex))
-mincoreinfo = MINCORE_INCORE;
 if (m != NULL && m->valid == 0)
 m = NULL;
 if (m != NULL)
@@ -178,9 +178,6 @@ vm_object_zdtor(void *mem, int size, void *arg)
 ("object %p has reservations",
 object));
 #endif
-KASSERT(vm_object_cache_is_empty(object),
-("object %p has cached pages",
-object));
 KASSERT(object->paging_in_progress == 0,
 ("object %p paging_in_progress = %d",
 object, object->paging_in_progress));
@@ -212,8 +209,6 @@ vm_object_zinit(void *mem, int size, int flags)
 object->paging_in_progress = 0;
 object->resident_page_count = 0;
 object->shadow_count = 0;
-object->cache.rt_root = 0;
-object->cache.rt_flags = 0;
 
 mtx_lock(&vm_object_list_mtx);
 TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
@@ -792,8 +787,6 @@ vm_object_terminate(vm_object_t object)
 if (__predict_false(!LIST_EMPTY(&object->rvq)))
 vm_reserv_break_all(object);
 #endif
-if (__predict_false(!vm_object_cache_is_empty(object)))
-vm_page_cache_free(object, 0, 0);
 
 KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT ||
 object->type == OBJT_SWAP,
@@ -1135,13 +1128,6 @@ vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end,
 } else if ((tobject->flags & OBJ_UNMANAGED) != 0)
 goto unlock_tobject;
 m = vm_page_lookup(tobject, tpindex);
-if (m == NULL && advise == MADV_WILLNEED) {
-/*
-* If the page is cached, reactivate it.
-*/
-m = vm_page_alloc(tobject, tpindex, VM_ALLOC_IFCACHED |
-VM_ALLOC_NOBUSY);
-}
 if (m == NULL) {
 /*
 * There may be swap even if there is no backing page
@@ -1406,19 +1392,6 @@ vm_object_split(vm_map_entry_t entry)
 swap_pager_copy(orig_object, new_object, offidxstart, 0);
 TAILQ_FOREACH(m, &new_object->memq, listq)
 vm_page_xunbusy(m);
-
-/*
-* Transfer any cached pages from orig_object to new_object.
-* If swap_pager_copy() found swapped out pages within the
-* specified range of orig_object, then it changed
-* new_object's type to OBJT_SWAP when it transferred those
-* pages to new_object. Otherwise, new_object's type
-* should still be OBJT_DEFAULT and orig_object should not
-* contain any cached pages within the specified range.
-*/
-if (__predict_false(!vm_object_cache_is_empty(orig_object)))
-vm_page_cache_transfer(orig_object, offidxstart,
-new_object);
 }
 VM_OBJECT_WUNLOCK(orig_object);
 VM_OBJECT_WUNLOCK(new_object);
@@ -1754,13 +1727,6 @@ vm_object_collapse(vm_object_t object)
 backing_object,
 object,
 OFF_TO_IDX(object->backing_object_offset), TRUE);
-
-/*
-* Free any cached pages from backing_object.
-*/
-if (__predict_false(
-!vm_object_cache_is_empty(backing_object)))
-vm_page_cache_free(backing_object, 0, 0);
 }
 /*
 * Object now shadows whatever backing_object did.
@@ -1889,7 +1855,7 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
 (options & (OBJPR_CLEANONLY | OBJPR_NOTMAPPED)) == OBJPR_NOTMAPPED,
 ("vm_object_page_remove: illegal options for object %p", object));
 if (object->resident_page_count == 0)
-goto skipmemq;
+return;
 vm_object_pip_add(object, 1);
 again:
 p = vm_page_find_least(object, start);
@@ -1946,9 +1912,6 @@ vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end,
 vm_page_unlock(p);
 }
 vm_object_pip_wakeup(object);
-skipmemq:
-if (__predict_false(!vm_object_cache_is_empty(object)))
-vm_page_cache_free(object, start, end);
 }
 
 /*
@@ -118,7 +118,6 @@ struct vm_object {
 vm_ooffset_t backing_object_offset;/* Offset in backing object */
 TAILQ_ENTRY(vm_object) pager_object_list; /* list of all objects of this pager type */
 LIST_HEAD(, vm_reserv) rvq; /* list of reservations */
-struct vm_radix cache; /* (o + f) root of the cache page radix trie */
 void *handle;
 union {
 /*
@@ -290,13 +289,6 @@ void vm_object_pip_wakeup(vm_object_t object);
 void vm_object_pip_wakeupn(vm_object_t object, short i);
 void vm_object_pip_wait(vm_object_t object, char *waitid);
 
-static __inline boolean_t
-vm_object_cache_is_empty(vm_object_t object)
-{
-
-return (vm_radix_is_empty(&object->cache));
-}
-
 void umtx_shm_object_init(vm_object_t object);
 void umtx_shm_object_terminated(vm_object_t object);
 extern int umtx_shm_vnobj_persistent;
364 sys/vm/vm_page.c
@@ -154,8 +154,7 @@ static int vm_pageout_pages_needed;
 
 static uma_zone_t fakepg_zone;
 
-static struct vnode *vm_page_alloc_init(vm_page_t m);
-static void vm_page_cache_turn_free(vm_page_t m);
+static void vm_page_alloc_check(vm_page_t m);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_enqueue(uint8_t queue, vm_page_t m);
 static void vm_page_free_wakeup(void);
@@ -1118,9 +1117,7 @@ void
 vm_page_dirty_KBI(vm_page_t m)
 {
 
-/* These assertions refer to this operation by its public name. */
-KASSERT((m->flags & PG_CACHED) == 0,
-("vm_page_dirty: page in cache!"));
+/* Refer to this operation by its public name. */
 KASSERT(m->valid == VM_PAGE_BITS_ALL,
 ("vm_page_dirty: page is invalid!"));
 m->dirty = VM_PAGE_BITS_ALL;
@@ -1458,142 +1455,6 @@ vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
 return (0);
 }
 
-/*
-* Convert all of the given object's cached pages that have a
-* pindex within the given range into free pages. If the value
-* zero is given for "end", then the range's upper bound is
-* infinity. If the given object is backed by a vnode and it
-* transitions from having one or more cached pages to none, the
-* vnode's hold count is reduced.
-*/
-void
-vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
-{
-vm_page_t m;
-boolean_t empty;
-
-mtx_lock(&vm_page_queue_free_mtx);
-if (__predict_false(vm_radix_is_empty(&object->cache))) {
-mtx_unlock(&vm_page_queue_free_mtx);
-return;
-}
-while ((m = vm_radix_lookup_ge(&object->cache, start)) != NULL) {
-if (end != 0 && m->pindex >= end)
-break;
-vm_radix_remove(&object->cache, m->pindex);
-vm_page_cache_turn_free(m);
-}
-empty = vm_radix_is_empty(&object->cache);
-mtx_unlock(&vm_page_queue_free_mtx);
-if (object->type == OBJT_VNODE && empty)
-vdrop(object->handle);
-}
-
-/*
-* Returns the cached page that is associated with the given
-* object and offset. If, however, none exists, returns NULL.
-*
-* The free page queue must be locked.
-*/
-static inline vm_page_t
-vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
-{
-
-mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-return (vm_radix_lookup(&object->cache, pindex));
-}
-
-/*
-* Remove the given cached page from its containing object's
-* collection of cached pages.
-*
-* The free page queue must be locked.
-*/
-static void
-vm_page_cache_remove(vm_page_t m)
-{
-
-mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-KASSERT((m->flags & PG_CACHED) != 0,
-("vm_page_cache_remove: page %p is not cached", m));
-vm_radix_remove(&m->object->cache, m->pindex);
-m->object = NULL;
-vm_cnt.v_cache_count--;
-}
-
-/*
-* Transfer all of the cached pages with offset greater than or
-* equal to 'offidxstart' from the original object's cache to the
-* new object's cache. However, any cached pages with offset
-* greater than or equal to the new object's size are kept in the
-* original object. Initially, the new object's cache must be
-* empty. Offset 'offidxstart' in the original object must
-* correspond to offset zero in the new object.
-*
-* The new object must be locked.
-*/
-void
-vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
-vm_object_t new_object)
-{
-vm_page_t m;
-
-/*
-* Insertion into an object's collection of cached pages
-* requires the object to be locked. In contrast, removal does
-* not.
-*/
-VM_OBJECT_ASSERT_WLOCKED(new_object);
-KASSERT(vm_radix_is_empty(&new_object->cache),
-("vm_page_cache_transfer: object %p has cached pages",
-new_object));
-mtx_lock(&vm_page_queue_free_mtx);
-while ((m = vm_radix_lookup_ge(&orig_object->cache,
-offidxstart)) != NULL) {
-/*
-* Transfer all of the pages with offset greater than or
-* equal to 'offidxstart' from the original object's
-* cache to the new object's cache.
-*/
-if ((m->pindex - offidxstart) >= new_object->size)
-break;
-vm_radix_remove(&orig_object->cache, m->pindex);
-/* Update the page's object and offset. */
-m->object = new_object;
-m->pindex -= offidxstart;
-if (vm_radix_insert(&new_object->cache, m))
-vm_page_cache_turn_free(m);
-}
-mtx_unlock(&vm_page_queue_free_mtx);
-}
-
-/*
-* Returns TRUE if a cached page is associated with the given object and
-* offset, and FALSE otherwise.
-*
-* The object must be locked.
-*/
-boolean_t
-vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
-{
-vm_page_t m;
-
-/*
-* Insertion into an object's collection of cached pages requires the
-* object to be locked. Therefore, if the object is locked and the
-* object's collection is empty, there is no need to acquire the free
-* page queues lock in order to prove that the specified page doesn't
-* exist.
-*/
-VM_OBJECT_ASSERT_WLOCKED(object);
-if (__predict_true(vm_object_cache_is_empty(object)))
-return (FALSE);
-mtx_lock(&vm_page_queue_free_mtx);
-m = vm_page_cache_lookup(object, pindex);
-mtx_unlock(&vm_page_queue_free_mtx);
-return (m != NULL);
-}
-
 /*
 * vm_page_alloc:
 *
@@ -1610,9 +1471,6 @@ vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
 * optional allocation flags:
 * VM_ALLOC_COUNT(number) the number of additional pages that the caller
 * intends to allocate
-* VM_ALLOC_IFCACHED return page only if it is cached
-* VM_ALLOC_IFNOTCACHED return NULL, do not reactivate if the page
-* is cached
 * VM_ALLOC_NOBUSY do not exclusive busy the page
 * VM_ALLOC_NODUMP do not include the page in a kernel core dump
 * VM_ALLOC_NOOBJ page is not associated with an object and
@@ -1626,8 +1484,6 @@ vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
 vm_page_t
 vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 {
-struct vnode *vp = NULL;
-vm_object_t m_object;
 vm_page_t m, mpred;
 int flags, req_class;
 
@@ -1670,31 +1526,12 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 * Allocate from the free queue if the number of free pages
 * exceeds the minimum for the request class.
 */
-if (object != NULL &&
-(m = vm_page_cache_lookup(object, pindex)) != NULL) {
-if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
-mtx_unlock(&vm_page_queue_free_mtx);
-return (NULL);
-}
-if (vm_phys_unfree_page(m))
-vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
-#if VM_NRESERVLEVEL > 0
-else if (!vm_reserv_reactivate_page(m))
-#else
-else
-#endif
-panic("vm_page_alloc: cache page %p is missing"
-" from the free queue", m);
-} else if ((req & VM_ALLOC_IFCACHED) != 0) {
-mtx_unlock(&vm_page_queue_free_mtx);
-return (NULL);
 #if VM_NRESERVLEVEL > 0
-} else if (object == NULL || (object->flags & (OBJ_COLORED |
+if (object == NULL || (object->flags & (OBJ_COLORED |
 OBJ_FICTITIOUS)) != OBJ_COLORED || (m =
-vm_reserv_alloc_page(object, pindex, mpred)) == NULL) {
-#else
-} else {
+vm_reserv_alloc_page(object, pindex, mpred)) == NULL)
 #endif
+{
 m = vm_phys_alloc_pages(object != NULL ?
 VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
 #if VM_NRESERVLEVEL > 0
@@ -1720,33 +1557,9 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 * At this point we had better have found a good page.
 */
 KASSERT(m != NULL, ("vm_page_alloc: missing page"));
-KASSERT(m->queue == PQ_NONE,
-("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
-KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
-KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
-KASSERT(!vm_page_busied(m), ("vm_page_alloc: page %p is busy", m));
-KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
-KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
-("vm_page_alloc: page %p has unexpected memattr %d", m,
-pmap_page_get_memattr(m)));
-if ((m->flags & PG_CACHED) != 0) {
-KASSERT((m->flags & PG_ZERO) == 0,
-("vm_page_alloc: cached page %p is PG_ZERO", m));
-KASSERT(m->valid != 0,
-("vm_page_alloc: cached page %p is invalid", m));
-if (m->object != object || m->pindex != pindex)
-m->valid = 0;
-m_object = m->object;
-vm_page_cache_remove(m);
-if (m_object->type == OBJT_VNODE &&
-vm_object_cache_is_empty(m_object))
-vp = m_object->handle;
-} else {
-KASSERT(m->valid == 0,
-("vm_page_alloc: free page %p is valid", m));
-vm_phys_freecnt_adj(m, -1);
-}
+vm_phys_freecnt_adj(m, -1);
 mtx_unlock(&vm_page_queue_free_mtx);
+vm_page_alloc_check(m);
 
 /*
 * Initialize the page. Only the PG_ZERO flag is inherited.
@@ -1778,9 +1591,6 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 
 if (object != NULL) {
 if (vm_page_insert_after(m, object, pindex, mpred)) {
-/* See the comment below about hold count. */
-if (vp != NULL)
-vdrop(vp);
 pagedaemon_wakeup();
 if (req & VM_ALLOC_WIRED) {
 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
@@ -1800,15 +1610,6 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 } else
 m->pindex = pindex;
 
-/*
-* The following call to vdrop() must come after the above call
-* to vm_page_insert() in case both affect the same object and
-* vnode. Otherwise, the affected vnode's hold count could
-* temporarily become zero.
-*/
-if (vp != NULL)
-vdrop(vp);
-
 /*
 * Don't wakeup too often - wakeup the pageout daemon when
 * we would be nearly out of memory.
@@ -1819,16 +1620,6 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
 return (m);
 }
 
-static void
-vm_page_alloc_contig_vdrop(struct spglist *lst)
-{
-
-while (!SLIST_EMPTY(lst)) {
-vdrop((struct vnode *)SLIST_FIRST(lst)->plinks.s.pv);
-SLIST_REMOVE_HEAD(lst, plinks.s.ss);
-}
-}
-
 /*
 * vm_page_alloc_contig:
 *
@@ -1873,8 +1664,6 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
 u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
 vm_paddr_t boundary, vm_memattr_t memattr)
 {
-struct vnode *drop;
-struct spglist deferred_vdrop_list;
 vm_page_t m, m_tmp, m_ret;
 u_int flags;
 int req_class;
@@ -1900,7 +1689,6 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
 if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
 req_class = VM_ALLOC_SYSTEM;
 
-SLIST_INIT(&deferred_vdrop_list);
 mtx_lock(&vm_page_queue_free_mtx);
 if (vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
 vm_cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
@@ -1922,17 +1710,7 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
 return (NULL);
 }
 if (m_ret != NULL)
-for (m = m_ret; m < &m_ret[npages]; m++) {
-drop = vm_page_alloc_init(m);
-if (drop != NULL) {
-/*
-* Enqueue the vnode for deferred vdrop().
-*/
-m->plinks.s.pv = drop;
-SLIST_INSERT_HEAD(&deferred_vdrop_list, m,
-plinks.s.ss);
-}
-}
+vm_phys_freecnt_adj(m_ret, -npages);
 else {
 #if VM_NRESERVLEVEL > 0
 if (vm_reserv_reclaim_contig(npages, low, high, alignment,
@@ -1943,6 +1721,8 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
 mtx_unlock(&vm_page_queue_free_mtx);
 if (m_ret == NULL)
 return (NULL);
+for (m = m_ret; m < &m_ret[npages]; m++)
+vm_page_alloc_check(m);
 
 /*
 * Initialize the pages. Only the PG_ZERO flag is inherited.
@@ -1975,8 +1755,6 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
 m->oflags = VPO_UNMANAGED;
 if (object != NULL) {
 if (vm_page_insert(m, object, pindex)) {
-vm_page_alloc_contig_vdrop(
-&deferred_vdrop_list);
 if (vm_paging_needed())
 pagedaemon_wakeup();
 if ((req & VM_ALLOC_WIRED) != 0)
@@ -2001,57 +1779,28 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
 pmap_page_set_memattr(m, memattr);
 pindex++;
 }
-vm_page_alloc_contig_vdrop(&deferred_vdrop_list);
 if (vm_paging_needed())
 pagedaemon_wakeup();
 return (m_ret);
 }
 
 /*
-* Initialize a page that has been freshly dequeued from a freelist.
-* The caller has to drop the vnode returned, if it is not NULL.
-*
-* This function may only be used to initialize unmanaged pages.
-*
-* To be called with vm_page_queue_free_mtx held.
+* Check a page that has been freshly dequeued from a freelist.
 */
-static struct vnode *
-vm_page_alloc_init(vm_page_t m)
+static void
+vm_page_alloc_check(vm_page_t m)
 {
-struct vnode *drop;
-vm_object_t m_object;
 
 KASSERT(m->queue == PQ_NONE,
-("vm_page_alloc_init: page %p has unexpected queue %d",
-m, m->queue));
-KASSERT(m->wire_count == 0,
-("vm_page_alloc_init: page %p is wired", m));
-KASSERT(m->hold_count == 0,
-("vm_page_alloc_init: page %p is held", m));
-KASSERT(!vm_page_busied(m),
-("vm_page_alloc_init: page %p is busy", m));
-KASSERT(m->dirty == 0,
-("vm_page_alloc_init: page %p is dirty", m));
+("page %p has unexpected queue %d", m, m->queue));
+KASSERT(m->wire_count == 0, ("page %p is wired", m));
+KASSERT(m->hold_count == 0, ("page %p is held", m));
+KASSERT(!vm_page_busied(m), ("page %p is busy", m));
+KASSERT(m->dirty == 0, ("page %p is dirty", m));
 KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
-("vm_page_alloc_init: page %p has unexpected memattr %d",
+("page %p has unexpected memattr %d",
 m, pmap_page_get_memattr(m)));
-mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-drop = NULL;
-if ((m->flags & PG_CACHED) != 0) {
-KASSERT((m->flags & PG_ZERO) == 0,
-("vm_page_alloc_init: cached page %p is PG_ZERO", m));
-m->valid = 0;
-m_object = m->object;
-vm_page_cache_remove(m);
-if (m_object->type == OBJT_VNODE &&
-vm_object_cache_is_empty(m_object))
-drop = m_object->handle;
-} else {
-KASSERT(m->valid == 0,
-("vm_page_alloc_init: free page %p is valid", m));
-vm_phys_freecnt_adj(m, -1);
-}
-return (drop);
+KASSERT(m->valid == 0, ("free page %p is valid", m));
 }
 
 /*
@@ -2077,7 +1826,6 @@ vm_page_alloc_init(m)
 vm_page_t
 vm_page_alloc_freelist(int flind, int req)
 {
-struct vnode *drop;
 vm_page_t m;
 u_int flags;
 int req_class;
@@ -2111,8 +1859,9 @@ vm_page_alloc_freelist(int flind, int req)
 mtx_unlock(&vm_page_queue_free_mtx);
 return (NULL);
 }
-drop = vm_page_alloc_init(m);
+vm_phys_freecnt_adj(m, -1);
 mtx_unlock(&vm_page_queue_free_mtx);
+vm_page_alloc_check(m);
 
 /*
 * Initialize the page. Only the PG_ZERO flag is inherited.
@@ -2132,8 +1881,6 @@ vm_page_alloc_freelist(int flind, int req)
 }
 /* Unmanaged pages don't use "act_count". */
 m->oflags = VPO_UNMANAGED;
-if (drop != NULL)
-vdrop(drop);
 if (vm_paging_needed())
 pagedaemon_wakeup();
 return (m);
@@ -2259,38 +2006,8 @@ vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
 /* Don't care: PG_NODUMP, PG_ZERO. */
 if (object->type != OBJT_DEFAULT &&
 object->type != OBJT_SWAP &&
-object->type != OBJT_VNODE)
+object->type != OBJT_VNODE) {
 run_ext = 0;
-else if ((m->flags & PG_CACHED) != 0 ||
-m != vm_page_lookup(object, m->pindex)) {
-/*
-* The page is cached or recently converted
-* from cached to free.
-*/
-#if VM_NRESERVLEVEL > 0
-if (level >= 0) {
-/*
-* The page is reserved. Extend the
-* current run by one page.
-*/
-run_ext = 1;
-} else
-#endif
-if ((order = m->order) < VM_NFREEORDER) {
-/*
-* The page is enqueued in the
-* physical memory allocator's cache/
-* free page queues. Moreover, it is
-* the first page in a power-of-two-
-* sized run of contiguous cache/free
-* pages. Add these pages to the end
-* of the current run, and jump
-* ahead.
-*/
-run_ext = 1 << order;
-m_inc = 1 << order;
-} else
-run_ext = 0;
 #if VM_NRESERVLEVEL > 0
 } else if ((options & VPSC_NOSUPER) != 0 &&
 (level = vm_reserv_level_iffullpop(m)) >= 0) {
@@ -2457,15 +2174,7 @@ vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
 object->type != OBJT_SWAP &&
 object->type != OBJT_VNODE)
 error = EINVAL;
-else if ((m->flags & PG_CACHED) != 0 ||
-m != vm_page_lookup(object, m->pindex)) {
-/*
-* The page is cached or recently converted
-* from cached to free.
-*/
-VM_OBJECT_WUNLOCK(object);
-goto cached;
-} else if (object->memattr != VM_MEMATTR_DEFAULT)
+else if (object->memattr != VM_MEMATTR_DEFAULT)
 error = EINVAL;
 else if (m->queue != PQ_NONE && !vm_page_busied(m)) {
 KASSERT(pmap_page_get_memattr(m) ==
@@ -2566,7 +2275,6 @@ vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
 unlock:
 VM_OBJECT_WUNLOCK(object);
 } else {
-cached:
 mtx_lock(&vm_page_queue_free_mtx);
 order = m->order;
 if (order < VM_NFREEORDER) {
@@ -2963,27 +2671,6 @@ vm_page_free_wakeup(void)
 }
 }
 
-/*
-* Turn a cached page into a free page, by changing its attributes.
-* Keep the statistics up-to-date.
-*
-* The free page queue must be locked.
-*/
-static void
-vm_page_cache_turn_free(vm_page_t m)
-{
-
-mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-
-m->object = NULL;
-m->valid = 0;
-KASSERT((m->flags & PG_CACHED) != 0,
-("vm_page_cache_turn_free: page %p is not cached", m));
-m->flags &= ~PG_CACHED;
-vm_cnt.v_cache_count--;
-vm_phys_freecnt_adj(m, 1);
-}
-
 /*
 * vm_page_free_toq:
 *
@@ -3383,8 +3070,7 @@ vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
 VM_WAIT;
 VM_OBJECT_WLOCK(object);
 goto retrylookup;
-} else if (m->valid != 0)
-return (m);
+}
 if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
 pmap_zero_page(m);
 return (m);
@@ -326,7 +326,6 @@ extern struct mtx_padalign pa_lock[];
 * Page flags. If changed at any other time than page allocation or
 * freeing, the modification must be protected by the vm_page lock.
 */
-#define PG_CACHED 0x0001 /* page is cached */
 #define PG_FICTITIOUS 0x0004 /* physical page doesn't exist */
 #define PG_ZERO 0x0008 /* page is zeroed */
 #define PG_MARKER 0x0010 /* special queue marker page */
@@ -409,8 +408,6 @@ vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 #define VM_ALLOC_ZERO 0x0040 /* (acfg) Try to obtain a zeroed page */
 #define VM_ALLOC_NOOBJ 0x0100 /* (acg) No associated object */
 #define VM_ALLOC_NOBUSY 0x0200 /* (acg) Do not busy the page */
-#define VM_ALLOC_IFCACHED 0x0400 /* (ag) Fail if page is not cached */
-#define VM_ALLOC_IFNOTCACHED 0x0800 /* (ag) Fail if page is cached */
 #define VM_ALLOC_IGN_SBUSY 0x1000 /* (g) Ignore shared busy flag */
 #define VM_ALLOC_NODUMP 0x2000 /* (ag) don't include in dump */
 #define VM_ALLOC_SBUSY 0x4000 /* (acg) Shared busy the page */
@@ -453,8 +450,6 @@ vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
 vm_paddr_t boundary, vm_memattr_t memattr);
 vm_page_t vm_page_alloc_freelist(int, int);
 vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
-void vm_page_cache_free(vm_object_t, vm_pindex_t, vm_pindex_t);
-void vm_page_cache_transfer(vm_object_t, vm_pindex_t, vm_object_t);
 int vm_page_try_to_free (vm_page_t);
 void vm_page_deactivate (vm_page_t);
 void vm_page_deactivate_noreuse(vm_page_t);
@@ -464,7 +459,6 @@ vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
 vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
 void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
 int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
-boolean_t vm_page_is_cached(vm_object_t object, vm_pindex_t pindex);
 void vm_page_launder(vm_page_t m);
 vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);
@@ -907,45 +907,6 @@ vm_reserv_level_iffullpop(vm_page_t m)
 return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
 }
 
-/*
-* Prepare for the reactivation of a cached page.
-*
-* First, suppose that the given page "m" was allocated individually, i.e., not
-* as part of a reservation, and cached. Then, suppose a reservation
-* containing "m" is allocated by the same object. Although "m" and the
-* reservation belong to the same object, "m"'s pindex may not match the
-* reservation's.
-*
-* The free page queue must be locked.
-*/
-boolean_t
-vm_reserv_reactivate_page(vm_page_t m)
-{
-vm_reserv_t rv;
-int index;
-
-mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-rv = vm_reserv_from_page(m);
-if (rv->object == NULL)
-return (FALSE);
-KASSERT((m->flags & PG_CACHED) != 0,
-("vm_reserv_reactivate_page: page %p is not cached", m));
-if (m->object == rv->object &&
-m->pindex - rv->pindex == (index = VM_RESERV_INDEX(m->object,
-m->pindex)))
-vm_reserv_populate(rv, index);
-else {
-KASSERT(rv->inpartpopq,
-("vm_reserv_reactivate_page: reserv %p's inpartpopq is FALSE",
-rv));
-TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
-rv->inpartpopq = FALSE;
-/* Don't release "m" to the physical memory allocator. */
-vm_reserv_break(rv, m);
-}
-return (TRUE);
-}
-
 /*
 * Breaks the given partially-populated reservation, releasing its cached and
 * free pages to the physical memory allocator.
@@ -56,7 +56,6 @@ void vm_reserv_init(void);
 bool vm_reserv_is_page_free(vm_page_t m);
 int vm_reserv_level(vm_page_t m);
 int vm_reserv_level_iffullpop(vm_page_t m);
-boolean_t vm_reserv_reactivate_page(vm_page_t m);
 boolean_t vm_reserv_reclaim_contig(u_long npages, vm_paddr_t low,
 vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
 boolean_t vm_reserv_reclaim_inactive(void);
@@ -466,10 +466,6 @@ vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
 * replacement from working properly.
 */
 vm_page_clear_dirty(m, base, PAGE_SIZE - base);
-} else if ((nsize & PAGE_MASK) &&
-vm_page_is_cached(object, OFF_TO_IDX(nsize))) {
-vm_page_cache_free(object, OFF_TO_IDX(nsize),
-nobjsize);
 }
 }
 object->un_pager.vnp.vnp_size = nsize;
@@ -894,8 +890,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
 for (tpindex = m[0]->pindex - 1;
 tpindex >= startpindex && tpindex < m[0]->pindex;
 tpindex--, i++) {
-p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
-VM_ALLOC_IFNOTCACHED);
+p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
 if (p == NULL) {
 /* Shift the array. */
 for (int j = 0; j < i; j++)
@@ -932,8 +927,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
 
 for (tpindex = m[count - 1]->pindex + 1;
 tpindex < endpindex; i++, tpindex++) {
-p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL |
-VM_ALLOC_IFNOTCACHED);
+p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
 if (p == NULL)
 break;
 bp->b_pages[i] = p;