Split vm_page_free_toq() into two parts: preparation, vm_page_free_prep(),
and insertion into the phys allocator free queues, vm_page_free_phys().
Also provide a wrapper, vm_page_free_phys_pglist(), for batched frees.

Reviewed by:	alc, markj
Tested by:	mjg (previous version)
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
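No caller of the batched path appears in this diff, so a usage sketch may help: prepare each page with vm_page_free_prep(), collect the pages that may actually be freed, then push the whole list through vm_page_free_phys_pglist() under a single acquisition of the free queue mutex. Only the two vm_page_free_* calls come from this commit; the collection loop and the collect_victim() helper are invented for illustration.

	struct pglist pgl;
	vm_page_t m;

	TAILQ_INIT(&pgl);
	while ((m = collect_victim()) != NULL) {	/* collect_victim() is made up */
		/*
		 * Per the vm_page_free_prep() header comment, the owning
		 * object (if any) must be locked, and a managed page must
		 * be locked.
		 */
		vm_page_lock(m);
		if (vm_page_free_prep(m, false))
			/* May go to the free queues; defer the actual free. */
			TAILQ_INSERT_TAIL(&pgl, m, listq);
		vm_page_unlock(m);
	}
	vm_page_free_phys_pglist(&pgl);	/* one lock round trip for the batch */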
Author:	Konstantin Belousov
Date:	2017-09-13 19:11:52 +00:00
Commit:	540ac3b310 (parent b9e8fb647e)

2 changed files with 80 additions and 38 deletions

sys/vm/vm_page.c

@@ -163,6 +163,7 @@ static uma_zone_t fakepg_zone;
 static void vm_page_alloc_check(vm_page_t m);
 static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
 static void vm_page_enqueue(uint8_t queue, vm_page_t m);
+static void vm_page_free_phys(vm_page_t m);
 static void vm_page_free_wakeup(void);
 static void vm_page_init(void *dummy);
 static int vm_page_insert_after(vm_page_t m, vm_object_t object,
@@ -2402,13 +2403,7 @@ vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
 	mtx_lock(&vm_page_queue_free_mtx);
 	do {
 		SLIST_REMOVE_HEAD(&free, plinks.s.ss);
-		vm_phys_freecnt_adj(m, 1);
-#if VM_NRESERVLEVEL > 0
-		if (!vm_reserv_free_page(m))
-#else
-		if (true)
-#endif
-			vm_phys_free_pages(m, 0);
+		vm_page_free_phys(m);
 	} while ((m = SLIST_FIRST(&free)) != NULL);
 	vm_page_free_wakeup();
 	mtx_unlock(&vm_page_queue_free_mtx);
@@ -2770,15 +2765,18 @@ vm_page_free_wakeup(void)
 }
 
 /*
- *	vm_page_free_toq:
+ *	vm_page_free_prep:
  *
- *	Returns the given page to the free list,
- *	disassociating it with any VM object.
+ *	Prepares the given page to be put on the free list,
+ *	disassociating it from any VM object.  The caller may return
+ *	the page to the free list only if this function returns true.
  *
- *	The object must be locked.  The page must be locked if it is managed.
+ *	The object must be locked.  The page must be locked if it is
+ *	managed.  For a queued managed page, the pagequeue_locked
+ *	argument specifies whether the page queue is already locked.
  */
-void
-vm_page_free_toq(vm_page_t m)
+bool
+vm_page_free_prep(vm_page_t m, bool pagequeue_locked)
 {
 
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
@@ -2799,16 +2797,20 @@ vm_page_free_toq(vm_page_t m)
 	 * callback routine until after we've put the page on the
 	 * appropriate free queue.
 	 */
-	vm_page_remque(m);
+	if (m->queue != PQ_NONE) {
+		if (pagequeue_locked)
+			vm_page_dequeue_locked(m);
+		else
+			vm_page_dequeue(m);
+	}
 	vm_page_remove(m);
 
 	/*
 	 * If fictitious remove object association and
 	 * return, otherwise delay object association removal.
 	 */
-	if ((m->flags & PG_FICTITIOUS) != 0) {
-		return;
-	}
+	if ((m->flags & PG_FICTITIOUS) != 0)
+		return (false);
 
 	m->valid = 0;
 	vm_page_undirty(m);
@@ -2820,28 +2822,66 @@ vm_page_free_toq(vm_page_t m)
 		KASSERT((m->flags & PG_UNHOLDFREE) == 0,
 		    ("vm_page_free: freeing PG_UNHOLDFREE page %p", m));
 		m->flags |= PG_UNHOLDFREE;
-	} else {
-		/*
-		 * Restore the default memory attribute to the page.
-		 */
-		if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
-			pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
-
-		/*
-		 * Insert the page into the physical memory allocator's free
-		 * page queues.
-		 */
-		mtx_lock(&vm_page_queue_free_mtx);
-		vm_phys_freecnt_adj(m, 1);
-#if VM_NRESERVLEVEL > 0
-		if (!vm_reserv_free_page(m))
-#else
-		if (TRUE)
-#endif
-			vm_phys_free_pages(m, 0);
-		vm_page_free_wakeup();
-		mtx_unlock(&vm_page_queue_free_mtx);
+		return (false);
 	}
+
+	/*
+	 * Restore the default memory attribute to the page.
+	 */
+	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
+		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
+
+	return (true);
+}
+
+/*
+ *	Insert the page into the physical memory allocator's free page
+ *	queues.  This is the last step to free a page.
+ */
+static void
+vm_page_free_phys(vm_page_t m)
+{
+
+	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
+
+	vm_phys_freecnt_adj(m, 1);
+#if VM_NRESERVLEVEL > 0
+	if (!vm_reserv_free_page(m))
+#endif
+		vm_phys_free_pages(m, 0);
+}
+
+void
+vm_page_free_phys_pglist(struct pglist *tq)
+{
+	vm_page_t m;
+
+	mtx_lock(&vm_page_queue_free_mtx);
+	TAILQ_FOREACH(m, tq, listq)
+		vm_page_free_phys(m);
+	vm_page_free_wakeup();
+	mtx_unlock(&vm_page_queue_free_mtx);
+}
+
+/*
+ *	vm_page_free_toq:
+ *
+ *	Returns the given page to the free list, disassociating it
+ *	from any VM object.
+ *
+ *	The object must be locked.  The page must be locked if it is
+ *	managed.
+ */
+void
+vm_page_free_toq(vm_page_t m)
+{
+
+	if (!vm_page_free_prep(m, false))
+		return;
+	mtx_lock(&vm_page_queue_free_mtx);
+	vm_page_free_phys(m);
+	vm_page_free_wakeup();
+	mtx_unlock(&vm_page_queue_free_mtx);
+}
 
 /*

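The pagequeue_locked argument exists for callers that reach the free path while already holding a page queue lock, for example while scanning a queue. A minimal sketch, written as if it lived inside vm_page.c (vm_page_free_phys() is static there); the scan context, and the assumption that the object and page locks are already held, are invented for illustration:

	/* pq is the vm_pagequeue being scanned; its mutex is held. */
	if (vm_page_free_prep(m, true)) {
		/* Prep dequeued via vm_page_dequeue_locked(); now free it. */
		vm_pagequeue_unlock(pq);
		mtx_lock(&vm_page_queue_free_mtx);
		vm_page_free_phys(m);		/* file-local to vm_page.c */
		vm_page_free_wakeup();
		mtx_unlock(&vm_page_queue_free_mtx);
	} else
		vm_pagequeue_unlock(pq);	/* page was held or fictitious */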
sys/vm/vm_page.h

@@ -483,6 +483,8 @@ void vm_page_deactivate_noreuse(vm_page_t);
 void vm_page_dequeue(vm_page_t m);
 void vm_page_dequeue_locked(vm_page_t m);
 vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
+void vm_page_free_phys_pglist(struct pglist *tq);
+bool vm_page_free_prep(vm_page_t m, bool pagequeue_locked);
 vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
 void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
 int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);