Mark pages allocated from the per-CPU cache.

Only free pages to the cache when they were allocated from that cache.
This mitigates rapid fragmentation of physical memory seen during
poudriere's dependency calculation phase.  In particular, pages
belonging to broken reservations are no longer freed to the per-CPU
cache, so they get a chance to coalesce with freed pages during the
break.  Otherwise, the optimized CoW handler may create object
chains in which multiple objects contain pages from the same
reservation.  Because of the order in which object termination is
performed, the reservation is broken before all of those pages are
freed, so some of them end up in the per-CPU cache and thus
permanently fragment physical memory.
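
In code terms, the free side of this policy boils down to a flag
check in vm_page_free_toq(), condensed here from the hunk below:

	/*
	 * Hand the page back to the per-CPU cache only if it was
	 * allocated from that cache; any other page returns to the
	 * physical allocator, where it can coalesce with adjacent
	 * free pages.
	 */
	if ((m->flags & PG_PCPU_CACHE) != 0 && vmd->vmd_pgcache != NULL) {
		uma_zfree(vmd->vmd_pgcache, m);
		return;
	}
	/* ...otherwise fall through to the regular free path. */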

The flag may also be useful for eliding calls to vm_reserv_free_page(),
thus avoiding memory accesses for data that is likely not present
in the CPU caches.
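
Such an elision would be a follow-up change; as a hypothetical sketch
(not part of this commit), the flag could guard the existing
vm_reserv_free_page() call in vm_page_free_prep() along these lines:

#if VM_NRESERVLEVEL > 0
	/*
	 * Hypothetical: a page allocated from the per-CPU cache cannot
	 * belong to a reservation, so the lookup and the likely
	 * cache-cold access to reservation metadata can be skipped.
	 */
	if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m))
		return (false);
#endif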

Reviewed by:	alc
Discussed with:	jeff
MFC after:	1 week
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D20763
commit 9f74cdbf78 (parent 4b8b28e130)
Mark Johnston, 2019-07-02 19:51:40 +00:00
2 changed files with 10 additions and 10 deletions

sys/vm/vm_page.c

@@ -1812,8 +1812,9 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
if (object != NULL)
VM_OBJECT_ASSERT_WLOCKED(object);
again:
flags = 0;
m = NULL;
again:
#if VM_NRESERVLEVEL > 0
/*
* Can we allocate the page from a reservation?
@@ -1829,8 +1830,10 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
vmd = VM_DOMAIN(domain);
if (object != NULL && vmd->vmd_pgcache != NULL) {
m = uma_zalloc(vmd->vmd_pgcache, M_NOWAIT);
if (m != NULL)
if (m != NULL) {
flags |= PG_PCPU_CACHE;
goto found;
}
}
if (vm_domain_allocate(vmd, req, 1)) {
/*
@@ -1858,10 +1861,8 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
}
/*
* At this point we had better have found a good page.
*/
KASSERT(m != NULL, ("missing page"));
found:
vm_page_dequeue(m);
vm_page_alloc_check(m);
@@ -1869,10 +1870,8 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
/*
* Initialize the page. Only the PG_ZERO flag is inherited.
*/
flags = 0;
if ((req & VM_ALLOC_ZERO) != 0)
flags = PG_ZERO;
flags &= m->flags;
flags |= (m->flags & PG_ZERO);
if ((req & VM_ALLOC_NODUMP) != 0)
flags |= PG_NODUMP;
m->flags = flags;
@@ -2018,6 +2017,7 @@ vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
* Can we allocate the pages without the number of free pages falling
* below the lower bound for the allocation class?
*/
m_ret = NULL;
again:
#if VM_NRESERVLEVEL > 0
/*
@@ -2031,7 +2031,6 @@ vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
goto found;
}
#endif
m_ret = NULL;
vmd = VM_DOMAIN(domain);
if (vm_domain_allocate(vmd, req, npages)) {
/*
@@ -3506,7 +3505,7 @@ vm_page_free_toq(vm_page_t m)
return;
vmd = vm_pagequeue_domain(m);
if (m->pool == VM_FREEPOOL_DEFAULT && vmd->vmd_pgcache != NULL) {
if ((m->flags & PG_PCPU_CACHE) != 0 && vmd->vmd_pgcache != NULL) {
uma_zfree(vmd->vmd_pgcache, m);
return;
}

sys/vm/vm_page.h

@@ -379,6 +379,7 @@ extern struct mtx_padalign pa_lock[];
* Page flags. If changed at any other time than page allocation or
* freeing, the modification must be protected by the vm_page lock.
*/
#define PG_PCPU_CACHE 0x0001 /* was allocated from per-CPU caches */
#define PG_FICTITIOUS 0x0004 /* physical page doesn't exist */
#define PG_ZERO 0x0008 /* page is zeroed */
#define PG_MARKER 0x0010 /* special queue marker page */