Add a uma cache of free pages in the DEFAULT freepool.  This gives us
per-cpu alloc and free of pages.  The cache is filled with as few trips
to the phys allocator as possible by the use of a new
vm_phys_alloc_npages() function which allocates as many as N pages.

This code was originally by markj with the import function rewritten by
me.

Reviewed by:	markj, kib
Tested by:	pho
Sponsored by:	Netflix, Dell/EMC Isilon
Differential Revision:	https://reviews.freebsd.org/D14905
commit c33e3a642b (parent e8bb2dc7c9)
Author:	Jeff Roberson
Date:	2018-04-01 04:50:05 +0000

4 changed files with 109 additions and 8 deletions
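
For background, here is a minimal sketch of the UMA cache-zone pattern this
change instantiates (not part of the commit; sketch_*, backing_alloc(), and
backing_free() are hypothetical names).  uma_zcache_create() builds a zone
with no backing slabs of its own; instead the import and release callbacks
move whole buckets of items between the zone's per-CPU caches and an
external allocator:

/* Sketch only: a UMA cache zone backed by a custom allocator. */
#include <sys/param.h>
#include <vm/uma.h>

static int
sketch_import(void *arg, void **store, int cnt, int domain, int flags)
{
	int i;

	/* Fill store[] with up to cnt items; return how many we got. */
	for (i = 0; i < cnt; i++) {
		store[i] = backing_alloc(arg, domain);	/* hypothetical */
		if (store[i] == NULL)
			break;
	}
	return (i);
}

static void
sketch_release(void *arg, void **store, int cnt)
{
	int i;

	/* Accept back items the zone no longer wants to cache. */
	for (i = 0; i < cnt; i++)
		backing_free(arg, store[i]);		/* hypothetical */
}

static uma_zone_t sketch_zone;

static void
sketch_setup(void *arg)
{
	/* No ctor/dtor/zinit/zfini: items pass through untouched. */
	sketch_zone = uma_zcache_create("sketch", sizeof(void *),
	    NULL, NULL, NULL, NULL, sketch_import, sketch_release, arg, 0);
}

Consumers call uma_zalloc()/uma_zfree() on such a zone and normally complete
from the local per-CPU bucket; the callbacks run only when a bucket must be
filled or drained, which is what gives the page cache below its per-cpu
alloc and free fast path.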

sys/vm/vm_page.c

@@ -186,6 +186,9 @@ static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
    vm_page_t m_run, vm_paddr_t high);
static int vm_domain_alloc_fail(struct vm_domain *vmd, vm_object_t object,
    int req);
static int vm_page_import(void *arg, void **store, int cnt, int domain,
    int flags);
static void vm_page_release(void *arg, void **store, int cnt);

SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init, NULL);
@@ -199,6 +202,32 @@ vm_page_init(void *dummy)
	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED);
}

/*
 * The cache page zone is initialized later since we need to be able to allocate
 * pages before UMA is fully initialized.
 */
static void
vm_page_init_cache_zones(void *dummy __unused)
{
	struct vm_domain *vmd;
	int i;

	for (i = 0; i < vm_ndomains; i++) {
		vmd = VM_DOMAIN(i);
		/*
		 * Don't allow the page cache to take up more than .25% of
		 * memory.
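		 * (With UMA_ZONE_MAXBUCKET a bucket holds up to 256 items,
		 * so this demands room for at least one full bucket per CPU
		 * before the cache is worth creating.)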
		 */
		if (vmd->vmd_page_count / 400 < 256 * mp_ncpus)
			continue;
		vmd->vmd_pgcache = uma_zcache_create("vm pgcache",
		    sizeof(struct vm_page), NULL, NULL, NULL, NULL,
		    vm_page_import, vm_page_release, vmd,
		    UMA_ZONE_NOBUCKETCACHE | UMA_ZONE_MAXBUCKET | UMA_ZONE_VM);
	}
}
SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
@@ -1753,6 +1782,11 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
	}
#endif

	vmd = VM_DOMAIN(domain);
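	/* Try the per-CPU page cache first; it needs no free queue locks. */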
	if (object != NULL && vmd->vmd_pgcache != NULL) {
		m = uma_zalloc(vmd->vmd_pgcache, M_NOWAIT);
		if (m != NULL)
			goto found;
	}
	if (vm_domain_allocate(vmd, req, 1)) {
		/*
		 * If not, allocate it from the free page queues.

@@ -1783,9 +1817,7 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
	 */
	KASSERT(m != NULL, ("missing page"));
-#if VM_NRESERVLEVEL > 0
found:
-#endif
	vm_page_alloc_check(m);

	/*
@@ -2150,6 +2182,52 @@ vm_page_alloc_freelist_domain(int domain, int freelist, int req)
	return (m);
}

static int
vm_page_import(void *arg, void **store, int cnt, int domain, int flags)
{
	struct vm_domain *vmd;
	vm_page_t m;
	int i, j, n;

	vmd = arg;
	/* Only import if we can bring in a full bucket. */
	if (cnt == 1 || !vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
		return (0);
	domain = vmd->vmd_domain;
	n = 64;	/* Starting stride, arbitrary. */
	vm_domain_free_lock(vmd);
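	/*
	 * Grab power-of-two runs of pages; "n" tracks the size of the
	 * last successful run so requests shrink as memory fragments.
	 */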
	for (i = 0; i < cnt; i += n) {
		n = vm_phys_alloc_npages(domain, VM_FREELIST_DEFAULT, &m,
		    MIN(n, cnt - i));
		if (n == 0)
			break;
		for (j = 0; j < n; j++)
			store[i + j] = m++;
	}
	vm_domain_free_unlock(vmd);
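	/* Return any part of the reservation we failed to fill. */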
	if (cnt != i)
		vm_domain_freecnt_inc(vmd, cnt - i);

	return (i);
}

static void
vm_page_release(void *arg, void **store, int cnt)
{
	struct vm_domain *vmd;
	vm_page_t m;
	int i;

	vmd = arg;
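	/* Hand each cached page back to the physical allocator. */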
	vm_domain_free_lock(vmd);
	for (i = 0; i < cnt; i++) {
		m = (vm_page_t)store[i];
		vm_phys_free_pages(m, 0);
	}
	vm_domain_free_unlock(vmd);
	vm_domain_freecnt_inc(vmd, cnt);
}

#define	VPSC_ANY	0	/* No restrictions. */
#define	VPSC_NORESERV	1	/* Skip reservations; implies VPSC_NOSUPER. */
#define	VPSC_NOSUPER	2	/* Skip superpages. */
@@ -3222,7 +3300,12 @@ vm_page_free_toq(vm_page_t m)
	if (!vm_page_free_prep(m, false))
		return;

	vmd = vm_pagequeue_domain(m);
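	/* Default-pool pages can go back through the per-CPU cache. */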
	if (m->pool == VM_FREEPOOL_DEFAULT && vmd->vmd_pgcache != NULL) {
		uma_zfree(vmd->vmd_pgcache, m);
		return;
	}
	vm_domain_free_lock(vmd);
	vm_phys_free_pages(m, 0);
	vm_domain_free_unlock(vmd);
@@ -3243,23 +3326,18 @@ void
vm_page_free_pages_toq(struct spglist *free, bool update_wire_count)
{
	vm_page_t m;
-	struct pglist pgl;
	int count;

	if (SLIST_EMPTY(free))
		return;

	count = 0;
-	TAILQ_INIT(&pgl);
	while ((m = SLIST_FIRST(free)) != NULL) {
		count++;
		SLIST_REMOVE_HEAD(free, plinks.s.ss);
-		if (vm_page_free_prep(m, false))
-			TAILQ_INSERT_TAIL(&pgl, m, listq);
+		vm_page_free_toq(m);
	}
-	vm_page_free_phys_pglist(&pgl);

	if (update_wire_count)
		vm_wire_sub(count);
}

sys/vm/vm_pagequeue.h

@@ -74,6 +74,7 @@ struct vm_pagequeue {
} __aligned(CACHE_LINE_SIZE);

#include <sys/pidctrl.h>
#include <vm/uma.h>

struct sysctl_oid;
/*
@@ -92,6 +93,7 @@ struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	struct mtx_padalign vmd_free_mtx;
	struct mtx_padalign vmd_pageout_mtx;
	uma_zone_t vmd_pgcache;		/* (c) page free cache. */
	struct vmem *vmd_kernel_arena;	/* (c) per-domain kva arena. */
	u_int vmd_domain;		/* (c) Domain number. */
	u_int vmd_page_count;		/* (c) Total page count. */

sys/vm/vm_phys.c

@@ -624,6 +624,26 @@ vm_phys_alloc_pages(int domain, int pool, int order)
	return (NULL);
}

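/*
 * Allocate at most cnt pages as a single power-of-two run, preferring
 * larger orders.  Stores the first page of the run in *mp and returns
 * the run length, or 0 if no pages could be allocated.
 */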
int
vm_phys_alloc_npages(int domain, int pool, vm_page_t *mp, int cnt)
{
	vm_page_t m;
	int order, freelist;

	for (freelist = 0; freelist < VM_NFREELIST; freelist++) {
		for (order = fls(cnt) - 1; order >= 0; order--) {
			m = vm_phys_alloc_freelist_pages(domain, freelist,
			    pool, order);
			if (m != NULL) {
				*mp = m;
				return (1 << order);
			}
		}
	}
	*mp = NULL;
	return (0);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the

sys/vm/vm_phys.h

@@ -78,6 +78,7 @@ vm_page_t vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low,
vm_page_t vm_phys_alloc_freelist_pages(int domain, int freelist, int pool,
    int order);
vm_page_t vm_phys_alloc_pages(int domain, int pool, int order);
int vm_phys_alloc_npages(int domain, int pool, vm_page_t *m, int cnt);
int vm_phys_domain_match(int prefer, vm_paddr_t low, vm_paddr_t high);
int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr);