Add a per-CPU page cache per VM free pool.

Some workloads benefit from having a per-CPU cache for
VM_FREEPOOL_DIRECT pages.

Reviewed by:	dougm, kib
Discussed with:	alc, jeff
MFC after:	2 weeks
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D20858
parent 66778b0391
commit d9a73522e3
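Before this change, each vm_domain had a single UMA cache zone front-ending
VM_FREEPOOL_DEFAULT, so allocations without a backing object (which draw from
VM_FREEPOOL_DIRECT) always fell through to the physical allocator. The commit
turns vmd_pgcache into an array with one cache zone per free pool. The sketch
below shows the general UMA cache-zone pattern the diff relies on; the
"widget" names are hypothetical stand-ins, not kernel APIs.

/*
 * Minimal sketch of a UMA cache zone (hypothetical "widget" backend).
 * A cache zone has no slab layer of its own: per-CPU buckets are
 * filled through the import callback and drained through the release
 * callback, which is exactly the role vm_page_import()/vm_page_release()
 * play for the page caches in this commit.
 */
#include <sys/param.h>
#include <vm/uma.h>

struct widget {
	int	payload;	/* placeholder member */
};

struct widget	*widget_backend_alloc(void *arg, int domain);	   /* hypothetical */
void		 widget_backend_free(void *arg, struct widget *w); /* hypothetical */

static int
widget_import(void *arg, void **store, int cnt, int domain, int flags)
{
	int i;

	/* Fill "store" with up to "cnt" objects; return how many we got. */
	for (i = 0; i < cnt; i++)
		if ((store[i] = widget_backend_alloc(arg, domain)) == NULL)
			break;
	return (i);
}

static void
widget_release(void *arg, void **store, int cnt)
{
	int i;

	/* Hand cached objects back to the backing allocator. */
	for (i = 0; i < cnt; i++)
		widget_backend_free(arg, store[i]);
}

static uma_zone_t
widget_zone_create(void *arg)
{
	/* "arg" is threaded through to both callbacks unchanged. */
	return (uma_zcache_create("widget cache", sizeof(struct widget),
	    NULL, NULL, NULL, NULL, widget_import, widget_release, arg,
	    UMA_ZONE_MAXBUCKET | UMA_ZONE_VM));
}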
@@ -192,21 +192,28 @@ static void
 vm_page_init_cache_zones(void *dummy __unused)
 {
 	struct vm_domain *vmd;
-	int i;
+	struct vm_pgcache *pgcache;
+	int domain, pool;
 
-	for (i = 0; i < vm_ndomains; i++) {
-		vmd = VM_DOMAIN(i);
+	for (domain = 0; domain < vm_ndomains; domain++) {
+		vmd = VM_DOMAIN(domain);
+
 		/*
-		 * Don't allow the page cache to take up more than .25% of
+		 * Don't allow the page caches to take up more than .25% of
 		 * memory.
 		 */
-		if (vmd->vmd_page_count / 400 < 256 * mp_ncpus)
+		if (vmd->vmd_page_count / 400 < 256 * mp_ncpus * VM_NFREEPOOL)
 			continue;
-		vmd->vmd_pgcache = uma_zcache_create("vm pgcache",
-		    sizeof(struct vm_page), NULL, NULL, NULL, NULL,
-		    vm_page_import, vm_page_release, vmd,
-		    UMA_ZONE_MAXBUCKET | UMA_ZONE_VM);
-		(void )uma_zone_set_maxcache(vmd->vmd_pgcache, 0);
+		for (pool = 0; pool < VM_NFREEPOOL; pool++) {
+			pgcache = &vmd->vmd_pgcache[pool];
+			pgcache->domain = domain;
+			pgcache->pool = pool;
+			pgcache->zone = uma_zcache_create("vm pgcache",
+			    sizeof(struct vm_page), NULL, NULL, NULL, NULL,
+			    vm_page_import, vm_page_release, pgcache,
+			    UMA_ZONE_MAXBUCKET | UMA_ZONE_VM);
+			(void)uma_zone_set_maxcache(pgcache->zone, 0);
+		}
 	}
 }
 SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);
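The guard above sizes the caches conservatively: a domain only gets cache
zones if 0.25% of its pages (vmd_page_count / 400) can hold 256 pages per CPU
for every free pool. A back-of-the-envelope check, with illustrative
assumptions (4 KiB pages, 8 CPUs, two free pools as on amd64):

/*
 * Worked example of the sizing guard; the CPU and pool counts here
 * are illustrative assumptions, not values from the commit.
 */
#define	EX_NCPUS	8
#define	EX_NFREEPOOL	2	/* e.g. VM_FREEPOOL_DEFAULT + VM_FREEPOOL_DIRECT */

/* The guard requires: vmd_page_count / 400 >= 256 * ncpus * npools. */
#define	EX_MIN_PAGES	(400 * 256 * EX_NCPUS * EX_NFREEPOOL)	/* 1638400 pages */
/* 1638400 pages * 4 KiB/page = 6.25 GiB minimum domain size. */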
@@ -1797,7 +1804,7 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
 {
 	struct vm_domain *vmd;
 	vm_page_t m;
-	int flags;
+	int flags, pool;
 
 	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
 	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
@@ -1814,6 +1821,7 @@ vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
 
 	flags = 0;
 	m = NULL;
+	pool = object != NULL ? VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT;
 again:
 #if VM_NRESERVLEVEL > 0
 	/*
@@ -1828,8 +1836,8 @@ again:
 	}
 #endif
 	vmd = VM_DOMAIN(domain);
-	if (object != NULL && vmd->vmd_pgcache != NULL) {
-		m = uma_zalloc(vmd->vmd_pgcache, M_NOWAIT);
+	if (vmd->vmd_pgcache[pool].zone != NULL) {
+		m = uma_zalloc(vmd->vmd_pgcache[pool].zone, M_NOWAIT);
 		if (m != NULL) {
 			flags |= PG_PCPU_CACHE;
 			goto found;
@@ -1840,8 +1848,7 @@ again:
 	 * If not, allocate it from the free page queues.
 	 */
 	vm_domain_free_lock(vmd);
-	m = vm_phys_alloc_pages(domain, object != NULL ?
-	    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
+	m = vm_phys_alloc_pages(domain, pool, 0);
 	vm_domain_free_unlock(vmd);
 	if (m == NULL) {
 		vm_domain_freecnt_inc(vmd, 1);
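The allocation path now computes the free pool once and consults that pool's
cache zone unconditionally; previously only object-backed allocations
(VM_FREEPOOL_DEFAULT) could be served from a per-CPU bucket. Condensed, the
new fast path looks like this (a sketch of the hunks above, relying on the
surrounding function's declarations, not verbatim source):

/*
 * Pick the pool once: pages without a backing object come from the
 * direct-map pool on platforms that have one.
 */
pool = object != NULL ? VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT;

/* Fast path: a per-CPU bucket for this (domain, pool) pair. */
if (vmd->vmd_pgcache[pool].zone != NULL &&
    (m = uma_zalloc(vmd->vmd_pgcache[pool].zone, M_NOWAIT)) != NULL) {
	flags |= PG_PCPU_CACHE;		/* note the page's origin */
} else {
	/* Slow path: the physical allocator, under the domain free lock. */
	vm_domain_free_lock(vmd);
	m = vm_phys_alloc_pages(domain, pool, 0);
	vm_domain_free_unlock(vmd);
}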
@@ -2231,15 +2238,17 @@ static int
 vm_page_import(void *arg, void **store, int cnt, int domain, int flags)
 {
 	struct vm_domain *vmd;
+	struct vm_pgcache *pgcache;
 	int i;
 
-	vmd = arg;
+	pgcache = arg;
+	vmd = VM_DOMAIN(pgcache->domain);
 	/* Only import if we can bring in a full bucket. */
 	if (cnt == 1 || !vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
 		return (0);
 	domain = vmd->vmd_domain;
 	vm_domain_free_lock(vmd);
-	i = vm_phys_alloc_npages(domain, VM_FREEPOOL_DEFAULT, cnt,
+	i = vm_phys_alloc_npages(domain, pgcache->pool, cnt,
 	    (vm_page_t *)store);
 	vm_domain_free_unlock(vmd);
 	if (cnt != i)
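vm_page_import() now recovers both the domain and the pool from its zone
argument, and it still refuses partial fills: a bucket is imported whole or
not at all. The tail of the function is elided by the hunk; a plausible
reconstruction of the accounting contract (an assumption about its shape, not
verbatim source) is:

/* Reserve the whole bucket up front against the domain's free count. */
if (cnt == 1 || !vm_domain_allocate(vmd, VM_ALLOC_NORMAL, cnt))
	return (0);		/* no partial buckets, nothing reserved */
vm_domain_free_lock(vmd);
i = vm_phys_alloc_npages(domain, pgcache->pool, cnt, (vm_page_t *)store);
vm_domain_free_unlock(vmd);
if (cnt != i)
	vm_domain_freecnt_inc(vmd, cnt - i);	/* refund the shortfall */
return (i);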
@@ -2252,10 +2261,12 @@ static void
 vm_page_release(void *arg, void **store, int cnt)
 {
 	struct vm_domain *vmd;
+	struct vm_pgcache *pgcache;
 	vm_page_t m;
 	int i;
 
-	vmd = arg;
+	pgcache = arg;
+	vmd = VM_DOMAIN(pgcache->domain);
 	vm_domain_free_lock(vmd);
 	for (i = 0; i < cnt; i++) {
 		m = (vm_page_t)store[i];
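The release side is symmetric: when a per-CPU bucket is trimmed, the whole
batch of pages goes back under a single acquisition of the domain free lock.
The loop body is elided above; presumably each page returns to the physical
free lists and the domain free count is credited, along these lines (an
assumption, not verbatim source):

vm_domain_free_lock(vmd);
for (i = 0; i < cnt; i++) {
	m = (vm_page_t)store[i];
	vm_phys_free_pages(m, 0);	/* order-0 page back to its free list */
}
vm_domain_free_unlock(vmd);
vm_domain_freecnt_inc(vmd, cnt);	/* credit the whole batch at once */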
@@ -3500,13 +3511,15 @@ void
 vm_page_free_toq(vm_page_t m)
 {
 	struct vm_domain *vmd;
+	uma_zone_t zone;
 
 	if (!vm_page_free_prep(m))
 		return;
 
 	vmd = vm_pagequeue_domain(m);
-	if ((m->flags & PG_PCPU_CACHE) != 0 && vmd->vmd_pgcache != NULL) {
-		uma_zfree(vmd->vmd_pgcache, m);
+	zone = vmd->vmd_pgcache[m->pool].zone;
+	if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) {
+		uma_zfree(zone, m);
 		return;
 	}
 	vm_domain_free_lock(vmd);
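On free, the zone is selected by the page's own m->pool rather than by the
freeing context, so a direct-pool page returns to the direct pool's cache and
the pools stay segregated. Only pages marked PG_PCPU_CACHE take the cache
shortcut; everything else goes through the regular free-list path under the
domain lock. As an illustrative sketch (the KASSERT is mine, not in the
commit):

/* Pages must return to the cache of the pool they were allocated from. */
KASSERT(m->pool < VM_NFREEPOOL, ("page %p has bad pool %d", m, m->pool));
zone = vmd->vmd_pgcache[m->pool].zone;
if ((m->flags & PG_PCPU_CACHE) != 0 && zone != NULL) {
	uma_zfree(zone, m);		/* fast path: onto a per-CPU bucket */
	return;
}
/* Otherwise fall through to the locked physical free path. */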
@@ -378,6 +378,10 @@ extern struct mtx_padalign pa_lock[];
 /*
  * Page flags.  If changed at any other time than page allocation or
  * freeing, the modification must be protected by the vm_page lock.
+ *
+ * The PG_PCPU_CACHE flag is set at allocation time if the page was
+ * allocated from a per-CPU cache.  It is cleared the next time that the
+ * page is allocated from the physical memory allocator.
  */
+#define	PG_PCPU_CACHE	0x0001		/* was allocated from per-CPU caches */
 #define	PG_FICTITIOUS	0x0004		/* physical page doesn't exist */
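The comment's "cleared the next time" works without an explicit clearing
step: the allocator rebuilds the page's flag word from a local variable on
every allocation, so PG_PCPU_CACHE survives only when the page actually came
from a cache zone. Roughly (the final assignment happens at the found: label,
which is outside the hunks shown, so this is an inference):

flags = 0;				/* fresh flag word per allocation */
if ((m = uma_zalloc(zone, M_NOWAIT)) != NULL)
	flags |= PG_PCPU_CACHE;		/* page came from a per-CPU bucket */
/* ... otherwise m comes from vm_phys and the bit stays clear ... */
m->flags = flags;			/* overwrites any stale PG_PCPU_CACHE */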
@@ -103,7 +103,11 @@ struct vm_domain {
 	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
 	struct mtx_padalign vmd_free_mtx;
 	struct mtx_padalign vmd_pageout_mtx;
-	uma_zone_t vmd_pgcache;		/* (c) page free cache. */
+	struct vm_pgcache {
+		int domain;
+		int pool;
+		uma_zone_t zone;
+	} vmd_pgcache[VM_NFREEPOOL];
 	struct vmem *vmd_kernel_arena;	/* (c) per-domain kva R/W arena. */
 	struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
 	u_int vmd_domain;		/* (c) Domain number. */
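Embedding the (domain, pool) coordinates in each array element is what lets a
single struct vm_pgcache pointer serve as the UMA zone argument: the import
and release callbacks receive only one opaque arg, and everything they need
is reachable from it. A given cache zone is then addressed as in this
hypothetical helper:

/* Hypothetical lookup helper showing how a cache zone is addressed. */
static inline uma_zone_t
vm_pgcache_zone(int domain, int pool)
{
	return (VM_DOMAIN(domain)->vmd_pgcache[pool].zone);	/* may be NULL */
}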