Use a vm_domainset iterator in keg_fetch_slab().

Previously, keg_fetch_slab() used a hand-rolled round-robin iterator.
This meant that the minskip logic in r338507 didn't apply to UMA
allocations, and that we would call vm_wait() for individual domains
rather than permitting an allocation from any domain with sufficient
free pages.

Discussed with:	jeff
Tested by:	pho
MFC after:	1 week
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D17420
commit 194a979ee9 (parent dde8fe66d6)
Author: Mark Johnston
Date:   2018-10-24 16:41:47 +00:00
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=339686

2 changed files with 71 additions and 57 deletions
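
To make the new control flow concrete: vm_domainset_iter_policy_ref_init()
primes an iterator from the keg's struct domainset_ref, and each call to
vm_domainset_iter_policy() yields the next candidate domain, returning
nonzero once the policy's domains are exhausted (which is what the
vm_wait_doms()/restart fallback in the diff below relies on).  The following
is a minimal standalone sketch of that allocation pattern; it is userspace
C, not kernel code, and NDOMAINS, domain_has_pages and try_alloc() are
invented stand-ins for the kernel's domainset machinery.

/*
 * Standalone sketch (not kernel code) of the pattern keg_fetch_slab()
 * now follows: scan the policy's domains once without sleeping, and
 * only sleep on the whole policy mask after every domain has failed.
 */
#include <stdbool.h>
#include <stdio.h>

#define NDOMAINS        4

/* Hypothetical stand-in for per-domain free-page state. */
static bool domain_has_pages[NDOMAINS] = { false, true, false, true };

/* Round-robin cursor, analogous to dr_iter in struct domainset_ref. */
static int cursor;

static int
try_alloc(int domain)
{
        /* Pretend an allocation succeeds only where free pages exist. */
        return (domain_has_pages[domain] ? domain : -1);
}

int
main(void)
{
        int domain, i, ret;

        /*
         * The first pass is non-blocking, like clearing M_WAITOK and
         * handing M_NOWAIT to the iterator: visit each domain once.
         */
        ret = -1;
        for (i = 0; i < NDOMAINS; i++) {
                domain = cursor;
                cursor = (cursor + 1) % NDOMAINS;
                if ((ret = try_alloc(domain)) != -1)
                        break;
        }

        /*
         * Only when the scan is exhausted would an M_WAITOK caller sleep
         * in vm_wait_doms() on the union of the policy's domains and then
         * restart, instead of waiting on one specific domain.
         */
        if (ret == -1)
                printf("scan exhausted: would vm_wait_doms() and restart\n");
        else
                printf("allocated from domain %d\n", ret);
        return (0);
}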

sys/vm/uma_core.c

@@ -59,6 +59,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/bitset.h>
+#include <sys/domainset.h>
 #include <sys/eventhandler.h>
 #include <sys/kernel.h>
 #include <sys/types.h>
@@ -79,6 +80,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/vmmeter.h>
 
 #include <vm/vm.h>
+#include <vm/vm_domainset.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
 #include <vm/vm_pageout.h>
@@ -991,6 +993,8 @@ zone_drain(uma_zone_t zone)
 
 /*
  * Allocate a new slab for a keg.  This does not insert the slab onto a list.
+ * If the allocation was successful, the keg lock will be held upon return,
+ * otherwise the keg will be left unlocked.
  *
  * Arguments:
  *	wait  Shall we wait?
@@ -1012,13 +1016,12 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int wait)
 	KASSERT(domain >= 0 && domain < vm_ndomains,
 	    ("keg_alloc_slab: domain %d out of range", domain));
 	mtx_assert(&keg->uk_lock, MA_OWNED);
+	slab = NULL;
+	mem = NULL;
 
 	allocf = keg->uk_allocf;
 	KEG_UNLOCK(keg);
-	size = keg->uk_ppera * PAGE_SIZE;
 
-	slab = NULL;
-	mem = NULL;
 	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
 		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, wait);
 		if (slab == NULL)
@@ -1041,6 +1044,7 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int wait)
 		wait |= M_NODUMP;
 
 	/* zone is passed for legacy reasons. */
+	size = keg->uk_ppera * PAGE_SIZE;
 	mem = allocf(zone, size, domain, &flags, wait);
 	if (mem == NULL) {
 		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
@@ -1079,20 +1083,18 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int wait)
 			goto out;
 		}
 	}
 
-out:
 	KEG_LOCK(keg);
 
 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
 	    slab, keg->uk_name, keg);
 
-	if (slab != NULL) {
-		if (keg->uk_flags & UMA_ZONE_HASH)
-			UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
+	if (keg->uk_flags & UMA_ZONE_HASH)
+		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
 
-		keg->uk_pages += keg->uk_ppera;
-		keg->uk_free += keg->uk_ipers;
-	}
+	keg->uk_pages += keg->uk_ppera;
+	keg->uk_free += keg->uk_ipers;
 
+out:
 	return (slab);
 }
@@ -1559,13 +1561,20 @@ keg_ctor(void *mem, int size, void *udata, int flags)
 	keg->uk_init = arg->uminit;
 	keg->uk_fini = arg->fini;
 	keg->uk_align = arg->align;
-	keg->uk_cursor = 0;
 	keg->uk_free = 0;
 	keg->uk_reserve = 0;
 	keg->uk_pages = 0;
 	keg->uk_flags = arg->flags;
 	keg->uk_slabzone = NULL;
 
+	/*
+	 * We use a global round-robin policy by default.  Zones with
+	 * UMA_ZONE_NUMA set will use first-touch instead, in which case the
+	 * iterator is never run.
+	 */
+	keg->uk_dr.dr_policy = DOMAINSET_RR();
+	keg->uk_dr.dr_iter = 0;
+
 	/*
 	 * The master zone is passed to us at keg-creation time.
 	 */
@@ -2607,7 +2616,7 @@ uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
  * only 'domain'.
  */
 static uma_slab_t
-keg_first_slab(uma_keg_t keg, int domain, int rr)
+keg_first_slab(uma_keg_t keg, int domain, bool rr)
 {
 	uma_domain_t dom;
 	uma_slab_t slab;
@@ -2636,43 +2645,51 @@ keg_first_slab(uma_keg_t keg, int domain, int rr)
 }
 
 static uma_slab_t
-keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, int flags)
+keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
 {
-	uma_domain_t dom;
-	uma_slab_t slab;
-	int allocflags, domain, reserve, rr, start;
+	uint32_t reserve;
 
 	mtx_assert(&keg->uk_lock, MA_OWNED);
-	slab = NULL;
-	reserve = 0;
-	allocflags = flags;
-	if ((flags & M_USE_RESERVE) == 0)
-		reserve = keg->uk_reserve;
+
+	reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
+	if (keg->uk_free <= reserve)
+		return (NULL);
+	return (keg_first_slab(keg, domain, rr));
+}
+
+static uma_slab_t
+keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
+{
+	struct vm_domainset_iter di;
+	uma_domain_t dom;
+	uma_slab_t slab;
+	int aflags, domain;
+	bool rr;
+
+restart:
+	mtx_assert(&keg->uk_lock, MA_OWNED);
 
 	/*
-	 * Round-robin for non first-touch zones when there is more than one
-	 * domain.
+	 * Use the keg's policy if upper layers haven't already specified a
+	 * domain (as happens with first-touch zones).
+	 *
+	 * To avoid races we run the iterator with the keg lock held, but that
+	 * means that we cannot allow the vm_domainset layer to sleep.  Thus,
+	 * clear M_WAITOK and handle low memory conditions locally.
 	 */
-	if (vm_ndomains == 1)
-		rdomain = 0;
 	rr = rdomain == UMA_ANYDOMAIN;
 	if (rr) {
-		start = keg->uk_cursor;
-		do {
-			keg->uk_cursor = (keg->uk_cursor + 1) % vm_ndomains;
-			domain = keg->uk_cursor;
-		} while (VM_DOMAIN_EMPTY(domain) && domain != start);
-		domain = start = keg->uk_cursor;
-		/* Only block on the second pass. */
-		if ((flags & (M_WAITOK | M_NOVM)) == M_WAITOK)
-			allocflags = (allocflags & ~M_WAITOK) | M_NOWAIT;
-	} else
-		domain = start = rdomain;
+		aflags = (flags & ~M_WAITOK) | M_NOWAIT;
+		vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
+		    &aflags);
+	} else {
+		aflags = flags;
+		domain = rdomain;
+	}
 
-again:
-	do {
-		if (keg->uk_free > reserve &&
-		    (slab = keg_first_slab(keg, domain, rr)) != NULL) {
+	for (;;) {
+		slab = keg_fetch_free_slab(keg, domain, rr, flags);
+		if (slab != NULL) {
 			MPASS(slab->us_keg == keg);
 			return (slab);
 		}
@@ -2700,7 +2717,7 @@ keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, int flags)
 			msleep(keg, &keg->uk_lock, PVM, "keglimit", 0);
 			continue;
 		}
-		slab = keg_alloc_slab(keg, zone, domain, allocflags);
+		slab = keg_alloc_slab(keg, zone, domain, aflags);
 		/*
 		 * If we got a slab here it's safe to mark it partially used
 		 * and return.  We assume that the caller is going to remove
@@ -2712,17 +2729,16 @@ keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, int flags)
 			LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
 			return (slab);
 		}
-		if (rr) {
-			do {
-				domain = (domain + 1) % vm_ndomains;
-			} while (VM_DOMAIN_EMPTY(domain) && domain != start);
+		KEG_LOCK(keg);
+		if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
+			if ((flags & M_WAITOK) != 0) {
+				KEG_UNLOCK(keg);
+				vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
+				KEG_LOCK(keg);
+				goto restart;
+			}
+			break;
 		}
-	} while (domain != start);
-
-	/* Retry domain scan with blocking. */
-	if (allocflags != flags) {
-		allocflags = flags;
-		goto again;
 	}
 
 	/*
@@ -2730,8 +2746,7 @@ keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, int flags)
 	 * could have while we were unlocked.  Check again before we
 	 * fail.
 	 */
-	if (keg->uk_free > reserve &&
-	    (slab = keg_first_slab(keg, domain, rr)) != NULL) {
+	if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) {
 		MPASS(slab->us_keg == keg);
 		return (slab);
 	}
@@ -3606,14 +3621,13 @@ uma_prealloc(uma_zone_t zone, int items)
 	domain = 0;
 	if (slabs * keg->uk_ipers < items)
 		slabs++;
-	while (slabs > 0) {
+	while (slabs-- > 0) {
 		slab = keg_alloc_slab(keg, zone, domain, M_WAITOK);
 		if (slab == NULL)
-			break;
+			return;
 		MPASS(slab->us_keg == keg);
 		dom = &keg->uk_domain[slab->us_domain];
 		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
-		slabs--;
 		do {
 			domain = (domain + 1) % vm_ndomains;
 		} while (VM_DOMAIN_EMPTY(domain));

sys/vm/uma_int.h

@@ -226,7 +226,7 @@ struct uma_keg {
 	struct uma_hash	uk_hash;
 	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */
 
-	uint32_t	uk_cursor;	/* Domain alloc cursor. */
+	struct domainset_ref	uk_dr;	/* Domain selection policy. */
 	uint32_t	uk_align;	/* Alignment mask */
 	uint32_t	uk_pages;	/* Total page count */
 	uint32_t	uk_free;	/* Count of items free in slabs */
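
For context on the field swapped in above: struct domainset_ref bundles a
domain-selection policy with its own round-robin cursor, which is why the
separate uk_cursor can be retired.  A paraphrased sketch of its layout,
assuming the sys/_domainset.h of this era (exact types may differ by branch):

/* Paraphrased; see sys/_domainset.h for the authoritative definition. */
struct domainset_ref {
        struct domainset * volatile dr_policy;  /* Selection policy. */
        unsigned int            dr_iter;        /* Round-robin iterator. */
};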