uma: Use the bucket cache for cross-domain allocations

uma_zalloc_domain() allocates from the requested domain instead of
following a first-touch policy (the default for most zones).  Currently
it is only used by malloc_domainset(), and consumers free returned items
with free(9) since r363834.

Previously uma_zalloc_domain() worked by always going to the keg for an
item.  As a result, the use of UMA zone caches was unbalanced: we free
items to the caches, but always allocate from the keg, skipping the
caches.

Make some effort to allocate from the UMA caches when performing a
cross-domain allocation.  This avoids blowing up the caches when
something is performing many transient allocations with
malloc_domainset().

Reported and tested by:	dhw, glebius
Sponsored by:		The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D26427
This commit is contained in:
Mark Johnston 2020-10-02 19:04:29 +00:00
parent 5afdf5c1ca
commit 06d8bdcbf7
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=366379

View File

@@ -685,8 +685,13 @@ zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, bool reclaim)
if (STAILQ_NEXT(bucket, ub_link) != NULL)
zdom->uzd_seq = STAILQ_NEXT(bucket, ub_link)->ub_seq;
}
MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
STAILQ_REMOVE_HEAD(&zdom->uzd_buckets, ub_link);
KASSERT(zdom->uzd_nitems >= bucket->ub_cnt,
("%s: item count underflow (%ld, %d)",
__func__, zdom->uzd_nitems, bucket->ub_cnt));
KASSERT(bucket->ub_cnt > 0,
("%s: empty bucket in bucket cache", __func__));
zdom->uzd_nitems -= bucket->ub_cnt;
/*
@@ -914,11 +919,8 @@ cache_fetch_bucket(uma_zone_t zone, uma_cache_t cache, int domain)
* Check the zone's cache of buckets.
*/
zdom = zone_domain_lock(zone, domain);
if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL) {
KASSERT(bucket->ub_cnt != 0,
("cache_fetch_bucket: Returning an empty bucket."));
if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL)
return (bucket);
}
ZDOM_UNLOCK(zdom);
return (NULL);
@@ -3495,6 +3497,11 @@ cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags)
void *
uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
{
#ifdef NUMA
uma_bucket_t bucket;
uma_zone_domain_t zdom;
void *item;
#endif
/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
@@ -3509,8 +3516,45 @@ uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
}
KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
("uma_zalloc_domain: called with spinlock or critical section held"));
KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
("uma_zalloc_domain: called with SMR zone."));
#ifdef NUMA
KASSERT((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0,
("uma_zalloc_domain: called with non-FIRSTTOUCH zone."));
if (vm_ndomains == 1)
return (uma_zalloc_arg(zone, udata, flags));
/*
* Try to allocate from the bucket cache before falling back to the keg.
* We could try harder and attempt to allocate from per-CPU caches or
* the per-domain cross-domain buckets, but the complexity is probably
* not worth it. It is more important that frees of previous
* cross-domain allocations do not blow up the cache.
*/
zdom = zone_domain_lock(zone, domain);
if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL) {
item = bucket->ub_bucket[bucket->ub_cnt - 1];
#ifdef INVARIANTS
bucket->ub_bucket[bucket->ub_cnt - 1] = NULL;
#endif
bucket->ub_cnt--;
zone_put_bucket(zone, domain, bucket, udata, true);
item = item_ctor(zone, zone->uz_flags, zone->uz_size, udata,
flags, item);
if (item != NULL) {
KASSERT(item_domain(item) == domain,
("%s: bucket cache item %p from wrong domain",
__func__, item));
counter_u64_add(zone->uz_allocs, 1);
}
return (item);
}
ZDOM_UNLOCK(zdom);
return (zone_alloc_item(zone, udata, domain, flags));
#else
return (uma_zalloc_arg(zone, udata, flags));
#endif
}
/*