diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index f8c377c9420a..a5115c2133b3 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -685,8 +685,13 @@ zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom, bool reclaim)
 		if (STAILQ_NEXT(bucket, ub_link) != NULL)
 			zdom->uzd_seq = STAILQ_NEXT(bucket, ub_link)->ub_seq;
 	}
-	MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
 	STAILQ_REMOVE_HEAD(&zdom->uzd_buckets, ub_link);
+
+	KASSERT(zdom->uzd_nitems >= bucket->ub_cnt,
+	    ("%s: item count underflow (%ld, %d)",
+	    __func__, zdom->uzd_nitems, bucket->ub_cnt));
+	KASSERT(bucket->ub_cnt > 0,
+	    ("%s: empty bucket in bucket cache", __func__));
 	zdom->uzd_nitems -= bucket->ub_cnt;
 
 	/*
@@ -914,11 +919,8 @@ cache_fetch_bucket(uma_zone_t zone, uma_cache_t cache, int domain)
 	 * Check the zone's cache of buckets.
	 */
 	zdom = zone_domain_lock(zone, domain);
-	if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL) {
-		KASSERT(bucket->ub_cnt != 0,
-		    ("cache_fetch_bucket: Returning an empty bucket."));
+	if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL)
 		return (bucket);
-	}
 	ZDOM_UNLOCK(zdom);
 
 	return (NULL);
@@ -3495,6 +3497,11 @@ cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags)
 void *
 uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
 {
+#ifdef NUMA
+	uma_bucket_t bucket;
+	uma_zone_domain_t zdom;
+	void *item;
+#endif
 
 	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
 	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
@@ -3509,8 +3516,45 @@ uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
 	}
 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
+	KASSERT((zone->uz_flags & UMA_ZONE_SMR) == 0,
+	    ("uma_zalloc_domain: called with SMR zone."));
+#ifdef NUMA
+	KASSERT((zone->uz_flags & UMA_ZONE_FIRSTTOUCH) != 0,
+	    ("uma_zalloc_domain: called with non-FIRSTTOUCH zone."));
 
+	if (vm_ndomains == 1)
+		return (uma_zalloc_arg(zone, udata, flags));
+
+	/*
+	 * Try to allocate from the bucket cache before falling back to the keg.
+	 * We could try harder and attempt to allocate from per-CPU caches or
+	 * the per-domain cross-domain buckets, but the complexity is probably
+	 * not worth it.  It is more important that frees of previous
+	 * cross-domain allocations do not blow up the cache.
+	 */
+	zdom = zone_domain_lock(zone, domain);
+	if ((bucket = zone_fetch_bucket(zone, zdom, false)) != NULL) {
+		item = bucket->ub_bucket[bucket->ub_cnt - 1];
+#ifdef INVARIANTS
+		bucket->ub_bucket[bucket->ub_cnt - 1] = NULL;
+#endif
+		bucket->ub_cnt--;
+		zone_put_bucket(zone, domain, bucket, udata, true);
+		item = item_ctor(zone, zone->uz_flags, zone->uz_size, udata,
+		    flags, item);
+		if (item != NULL) {
+			KASSERT(item_domain(item) == domain,
+			    ("%s: bucket cache item %p from wrong domain",
+			    __func__, item));
+			counter_u64_add(zone->uz_allocs, 1);
+		}
+		return (item);
+	}
+	ZDOM_UNLOCK(zdom);
 	return (zone_alloc_item(zone, udata, domain, flags));
+#else
+	return (uma_zalloc_arg(zone, udata, flags));
+#endif
 }
 
 /*
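
For illustration, a minimal sketch of a consumer of uma_zalloc_domain() under this change. The struct, zone, and helper names (struct foo, foo_zone, foo_alloc) are hypothetical and not part of the patch; only the UMA calls and the first-touch requirement asserted above are taken from the source.

    #include <sys/param.h>
    #include <sys/malloc.h>
    #include <vm/uma.h>

    /* Hypothetical per-domain object; name and layout are illustrative. */
    struct foo {
            int     f_value;
    };

    static uma_zone_t foo_zone;

    static void
    foo_zone_init(void)
    {
            /*
             * uma_zalloc_domain() now asserts UMA_ZONE_FIRSTTOUCH and
             * rejects SMR zones, so the zone must be created first-touch.
             */
            foo_zone = uma_zcreate("foo", sizeof(struct foo), NULL, NULL,
                NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_FIRSTTOUCH);
    }

    static struct foo *
    foo_alloc(int domain)
    {
            /*
             * With this patch, the allocation is first attempted from the
             * zone's per-domain bucket cache; on a miss it falls back to
             * zone_alloc_item(), i.e. the keg.  On single-domain systems
             * it simply forwards to uma_zalloc_arg().
             */
            return (uma_zalloc_domain(foo_zone, NULL, domain, M_WAITOK));
    }

Note that the non-empty-bucket guarantee formerly asserted in cache_fetch_bucket() now lives as a KASSERT inside zone_fetch_bucket() itself, which is what lets both cache_fetch_bucket() and the new bucket-cache path in uma_zalloc_domain() rely on it without repeating the check.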