uma: Add UMA_ZONE_UNMANAGED

Allow a zone to opt out of cache size management.  In particular,
uma_reclaim() and uma_reclaim_domain() will not reclaim any memory from
the zone, nor will uma_timeout() purge cached items if the zone is idle.
This effectively means that the zone consumer has control over when
items are reclaimed from the cache.  Note that uma_zone_reclaim()
will still reclaim cached items from an unmanaged zone.

Reviewed by:	hselasky, kib
MFC after:	3 weeks
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D34142
This commit is contained in:
Mark Johnston 2022-02-15 08:57:22 -05:00
parent 828e50092a
commit 389a3fa693
3 changed files with 46 additions and 39 deletions

View File

@ -25,7 +25,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd April 14, 2021
.Dd February 15, 2022
.Dt UMA 9
.Os
.Sh NAME
@ -324,6 +324,12 @@ The zone is for the VM subsystem.
Items in this zone must be contiguous in physical address space.
Items will follow normal alignment constraints and may span page boundaries
between pages with contiguous physical addresses.
.It Dv UMA_ZONE_UNMANAGED
By default, UMA zone caches are shrunk to help resolve free page shortages.
Cached items that have not been used for a long period may also be freed from
the zone.
When this flag is set, the system will not reclaim memory from the zone's
caches.
.El
.Pp
Zones can be destroyed using

View File

@ -233,6 +233,10 @@ uma_zone_t uma_zcache_create(const char *name, int size, uma_ctor ctor,
* These flags share space with UMA_ZFLAGs in uma_int.h. Be careful not to
* overlap when adding new features.
*/
#define UMA_ZONE_UNMANAGED 0x0001 /*
* Don't regulate the cache size, even
* under memory pressure.
*/
#define UMA_ZONE_ZINIT 0x0002 /* Initialize with zeros */
#define UMA_ZONE_CONTIG 0x0004 /*
* Physical memory underlying an object

View File

@ -1223,10 +1223,12 @@ zone_timeout(uma_zone_t zone, void *unused)
trim:
/* Trim caches not used for a long time. */
for (int i = 0; i < vm_ndomains; i++) {
if (bucket_cache_reclaim_domain(zone, false, false, i) &&
(zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
keg_drain(zone->uz_keg, i);
if ((zone->uz_flags & UMA_ZONE_UNMANAGED) == 0) {
for (int i = 0; i < vm_ndomains; i++) {
if (bucket_cache_reclaim_domain(zone, false, false, i) &&
(zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
keg_drain(zone->uz_keg, i);
}
}
}
@ -1735,24 +1737,6 @@ zone_reclaim(uma_zone_t zone, int domain, int waitok, bool drain)
ZONE_UNLOCK(zone);
}
/*
 * zone_foreach() callback: fully drain the zone's caches for the NUMA
 * domain encoded in "arg" (UMA_ANYDOMAIN means all domains).
 */
static void
zone_drain(uma_zone_t zone, void *arg)
{

	zone_reclaim(zone, (int)(uintptr_t)arg, M_NOWAIT, true);
}
/*
 * zone_foreach() callback: trim (rather than fully drain) the zone's
 * caches for the NUMA domain encoded in "arg".
 */
static void
zone_trim(uma_zone_t zone, void *arg)
{

	zone_reclaim(zone, (int)(uintptr_t)arg, M_NOWAIT, false);
}
/*
* Allocate a new slab for a keg and inserts it into the partial slab list.
* The keg should be unlocked on entry. If the allocation succeeds it will
@ -3548,7 +3532,8 @@ uma_zalloc_debug(uma_zone_t zone, void **itemp, void *udata, int flags)
zone->uz_ctor(item, zone->uz_size, udata,
flags) != 0) {
counter_u64_add(zone->uz_fails, 1);
zone->uz_fini(item, zone->uz_size);
if (zone->uz_fini != NULL)
zone->uz_fini(item, zone->uz_size);
*itemp = NULL;
return (error);
}
@ -5202,6 +5187,21 @@ uma_zone_memory(uma_zone_t zone)
return (sz * PAGE_SIZE);
}
/*
 * Arguments forwarded to uma_reclaim_domain_cb() through zone_foreach()'s
 * single opaque-pointer argument.
 */
struct uma_reclaim_args {
	int domain;	/* NUMA domain to reclaim from */
	int req;	/* UMA_RECLAIM_* request type */
};
/*
 * zone_foreach() callback used by uma_reclaim_domain() to reclaim items
 * from one zone.  Zones created with UMA_ZONE_UNMANAGED opt out of
 * system-initiated reclamation and are skipped here; they can still be
 * reclaimed explicitly via uma_zone_reclaim().
 */
static void
uma_reclaim_domain_cb(uma_zone_t zone, void *arg)
{
	struct uma_reclaim_args *args = arg;

	if ((zone->uz_flags & UMA_ZONE_UNMANAGED) != 0)
		return;
	uma_zone_reclaim_domain(zone, args->req, args->domain);
}
/* See uma.h */
void
uma_reclaim(int req)
@ -5212,23 +5212,23 @@ uma_reclaim(int req)
void
uma_reclaim_domain(int req, int domain)
{
void *arg;
struct uma_reclaim_args args;
bucket_enable();
arg = (void *)(uintptr_t)domain;
args.domain = domain;
args.req = req;
sx_slock(&uma_reclaim_lock);
switch (req) {
case UMA_RECLAIM_TRIM:
zone_foreach(zone_trim, arg);
break;
case UMA_RECLAIM_DRAIN:
zone_foreach(zone_drain, arg);
zone_foreach(uma_reclaim_domain_cb, &args);
break;
case UMA_RECLAIM_DRAIN_CPU:
zone_foreach(zone_drain, arg);
zone_foreach(uma_reclaim_domain_cb, &args);
pcpu_cache_drain_safe(NULL);
zone_foreach(zone_drain, arg);
zone_foreach(uma_reclaim_domain_cb, &args);
break;
default:
panic("unhandled reclamation request %d", req);
@ -5239,8 +5239,8 @@ uma_reclaim_domain(int req, int domain)
* we visit again so that we can free pages that are empty once other
* zones are drained. We have to do the same for buckets.
*/
zone_drain(slabzones[0], arg);
zone_drain(slabzones[1], arg);
uma_zone_reclaim_domain(slabzones[0], UMA_RECLAIM_DRAIN, domain);
uma_zone_reclaim_domain(slabzones[1], UMA_RECLAIM_DRAIN, domain);
bucket_zone_drain(domain);
sx_sunlock(&uma_reclaim_lock);
}
@ -5283,19 +5283,16 @@ uma_zone_reclaim(uma_zone_t zone, int req)
void
uma_zone_reclaim_domain(uma_zone_t zone, int req, int domain)
{
void *arg;
arg = (void *)(uintptr_t)domain;
switch (req) {
case UMA_RECLAIM_TRIM:
zone_trim(zone, arg);
zone_reclaim(zone, domain, M_NOWAIT, false);
break;
case UMA_RECLAIM_DRAIN:
zone_drain(zone, arg);
zone_reclaim(zone, domain, M_NOWAIT, true);
break;
case UMA_RECLAIM_DRAIN_CPU:
pcpu_cache_drain_safe(zone);
zone_drain(zone, arg);
zone_reclaim(zone, domain, M_NOWAIT, true);
break;
default:
panic("unhandled reclamation request %d", req);