uma: reorganize flags

- Garbage collect UMA_ZONE_PAGEABLE & UMA_ZONE_STATIC.
- Move flag VTOSLAB from public to private.
- Introduce public NOTPAGE flag and make HASH private.
- Introduce public NOTOUCH flag and make OFFPAGE private.
- Update man page.

The net effect of this should be to make the contract with clients
clearer: clients choose constraints, and UMA figures out how to
implement them.  This also removes the confusing double meaning of
OFFPAGE.
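As an illustration of the new contract (a sketch only; the zone name, item
size, and init function below are hypothetical, not part of this commit), a
client that cannot tolerate UMA touching item memory states just that
constraint, and UMA privately selects OFFPAGE plus HASH or VTOSLAB
book-keeping:

#include <sys/param.h>
#include <vm/uma.h>

static uma_zone_t dmabuf_zone;		/* hypothetical zone */

static void
dmabuf_zone_init(void)
{
	/*
	 * State the constraint only: UMA may not read or write items.
	 * Whether that means an offpage slab header found via a hash
	 * table or via vtoslab is now a private detail of UMA.
	 */
	dmabuf_zone = uma_zcreate("dmabuf", 4096, NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_NOTOUCH);
}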

Reviewed by:	jeff, markj
Sponsored by:	Dell EMC Isilon
Differential Revision:	https://reviews.freebsd.org/D23016
commit 54c5ae804f
parent 1459a8eb24
Author:	Ryan Libby
Date:	2020-01-09 02:03:03 +00:00

5 changed files with 133 additions and 129 deletions

--- a/share/man/man9/uma.9
+++ b/share/man/man9/uma.9

@@ -25,7 +25,7 @@
 .\"
 .\" $FreeBSD$
 .\"
-.Dd November 22, 2019
+.Dd January 8, 2020
 .Dt UMA 9
 .Os
 .Sh NAME
@@ -292,18 +292,12 @@ To obtain zeroed memory from a PCPU zone, use the
 .Fn uma_zalloc_pcpu
 function and its variants instead, and pass
 .Dv M_ZERO .
-.It Dv UMA_ZONE_OFFPAGE
-By default book-keeping of items within a slab is done in the slab page itself.
-This flag explicitly tells subsystem that book-keeping structure should be
-allocated separately from special internal zone.
-This flag requires either
-.Dv UMA_ZONE_VTOSLAB
-or
-.Dv UMA_ZONE_HASH ,
-since subsystem requires a mechanism to find a book-keeping structure
-to an item being freed.
-The subsystem may choose to prefer offpage book-keeping for certain zones
-implicitly.
+.It Dv UMA_ZONE_NOTOUCH
+The UMA subsystem may not directly touch (i.e. read or write) the slab memory.
+Otherwise, by default, book-keeping of items within a slab may be done in the
+slab page itself, and
+.Dv INVARIANTS
+kernels may also do use-after-free checking by accessing the slab memory.
 .It Dv UMA_ZONE_ZINIT
 The zone will have its
 .Ft uma_init
@@ -317,13 +311,11 @@ A zone with
 .Dv UMA_ZONE_ZINIT
 flag would not return zeroed memory on every
 .Fn uma_zalloc .
-.It Dv UMA_ZONE_HASH
-The zone should use an internal hash table to find slab book-keeping
-structure where an allocation being freed belongs to.
-.It Dv UMA_ZONE_VTOSLAB
-The zone should use special field of
-.Vt vm_page_t
-to find slab book-keeping structure where an allocation being freed belongs to.
+.It Dv UMA_ZONE_NOTPAGE
+An allocator function will be supplied with
+.Fn uma_zone_set_allocf
+and the memory that it returns may not be kernel virtual memory backed by VM
+pages in the page array.
 .It Dv UMA_ZONE_MALLOC
 The zone is for the
 .Xr malloc 9

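As the new UMA_ZONE_NOTPAGE text above describes, such a zone pairs the flag
with a back-end allocator installed via uma_zone_set_allocf().  A hedged
sketch follows; devmem_map_contig() and the zone parameters are invented for
illustration, and the allocator signature shown is an assumption based on
the uma_alloc typedef of this era:

#include <sys/param.h>
#include <vm/uma.h>

/* Hypothetical backend returning memory with no vm_page backing. */
static void *
devmem_alloc(uma_zone_t zone, vm_size_t size, int domain, uint8_t *pflag,
    int wait)
{
	*pflag = 0;
	return (devmem_map_contig(size, wait));	/* hypothetical helper */
}

static uma_zone_t devmem_zone;

static void
devmem_zone_init(void)
{
	/* NOTPAGE: UMA must not assume vtoslab() works on this memory. */
	devmem_zone = uma_zcreate("devmem", 256, NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_NOTPAGE);
	uma_zone_set_allocf(devmem_zone, devmem_alloc);
}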
--- a/sys/arm/arm/busdma_machdep.c
+++ b/sys/arm/arm/busdma_machdep.c

@@ -244,7 +244,7 @@ busdma_init(void *dummy)
 	 * atomic ops on uma_slab_t fields and safety of this
 	 * operation is not guaranteed for write-back caches
 	 */
-	uma_flags = UMA_ZONE_OFFPAGE;
+	uma_flags = UMA_ZONE_NOTOUCH;
 #endif
 	/*
 	 * Create a cache of buffers in uncacheable memory, to implement the

--- a/sys/vm/uma.h
+++ b/sys/vm/uma.h

@@ -232,14 +232,10 @@ uma_zone_t uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
  * Definitions for uma_zcreate flags
  *
  * These flags share space with UMA_ZFLAGs in uma_int.h.  Be careful not to
- * overlap when adding new features.  0xff000000 is in use by uma_int.h.
+ * overlap when adding new features.
  */
-#define UMA_ZONE_PAGEABLE	0x0001	/* Return items not fully backed by
-					   physical memory XXX Not yet */
 #define UMA_ZONE_ZINIT		0x0002	/* Initialize with zeros */
-#define UMA_ZONE_STATIC		0x0004	/* Statically sized zone */
-#define UMA_ZONE_OFFPAGE	0x0008	/* Force the slab structure allocation
-					   off of the real memory */
+#define UMA_ZONE_NOTOUCH	0x0008	/* UMA may not access the memory */
 #define UMA_ZONE_MALLOC		0x0010	/* For use by malloc(9) only! */
 #define UMA_ZONE_NOFREE		0x0020	/* Do not free slabs of this type! */
 #define UMA_ZONE_MTXCLASS	0x0040	/* Create a new lock class */
@@ -247,20 +243,17 @@ uma_zone_t uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
 					 * Used for internal vm datastructures
 					 * only.
 					 */
-#define UMA_ZONE_HASH		0x0100	/*
-					 * Use a hash table instead of caching
-					 * information in the vm_page.
-					 */
+#define UMA_ZONE_NOTPAGE	0x0100	/* allocf memory not vm pages */
 #define UMA_ZONE_SECONDARY	0x0200	/* Zone is a Secondary Zone */
 #define UMA_ZONE_NOBUCKET	0x0400	/* Do not use buckets. */
 #define UMA_ZONE_MAXBUCKET	0x0800	/* Use largest buckets. */
-#define UMA_ZONE_CACHESPREAD	0x1000	/*
+#define UMA_ZONE_MINBUCKET	0x1000	/* Use smallest buckets. */
+#define UMA_ZONE_CACHESPREAD	0x2000	/*
 					 * Spread memory start locations across
 					 * all possible cache lines.  May
 					 * require many virtually contiguous
 					 * backend pages and can fail early.
 					 */
-#define UMA_ZONE_VTOSLAB	0x2000	/* Zone uses vtoslab for lookup. */
 #define UMA_ZONE_NODUMP		0x4000	/*
 					 * Zone's pages will not be included in
 					 * mini-dumps.
@@ -268,9 +261,9 @@ uma_zone_t uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
 #define UMA_ZONE_PCPU		0x8000	/*
 					 * Allocates mp_maxid + 1 slabs of PAGE_SIZE
 					 */
-#define UMA_ZONE_MINBUCKET	0x10000	/* Use smallest buckets. */
-#define UMA_ZONE_FIRSTTOUCH	0x20000	/* First touch NUMA policy */
-#define UMA_ZONE_ROUNDROBIN	0x40000	/* Round-robin NUMA policy. */
+#define UMA_ZONE_FIRSTTOUCH	0x10000	/* First touch NUMA policy */
+#define UMA_ZONE_ROUNDROBIN	0x20000	/* Round-robin NUMA policy. */
+/* In use by UMA_ZFLAGs:	0xffe00000 */
 
 /*
  * These flags are shared between the keg and zone.  In zones wishing to add
@@ -278,9 +271,9 @@ uma_zone_t uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
  * physical parameters of the request and may not be provided by the consumer.
  */
 #define UMA_ZONE_INHERIT						\
-    (UMA_ZONE_OFFPAGE | UMA_ZONE_MALLOC | UMA_ZONE_NOFREE |		\
-    UMA_ZONE_HASH | UMA_ZONE_VTOSLAB | UMA_ZONE_PCPU |			\
-    UMA_ZONE_FIRSTTOUCH | UMA_ZONE_ROUNDROBIN)
+    (UMA_ZONE_NOTOUCH | UMA_ZONE_MALLOC | UMA_ZONE_NOFREE |		\
+    UMA_ZONE_NOTPAGE | UMA_ZONE_PCPU | UMA_ZONE_FIRSTTOUCH |		\
+    UMA_ZONE_ROUNDROBIN)
 
 /* Definitions for align */
 #define UMA_ALIGN_PTR	(sizeof(void *) - 1)	/* Alignment fit for ptr */

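Since the public UMA_ZONE_* flags share their 32-bit field with the private
UMA_ZFLAG_* range recorded in the comment added above (0xffe00000), an
overlap would silently corrupt flag handling.  A compile-time check along
these lines could enforce the split (illustrative only, not part of this
commit):

#include <sys/systm.h>	/* CTASSERT */

CTASSERT(((UMA_ZONE_ZINIT | UMA_ZONE_NOTOUCH | UMA_ZONE_MALLOC |
    UMA_ZONE_NOFREE | UMA_ZONE_MTXCLASS | UMA_ZONE_VM | UMA_ZONE_NOTPAGE |
    UMA_ZONE_SECONDARY | UMA_ZONE_NOBUCKET | UMA_ZONE_MAXBUCKET |
    UMA_ZONE_MINBUCKET | UMA_ZONE_CACHESPREAD | UMA_ZONE_NODUMP |
    UMA_ZONE_PCPU | UMA_ZONE_FIRSTTOUCH | UMA_ZONE_ROUNDROBIN) &
    0xffe00000) == 0);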
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c

@@ -744,7 +744,7 @@ zone_timeout(uma_zone_t zone, void *unused)
 	uma_keg_t keg;
 	u_int slabs, pages;
 
-	if ((zone->uz_flags & UMA_ZONE_HASH) == 0)
+	if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0)
 		goto update_wss;
 
 	keg = zone->uz_keg;
@@ -1163,7 +1163,7 @@ keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
 #endif
 		keg->uk_fini(slab_item(slab, keg, i), keg->uk_size);
 	}
-	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
+	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
 	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
 	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
@@ -1200,7 +1200,7 @@ keg_drain(uma_keg_t keg)
 		/* We have nowhere to free these to. */
 		if (slab->us_flags & UMA_SLAB_BOOT)
 			continue;
-		if (keg->uk_flags & UMA_ZONE_HASH)
+		if (keg->uk_flags & UMA_ZFLAG_HASH)
 			UMA_HASH_REMOVE(&keg->uk_hash, slab);
 		n++;
 		LIST_REMOVE(slab, us_link);
@@ -1296,7 +1296,7 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
 	allocf = keg->uk_allocf;
 	slab = NULL;
 	mem = NULL;
-	if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
+	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
 		slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, aflags);
 		if (slab == NULL)
 			goto fail;
@@ -1321,23 +1321,23 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
 	size = keg->uk_ppera * PAGE_SIZE;
 	mem = allocf(zone, size, domain, &sflags, aflags);
 	if (mem == NULL) {
-		if (keg->uk_flags & UMA_ZONE_OFFPAGE)
+		if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
 			zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
 		goto fail;
 	}
 	uma_total_inc(size);
 
 	/* For HASH zones all pages go to the same uma_domain. */
-	if ((keg->uk_flags & UMA_ZONE_HASH) != 0)
+	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0)
 		domain = 0;
 
 	/* Point the slab into the allocated memory */
-	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
+	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE))
 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
 	else
 		((uma_hash_slab_t)slab)->uhs_data = mem;
 
-	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
+	if (keg->uk_flags & UMA_ZFLAG_VTOSLAB)
 		for (i = 0; i < keg->uk_ppera; i++)
 			vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
 			    zone, slab);
@@ -1366,7 +1366,7 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
 	CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
 	    slab, keg->uk_name, keg);
 
-	if (keg->uk_flags & UMA_ZONE_HASH)
+	if (keg->uk_flags & UMA_ZFLAG_HASH)
 		UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
 
 	/*
@@ -1737,9 +1737,10 @@ keg_small_init(uma_keg_t keg)
 	 * squeeze one more item in for very particular sizes if we were
 	 * to loop and reduce the bitsize if there is waste.
 	 */
-	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
+	if (keg->uk_flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU)) {
+		keg->uk_flags |= UMA_ZFLAG_OFFPAGE;
 		shsize = 0;
-	else
+	} else
 		shsize = slab_sizeof(slabsize / rsize);
 
 	if (rsize <= slabsize - shsize)
@@ -1765,8 +1766,12 @@ keg_small_init(uma_keg_t keg)
 	 * of UMA_ZONE_VM, which clearly forbids it.
 	 */
 	if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
-	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY))
+	    (keg->uk_flags & UMA_ZFLAG_CACHEONLY)) {
+		KASSERT((keg->uk_flags & UMA_ZFLAG_OFFPAGE) == 0,
+		    ("%s: incompatible flags 0x%b", __func__, keg->uk_flags,
+		    PRINT_UMA_ZFLAGS));
 		return;
+	}
 
 	/*
 	 * See if using an OFFPAGE slab will limit our waste.  Only do
@@ -1794,13 +1799,15 @@ keg_small_init(uma_keg_t keg)
 		 * hash to find slabs.  If the zone was explicitly created
 		 * OFFPAGE we can't necessarily touch the memory.
 		 */
-		if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0)
-			keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
+		keg->uk_flags |= UMA_ZFLAG_OFFPAGE;
 	}
 
-	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
-	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
-		keg->uk_flags |= UMA_ZONE_HASH;
+	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0) {
+		if ((keg->uk_flags & UMA_ZONE_NOTPAGE) != 0)
+			keg->uk_flags |= UMA_ZFLAG_HASH;
+		else
+			keg->uk_flags |= UMA_ZFLAG_VTOSLAB;
+	}
 }
 
 /*
@@ -1827,7 +1834,7 @@ keg_large_init(uma_keg_t keg)
 	keg->uk_rsize = keg->uk_size;
 
 	/* Check whether we have enough space to not do OFFPAGE. */
-	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0 &&
+	if ((keg->uk_flags & UMA_ZONE_NOTOUCH) == 0 &&
 	    PAGE_SIZE * keg->uk_ppera - keg->uk_rsize <
 	    slab_sizeof(SLAB_MIN_SETSIZE)) {
 		/*
@@ -1836,14 +1843,17 @@ keg_large_init(uma_keg_t keg)
 		 * slab header.
 		 */
 		if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
-			keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
+			keg->uk_flags |= UMA_ZFLAG_OFFPAGE;
 		else
 			keg->uk_ppera++;
 	}
 
-	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
-	    (keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
-		keg->uk_flags |= UMA_ZONE_HASH;
+	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0) {
+		if ((keg->uk_flags & UMA_ZONE_NOTPAGE) != 0)
+			keg->uk_flags |= UMA_ZFLAG_HASH;
+		else
+			keg->uk_flags |= UMA_ZFLAG_VTOSLAB;
+	}
 }
 
 static void
@@ -1875,7 +1885,7 @@ keg_cachespread_init(uma_keg_t keg)
 	keg->uk_rsize = rsize;
 	keg->uk_ppera = pages;
 	keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
-	keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
+	keg->uk_flags |= UMA_ZFLAG_OFFPAGE | UMA_ZFLAG_VTOSLAB;
 	KASSERT(keg->uk_ipers <= SLAB_MAX_SETSIZE,
 	    ("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
 	    keg->uk_ipers));
@@ -1926,13 +1936,10 @@ keg_ctor(void *mem, int size, void *udata, int flags)
 		keg->uk_init = zero_init;
 
 	if (arg->flags & UMA_ZONE_MALLOC)
-		keg->uk_flags |= UMA_ZONE_VTOSLAB;
+		keg->uk_flags |= UMA_ZFLAG_VTOSLAB;
 
-	if (arg->flags & UMA_ZONE_PCPU)
-#ifdef SMP
-		keg->uk_flags |= UMA_ZONE_OFFPAGE;
-#else
-		keg->uk_flags &= ~UMA_ZONE_PCPU;
+#ifndef SMP
+	keg->uk_flags &= ~UMA_ZONE_PCPU;
 #endif
 
 	if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
@@ -1953,13 +1960,13 @@ keg_ctor(void *mem, int size, void *udata, int flags)
 	 */
 #ifdef NUMA
 	if ((keg->uk_flags &
-	    (UMA_ZONE_HASH | UMA_ZONE_VM | UMA_ZONE_ROUNDROBIN)) == 0)
+	    (UMA_ZFLAG_HASH | UMA_ZONE_VM | UMA_ZONE_ROUNDROBIN)) == 0)
 		keg->uk_flags |= UMA_ZONE_FIRSTTOUCH;
 	else if ((keg->uk_flags & UMA_ZONE_FIRSTTOUCH) == 0)
 		keg->uk_flags |= UMA_ZONE_ROUNDROBIN;
 #endif
 
-	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
+	if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
 		keg->uk_slabzone = slabzone;
 
 	/*
@@ -1997,7 +2004,7 @@ keg_ctor(void *mem, int size, void *udata, int flags)
 	 * figure out where in each page it goes.  See slab_sizeof
 	 * definition.
 	 */
-	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
+	if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE)) {
 		size_t shsize;
 
 		shsize = slab_sizeof(keg->uk_ipers);
@@ -2014,7 +2021,7 @@ keg_ctor(void *mem, int size, void *udata, int flags)
 		    zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
 	}
 
-	if (keg->uk_flags & UMA_ZONE_HASH)
+	if (keg->uk_flags & UMA_ZFLAG_HASH)
 		hash_alloc(&keg->uk_hash, 0);
 
 	CTR3(KTR_UMA, "keg_ctor %p zone %s(%p)\n", keg, zone->uz_name, zone);
@@ -2088,7 +2095,7 @@ zone_alloc_sysctl(uma_zone_t zone, void *unused)
 	/*
 	 * keg if present.
 	 */
-	if ((zone->uz_flags & UMA_ZONE_HASH) == 0)
+	if ((zone->uz_flags & UMA_ZFLAG_HASH) == 0)
 		domains = vm_ndomains;
 	else
 		domains = 1;
@@ -2733,11 +2740,9 @@ uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
 	 * or fini procedures, no dependency on the initial value of the
 	 * memory, and no (legitimate) use of the memory after free.  Note,
 	 * the ctor and dtor do not need to be empty.
-	 *
-	 * XXX UMA_ZONE_OFFPAGE.
 	 */
-	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
-	    uminit == NULL && fini == NULL) {
+	if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOTOUCH |
+	    UMA_ZONE_NOFREE))) && uminit == NULL && fini == NULL) {
 		args.uminit = trash_init;
 		args.fini = trash_fini;
 	}
@@ -3220,7 +3225,7 @@ keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
 	uint32_t reserve;
 
 	/* HASH has a single free list. */
-	if ((keg->uk_flags & UMA_ZONE_HASH) != 0)
+	if ((keg->uk_flags & UMA_ZFLAG_HASH) != 0)
 		domain = 0;
 
 	KEG_LOCK(keg, domain);
@@ -4022,15 +4027,15 @@ zone_release(void *arg, void **bucket, int cnt)
 	zone = arg;
 	keg = zone->uz_keg;
 	lock = NULL;
-	if (__predict_false((zone->uz_flags & UMA_ZONE_HASH) != 0))
+	if (__predict_false((zone->uz_flags & UMA_ZFLAG_HASH) != 0))
 		lock = KEG_LOCK(keg, 0);
 	for (i = 0; i < cnt; i++) {
 		item = bucket[i];
-		if (__predict_true((zone->uz_flags & UMA_ZONE_VTOSLAB) != 0)) {
+		if (__predict_true((zone->uz_flags & UMA_ZFLAG_VTOSLAB) != 0)) {
 			slab = vtoslab((vm_offset_t)item);
 		} else {
 			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
-			if ((zone->uz_flags & UMA_ZONE_HASH) != 0)
+			if ((zone->uz_flags & UMA_ZFLAG_HASH) != 0)
 				slab = hash_sfind(&keg->uk_hash, mem);
 			else
 				slab = (uma_slab_t)(mem + keg->uk_pgoff);
@@ -4771,7 +4776,7 @@ sysctl_handle_uma_slab_efficiency(SYSCTL_HANDLER_ARGS)
 	int avail, effpct, total;
 
 	total = keg->uk_ppera * PAGE_SIZE;
-	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) != 0)
+	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0)
 		total += slab_sizeof(SLAB_MAX_SETSIZE);
 	/*
 	 * We consider the client's requested size and alignment here, not the
@@ -4811,10 +4816,10 @@ uma_dbg_getslab(uma_zone_t zone, void *item)
 	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
 	if ((zone->uz_flags & UMA_ZFLAG_CACHE) != 0)
 		return (NULL);
-	if (zone->uz_flags & UMA_ZONE_VTOSLAB)
+	if (zone->uz_flags & UMA_ZFLAG_VTOSLAB)
 		return (vtoslab((vm_offset_t)mem));
 	keg = zone->uz_keg;
-	if ((keg->uk_flags & UMA_ZONE_HASH) == 0)
+	if ((keg->uk_flags & UMA_ZFLAG_HASH) == 0)
 		return ((uma_slab_t)(mem + keg->uk_pgoff));
 	KEG_LOCK(keg, 0);
 	slab = hash_sfind(&keg->uk_hash, mem);

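The same selection pattern recurs in keg_small_init() and keg_large_init()
above.  Condensed into one function (a restatement for clarity, not code
from the commit), the mapping from public constraints to private
book-keeping flags is:

/*
 * Sketch: public constraints in, private implementation flags out.
 * Mirrors the logic added to the keg init functions above.
 */
static uint32_t
keg_choose_bookkeeping(uint32_t flags)
{
	/* NOTOUCH and PCPU both force the slab header off the page. */
	if (flags & (UMA_ZONE_NOTOUCH | UMA_ZONE_PCPU))
		flags |= UMA_ZFLAG_OFFPAGE;
	/* Offpage headers need a way back from an item to its slab. */
	if (flags & UMA_ZFLAG_OFFPAGE) {
		if (flags & UMA_ZONE_NOTPAGE)
			flags |= UMA_ZFLAG_HASH;	/* no vm_page to use */
		else
			flags |= UMA_ZFLAG_VTOSLAB;
	}
	return (flags);
}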
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h

@@ -139,6 +139,64 @@
 
 /* Max waste percentage before going to off page slab management */
 #define UMA_MAX_WASTE	10
 
+/*
+ * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
+ */
+#define	UMA_ZFLAG_OFFPAGE	0x00200000	/*
+						 * Force the slab structure
+						 * allocation off of the real
+						 * memory.
+						 */
+#define	UMA_ZFLAG_HASH		0x00400000	/*
+						 * Use a hash table instead of
+						 * caching information in the
+						 * vm_page.
+						 */
+#define	UMA_ZFLAG_VTOSLAB	0x00800000	/*
+						 * Zone uses vtoslab for
+						 * lookup.
+						 */
+#define	UMA_ZFLAG_CTORDTOR	0x01000000	/* Zone has ctor/dtor set. */
+#define	UMA_ZFLAG_LIMIT		0x02000000	/* Zone has limit set. */
+#define	UMA_ZFLAG_CACHE		0x04000000	/* uma_zcache_create()d it */
+#define	UMA_ZFLAG_RECLAIMING	0x08000000	/* Running zone_reclaim(). */
+#define	UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
+#define	UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
+#define	UMA_ZFLAG_TRASH		0x40000000	/* Add trash ctor/dtor. */
+#define	UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */
+
+#define	UMA_ZFLAG_INHERIT						\
+    (UMA_ZFLAG_OFFPAGE | UMA_ZFLAG_HASH | UMA_ZFLAG_VTOSLAB |		\
+     UMA_ZFLAG_BUCKET | UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY)
+
+#define	PRINT_UMA_ZFLAGS	"\20"	\
+    "\40CACHEONLY"			\
+    "\37TRASH"				\
+    "\36INTERNAL"			\
+    "\35BUCKET"				\
+    "\34RECLAIMING"			\
+    "\33CACHE"				\
+    "\32LIMIT"				\
+    "\31CTORDTOR"			\
+    "\30VTOSLAB"			\
+    "\27HASH"				\
+    "\26OFFPAGE"			\
+    "\22ROUNDROBIN"			\
+    "\21FIRSTTOUCH"			\
+    "\20PCPU"				\
+    "\17NODUMP"				\
+    "\16CACHESPREAD"			\
+    "\15MINBUCKET"			\
+    "\14MAXBUCKET"			\
+    "\13NOBUCKET"			\
+    "\12SECONDARY"			\
+    "\11NOTPAGE"			\
+    "\10VM"				\
+    "\7MTXCLASS"			\
+    "\6NOFREE"				\
+    "\5MALLOC"				\
+    "\4NOTOUCH"				\
+    "\2ZINIT"
+
 /*
  * Hash table for freed address -> slab translation.
@@ -373,7 +431,7 @@ static inline void *
 slab_data(uma_slab_t slab, uma_keg_t keg)
 {
 
-	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0)
+	if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) == 0)
 		return ((void *)((uintptr_t)slab - keg->uk_pgoff));
 	else
 		return (((uma_hash_slab_t)slab)->uhs_data);
@@ -476,50 +534,6 @@ struct uma_zone {
 	/* uz_domain follows here. */
 };
 
-/*
- * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
- */
-#define	UMA_ZFLAG_CTORDTOR	0x01000000	/* Zone has ctor/dtor set. */
-#define	UMA_ZFLAG_LIMIT		0x02000000	/* Zone has limit set. */
-#define	UMA_ZFLAG_CACHE		0x04000000	/* uma_zcache_create()d it */
-#define	UMA_ZFLAG_RECLAIMING	0x08000000	/* Running zone_reclaim(). */
-#define	UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
-#define	UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
-#define	UMA_ZFLAG_TRASH		0x40000000	/* Add trash ctor/dtor. */
-#define	UMA_ZFLAG_CACHEONLY	0x80000000	/* Don't ask VM for buckets. */
-
-#define	UMA_ZFLAG_INHERIT						\
-    (UMA_ZFLAG_INTERNAL | UMA_ZFLAG_CACHEONLY | UMA_ZFLAG_BUCKET)
-
-#define	PRINT_UMA_ZFLAGS	"\20"	\
-    "\40CACHEONLY"			\
-    "\37TRASH"				\
-    "\36INTERNAL"			\
-    "\35BUCKET"				\
-    "\34RECLAIMING"			\
-    "\33CACHE"				\
-    "\32LIMIT"				\
-    "\31CTORDTOR"			\
-    "\23ROUNDROBIN"			\
-    "\22FIRSTTOUCH"			\
-    "\21MINBUCKET"			\
-    "\20PCPU"				\
-    "\17NODUMP"				\
-    "\16VTOSLAB"			\
-    "\15CACHESPREAD"			\
-    "\14MAXBUCKET"			\
-    "\13NOBUCKET"			\
-    "\12SECONDARY"			\
-    "\11HASH"				\
-    "\10VM"				\
-    "\7MTXCLASS"			\
-    "\6NOFREE"				\
-    "\5MALLOC"				\
-    "\4OFFPAGE"				\
-    "\3STATIC"				\
-    "\2ZINIT"				\
-    "\1PAGEABLE"
-
 /*
  * Macros for interpreting the uz_items field.  20 bits of sleeper count
  * and 44 bit of item count.
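PRINT_UMA_ZFLAGS is a bit-name string for the kernel printf(9) %b
conversion: the leading \20 selects hexadecimal output, and each entry is a
bit number followed by that bit's name.  The KASSERT added to
keg_small_init() above uses it; a standalone fragment showing the same idiom
(assuming a uma_keg_t keg in scope):

/* Prints e.g. "keg flags: 0x30<NOFREE,MALLOC>" for bits 5 and 6 set. */
printf("keg flags: 0x%b\n", keg->uk_flags, PRINT_UMA_ZFLAGS);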