- Add a per-zone lock for zones without kegs.

- Be more explicit about zone vs keg locking.  This functionally changes
  almost nothing.
- Add a size parameter to uma_zcache_create() so we can size the buckets.
- Pass the zone to bucket_alloc() so it can modify allocation flags
  as appropriate.
- Fix a bug in zone_alloc_bucket() where I missed an address-of operator
  in a failure case.  (Found by pho)

Sponsored by:	EMC / Isilon Storage Division
This commit is contained in:
Jeff Roberson 2013-06-20 19:08:12 +00:00
parent 707d0cef66
commit af5263743c
4 changed files with 104 additions and 105 deletions

View File

@ -234,7 +234,7 @@ int uma_zsecond_add(uma_zone_t zone, uma_zone_t master);
* zones. The 'arg' parameter is passed to import/release and is caller
* specific.
*/
uma_zone_t uma_zcache_create(char *name, uma_ctor ctor, uma_dtor dtor,
uma_zone_t uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
uma_init zinit, uma_fini zfini, uma_import zimport,
uma_release zrelease, void *arg, int flags);

View File

@ -246,8 +246,8 @@ static void *zone_alloc_item(uma_zone_t, void *, int);
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
static void bucket_enable(void);
static void bucket_init(void);
static uma_bucket_t bucket_alloc(int, int);
static void bucket_free(uma_bucket_t);
static uma_bucket_t bucket_alloc(uma_zone_t zone, int);
static void bucket_free(uma_zone_t zone, uma_bucket_t);
static void bucket_zone_drain(void);
static uma_bucket_t zone_alloc_bucket(uma_zone_t zone, int flags);
static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
@ -256,8 +256,6 @@ static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void slab_free_item(uma_keg_t keg, uma_slab_t slab, void *item);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
uma_fini fini, int align, uint32_t flags);
static inline void zone_relock(uma_zone_t zone, uma_keg_t keg);
static inline void keg_relock(uma_keg_t keg, uma_zone_t zone);
static int zone_import(uma_zone_t zone, void **bucket, int max, int flags);
static void zone_release(uma_zone_t zone, void **bucket, int cnt);
@ -352,7 +350,7 @@ bucket_select(int size)
}
static uma_bucket_t
bucket_alloc(int entries, int bflags)
bucket_alloc(uma_zone_t zone, int flags)
{
struct uma_bucket_zone *ubz;
uma_bucket_t bucket;
@ -366,8 +364,10 @@ bucket_alloc(int entries, int bflags)
if (bucketdisable)
return (NULL);
ubz = bucket_zone_lookup(entries);
bucket = uma_zalloc(ubz->ubz_zone, bflags);
if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
flags |= M_NOVM;
ubz = bucket_zone_lookup(zone->uz_count);
bucket = uma_zalloc(ubz->ubz_zone, flags);
if (bucket) {
#ifdef INVARIANTS
bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
@ -380,7 +380,7 @@ bucket_alloc(int entries, int bflags)
}
static void
bucket_free(uma_bucket_t bucket)
bucket_free(uma_zone_t zone, uma_bucket_t bucket)
{
struct uma_bucket_zone *ubz;
@ -662,9 +662,9 @@ cache_drain(uma_zone_t zone)
bucket_drain(zone, cache->uc_allocbucket);
bucket_drain(zone, cache->uc_freebucket);
if (cache->uc_allocbucket != NULL)
bucket_free(cache->uc_allocbucket);
bucket_free(zone, cache->uc_allocbucket);
if (cache->uc_freebucket != NULL)
bucket_free(cache->uc_freebucket);
bucket_free(zone, cache->uc_freebucket);
cache->uc_allocbucket = cache->uc_freebucket = NULL;
}
ZONE_LOCK(zone);
@ -688,7 +688,7 @@ bucket_cache_drain(uma_zone_t zone)
LIST_REMOVE(bucket, ub_link);
ZONE_UNLOCK(zone);
bucket_drain(zone, bucket);
bucket_free(bucket);
bucket_free(zone, bucket);
ZONE_LOCK(zone);
}
}
@ -801,7 +801,7 @@ zone_drain_wait(uma_zone_t zone, int waitok)
if (waitok == M_NOWAIT)
goto out;
mtx_unlock(&uma_mtx);
msleep(zone, zone->uz_lock, PVM, "zonedrain", 1);
msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
mtx_lock(&uma_mtx);
}
zone->uz_flags |= UMA_ZFLAG_DRAINING;
@ -1375,12 +1375,9 @@ keg_ctor(void *mem, int size, void *udata, int flags)
keg->uk_allocf = startup_alloc;
/*
* Initialize keg's lock (shared among zones).
* Initialize keg's lock
*/
if (arg->flags & UMA_ZONE_MTXCLASS)
KEG_LOCK_INIT(keg, 1);
else
KEG_LOCK_INIT(keg, 0);
KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
/*
* If we're putting the slab header in the actual page we need to
@ -1469,15 +1466,18 @@ zone_ctor(void *mem, int size, void *udata, int flags)
timevalclear(&zone->uz_ratecheck);
keg = arg->keg;
ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
/*
* This is a pure cache zone, no kegs.
*/
if (arg->import) {
zone->uz_size = arg->size;
zone->uz_import = arg->import;
zone->uz_release = arg->release;
zone->uz_arg = arg->arg;
zone->uz_count = BUCKET_MAX;
return (0);
zone->uz_lockptr = &zone->uz_lock;
goto out;
}
/*
@ -1491,7 +1491,7 @@ zone_ctor(void *mem, int size, void *udata, int flags)
KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
zone->uz_init = arg->uminit;
zone->uz_fini = arg->fini;
zone->uz_lock = &keg->uk_lock;
zone->uz_lockptr = &keg->uk_lock;
zone->uz_flags |= UMA_ZONE_SECONDARY;
mtx_lock(&uma_mtx);
ZONE_LOCK(zone);
@ -1529,7 +1529,7 @@ zone_ctor(void *mem, int size, void *udata, int flags)
*/
zone->uz_klink.kl_keg = keg;
LIST_INSERT_HEAD(&zone->uz_kegs, &zone->uz_klink, kl_link);
zone->uz_lock = &keg->uk_lock;
zone->uz_lockptr = &keg->uk_lock;
zone->uz_size = keg->uk_size;
zone->uz_flags |= (keg->uk_flags &
(UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
@ -1544,8 +1544,9 @@ zone_ctor(void *mem, int size, void *udata, int flags)
return (0);
}
if ((keg->uk_flags & UMA_ZONE_MAXBUCKET) == 0)
zone->uz_count = bucket_select(keg->uk_rsize);
out:
if ((arg->flags & UMA_ZONE_MAXBUCKET) == 0)
zone->uz_count = bucket_select(zone->uz_size);
else
zone->uz_count = BUCKET_MAX;
@ -1626,6 +1627,7 @@ zone_dtor(void *arg, int size, void *udata)
mtx_unlock(&uma_mtx);
zone_free_item(kegs, keg, NULL, SKIP_NONE);
}
ZONE_LOCK_FINI(zone);
}
/*
@ -1849,15 +1851,15 @@ uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
/* See uma.h */
uma_zone_t
uma_zcache_create(char *name, uma_ctor ctor, uma_dtor dtor, uma_init zinit,
uma_fini zfini, uma_import zimport, uma_release zrelease,
void *arg, int flags)
uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
uma_init zinit, uma_fini zfini, uma_import zimport,
uma_release zrelease, void *arg, int flags)
{
struct uma_zctor_args args;
memset(&args, 0, sizeof(args));
args.name = name;
args.size = 0;
args.size = size;
args.ctor = ctor;
args.dtor = dtor;
args.uminit = zinit;
@ -1876,10 +1878,10 @@ zone_lock_pair(uma_zone_t a, uma_zone_t b)
{
if (a < b) {
ZONE_LOCK(a);
mtx_lock_flags(b->uz_lock, MTX_DUPOK);
mtx_lock_flags(b->uz_lockptr, MTX_DUPOK);
} else {
ZONE_LOCK(b);
mtx_lock_flags(a->uz_lock, MTX_DUPOK);
mtx_lock_flags(a->uz_lockptr, MTX_DUPOK);
}
}
@ -2071,7 +2073,7 @@ uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
cache->uc_allocbucket = NULL;
critical_exit();
if (bucket != NULL)
bucket_free(bucket);
bucket_free(zone, bucket);
/* Short-circuit for zones without buckets and low memory. */
if (zone->uz_count == 0 || bucketdisable)
@ -2241,31 +2243,15 @@ keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int flags)
return (slab);
}
static inline void
zone_relock(uma_zone_t zone, uma_keg_t keg)
{
if (zone->uz_lock != &keg->uk_lock) {
KEG_UNLOCK(keg);
ZONE_LOCK(zone);
}
}
static inline void
keg_relock(uma_keg_t keg, uma_zone_t zone)
{
if (zone->uz_lock != &keg->uk_lock) {
ZONE_UNLOCK(zone);
KEG_LOCK(keg);
}
}
static uma_slab_t
zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
{
uma_slab_t slab;
if (keg == NULL)
if (keg == NULL) {
keg = zone_first_keg(zone);
KEG_LOCK(keg);
}
for (;;) {
slab = keg_fetch_slab(keg, zone, flags);
@ -2274,13 +2260,13 @@ zone_fetch_slab(uma_zone_t zone, uma_keg_t keg, int flags)
if (flags & (M_NOWAIT | M_NOVM))
break;
}
KEG_UNLOCK(keg);
return (NULL);
}
/*
* uma_zone_fetch_slab_multi: Fetches a slab from one available keg. Returns
* with the keg locked. Caller must call zone_relock() afterwards if the
* zone lock is required. On NULL the zone lock is held.
* with the keg locked. On NULL no lock is held.
*
* The last pointer is used to seed the search. It is not required.
*/
@ -2304,12 +2290,11 @@ zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
* Use the last slab allocated as a hint for where to start
* the search.
*/
if (last) {
if (last != NULL) {
slab = keg_fetch_slab(last, zone, flags);
if (slab)
return (slab);
zone_relock(zone, last);
last = NULL;
KEG_UNLOCK(last);
}
/*
* Loop until we have a slab incase of transient failures
@ -2325,7 +2310,7 @@ zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
*/
LIST_FOREACH(klink, &zone->uz_kegs, kl_link) {
keg = klink->kl_keg;
keg_relock(keg, zone);
KEG_LOCK(keg);
if ((keg->uk_flags & UMA_ZFLAG_FULL) == 0) {
slab = keg_fetch_slab(keg, zone, flags);
if (slab)
@ -2335,7 +2320,7 @@ zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
full++;
else
empty++;
zone_relock(zone, keg);
KEG_UNLOCK(keg);
}
if (rflags & (M_NOWAIT | M_NOVM))
break;
@ -2345,11 +2330,14 @@ zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int rflags)
* and sleep so just sleep for a short period and retry.
*/
if (full && !empty) {
ZONE_LOCK(zone);
zone->uz_flags |= UMA_ZFLAG_FULL;
zone->uz_sleeps++;
zone_log_warning(zone);
msleep(zone, zone->uz_lock, PVM, "zonelimit", hz/100);
msleep(zone, zone->uz_lockptr, PVM,
"zonelimit", hz/100);
zone->uz_flags &= ~UMA_ZFLAG_FULL;
ZONE_UNLOCK(zone);
continue;
}
}
@ -2387,15 +2375,14 @@ zone_import(uma_zone_t zone, void **bucket, int max, int flags)
uma_keg_t keg;
int i;
ZONE_LOCK(zone);
/* Try to keep the buckets totally full */
slab = NULL;
keg = NULL;
/* Try to keep the buckets totally full */
for (i = 0; i < max; ) {
if ((slab = zone->uz_slab(zone, keg, flags)) == NULL)
break;
keg = slab->us_keg;
while (slab->us_freecount && i < max)
while (slab->us_freecount && i < max)
bucket[i++] = slab_alloc_item(keg, slab);
/* Don't block on the next fill */
@ -2404,8 +2391,6 @@ zone_import(uma_zone_t zone, void **bucket, int max, int flags)
}
if (slab != NULL)
KEG_UNLOCK(keg);
else
ZONE_UNLOCK(zone);
return i;
}
@ -2414,18 +2399,13 @@ static uma_bucket_t
zone_alloc_bucket(uma_zone_t zone, int flags)
{
uma_bucket_t bucket;
int bflags;
int max;
max = zone->uz_count;
bflags = (flags & ~M_WAITOK) | M_NOWAIT;
if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
bflags |= M_NOVM;
bucket = bucket_alloc(zone->uz_count, bflags);
bucket = bucket_alloc(zone, M_NOWAIT | (flags & M_NOVM));
if (bucket == NULL)
goto out;
max = MIN(bucket->ub_entries, max);
max = MIN(bucket->ub_entries, zone->uz_count);
bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
max, flags);
@ -2444,7 +2424,7 @@ zone_alloc_bucket(uma_zone_t zone, int flags)
* rest back onto the freelist.
*/
if (i != bucket->ub_cnt) {
zone->uz_release(zone->uz_arg, bucket->ub_bucket[i],
zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
bucket->ub_cnt - i);
#ifdef INVARIANTS
bzero(&bucket->ub_bucket[i],
@ -2457,7 +2437,7 @@ zone_alloc_bucket(uma_zone_t zone, int flags)
out:
if (bucket == NULL || bucket->ub_cnt == 0) {
if (bucket != NULL)
bucket_free(bucket);
bucket_free(zone, bucket);
atomic_add_long(&zone->uz_fails, 1);
return (NULL);
}
@ -2529,7 +2509,6 @@ uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
{
uma_cache_t cache;
uma_bucket_t bucket;
int bflags;
int cpu;
#ifdef UMA_DEBUG_ALLOC_1
@ -2654,10 +2633,7 @@ uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
#ifdef UMA_DEBUG_ALLOC
printf("uma_zfree: Allocating new free bucket.\n");
#endif
bflags = M_NOWAIT;
if (zone->uz_flags & UMA_ZFLAG_CACHEONLY)
bflags |= M_NOVM;
bucket = bucket_alloc(zone->uz_count, bflags);
bucket = bucket_alloc(zone, M_NOWAIT);
if (bucket) {
critical_enter();
cpu = curcpu;
@ -2671,7 +2647,7 @@ uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
* critical section to free the bucket.
*/
critical_exit();
bucket_free(bucket);
bucket_free(zone, bucket);
goto zfree_restart;
}
@ -2721,8 +2697,8 @@ zone_release(uma_zone_t zone, void **bucket, int cnt)
int i;
clearfull = 0;
ZONE_LOCK(zone);
keg = zone_first_keg(zone);
KEG_LOCK(keg);
for (i = 0; i < cnt; i++) {
item = bucket[i];
if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
@ -2758,12 +2734,13 @@ zone_release(uma_zone_t zone, void **bucket, int cnt)
wakeup(keg);
}
}
zone_relock(zone, keg);
KEG_UNLOCK(keg);
if (clearfull) {
ZONE_LOCK(zone);
zone->uz_flags &= ~UMA_ZFLAG_FULL;
wakeup(zone);
ZONE_UNLOCK(zone);
}
ZONE_UNLOCK(zone);
}
@ -2807,12 +2784,12 @@ uma_zone_set_max(uma_zone_t zone, int nitems)
keg = zone_first_keg(zone);
if (keg == NULL)
return (0);
ZONE_LOCK(zone);
KEG_LOCK(keg);
keg->uk_maxpages = (nitems / keg->uk_ipers) * keg->uk_ppera;
if (keg->uk_maxpages * keg->uk_ipers < nitems)
keg->uk_maxpages += keg->uk_ppera;
nitems = keg->uk_maxpages * keg->uk_ipers;
ZONE_UNLOCK(zone);
KEG_UNLOCK(keg);
return (nitems);
}
@ -2827,9 +2804,9 @@ uma_zone_get_max(uma_zone_t zone)
keg = zone_first_keg(zone);
if (keg == NULL)
return (0);
ZONE_LOCK(zone);
KEG_LOCK(keg);
nitems = keg->uk_maxpages * keg->uk_ipers;
ZONE_UNLOCK(zone);
KEG_UNLOCK(keg);
return (nitems);
}
@ -2873,13 +2850,13 @@ uma_zone_set_init(uma_zone_t zone, uma_init uminit)
{
uma_keg_t keg;
ZONE_LOCK(zone);
keg = zone_first_keg(zone);
KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
KEG_LOCK(keg);
KASSERT(keg->uk_pages == 0,
("uma_zone_set_init on non-empty keg"));
keg->uk_init = uminit;
ZONE_UNLOCK(zone);
KEG_UNLOCK(keg);
}
/* See uma.h */
@ -2888,19 +2865,20 @@ uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
{
uma_keg_t keg;
ZONE_LOCK(zone);
keg = zone_first_keg(zone);
KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
KEG_LOCK(keg);
KASSERT(keg->uk_pages == 0,
("uma_zone_set_fini on non-empty keg"));
keg->uk_fini = fini;
ZONE_UNLOCK(zone);
KEG_UNLOCK(keg);
}
/* See uma.h */
void
uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
{
ZONE_LOCK(zone);
KASSERT(zone_first_keg(zone)->uk_pages == 0,
("uma_zone_set_zinit on non-empty keg"));
@ -2912,6 +2890,7 @@ uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
void
uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
{
ZONE_LOCK(zone);
KASSERT(zone_first_keg(zone)->uk_pages == 0,
("uma_zone_set_zfini on non-empty keg"));
@ -2926,11 +2905,11 @@ uma_zone_set_freef(uma_zone_t zone, uma_free freef)
{
uma_keg_t keg;
ZONE_LOCK(zone);
keg = zone_first_keg(zone);
KASSERT(keg != NULL, ("uma_zone_set_init: Invalid zone type"));
KEG_LOCK(keg);
keg->uk_freef = freef;
ZONE_UNLOCK(zone);
KEG_UNLOCK(keg);
}
/* See uma.h */
@ -2940,11 +2919,11 @@ uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
{
uma_keg_t keg;
ZONE_LOCK(zone);
keg = zone_first_keg(zone);
KEG_LOCK(keg);
keg->uk_flags |= UMA_ZFLAG_PRIVALLOC;
keg->uk_allocf = allocf;
ZONE_UNLOCK(zone);
KEG_UNLOCK(keg);
}
/* See uma.h */
@ -2973,7 +2952,7 @@ uma_zone_reserve_kva(uma_zone_t zone, int count)
return (0);
} else
kva = 0;
ZONE_LOCK(zone);
KEG_LOCK(keg);
keg->uk_kva = kva;
keg->uk_offset = 0;
keg->uk_maxpages = pages;
@ -2983,7 +2962,8 @@ uma_zone_reserve_kva(uma_zone_t zone, int count)
keg->uk_allocf = noobj_alloc;
#endif
keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
ZONE_UNLOCK(zone);
KEG_UNLOCK(keg);
return (1);
}
@ -2998,7 +2978,7 @@ uma_prealloc(uma_zone_t zone, int items)
keg = zone_first_keg(zone);
if (keg == NULL)
return;
ZONE_LOCK(zone);
KEG_LOCK(keg);
slabs = items / keg->uk_ipers;
if (slabs * keg->uk_ipers < items)
slabs++;
@ -3010,7 +2990,7 @@ uma_prealloc(uma_zone_t zone, int items)
LIST_INSERT_HEAD(&keg->uk_free_slab, slab, us_link);
slabs--;
}
ZONE_UNLOCK(zone);
KEG_UNLOCK(keg);
}
/* See uma.h */

View File

@ -231,6 +231,8 @@ uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
uma_keg_t keg;
int freei;
if (zone_first_keg(zone) == NULL)
return;
if (slab == NULL) {
slab = uma_dbg_getslab(zone, item);
if (slab == NULL)
@ -259,6 +261,8 @@ uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
uma_keg_t keg;
int freei;
if (zone_first_keg(zone) == NULL)
return;
if (slab == NULL) {
slab = uma_dbg_getslab(zone, item);
if (slab == NULL)

View File

@ -189,7 +189,7 @@ typedef struct uma_cache * uma_cache_t;
*
*/
struct uma_keg {
struct mtx uk_lock; /* Lock for the keg */
struct mtx_padalign uk_lock; /* Lock for the keg */
struct uma_hash uk_hash;
LIST_HEAD(,uma_zone) uk_zones; /* Keg's zones */
@ -281,8 +281,9 @@ typedef struct uma_klink *uma_klink_t;
*
*/
struct uma_zone {
const char *uz_name; /* Text name of the zone */
struct mtx *uz_lock; /* Lock for the zone (keg's lock) */
struct mtx_padalign uz_lock; /* Lock for the zone */
struct mtx_padalign *uz_lockptr;
const char *uz_name; /* Text name of the zone */
LIST_ENTRY(uma_zone) uz_link; /* List of all zones in keg */
LIST_HEAD(,uma_bucket) uz_buckets; /* full buckets */
@ -334,8 +335,10 @@ struct uma_zone {
static inline uma_keg_t
zone_first_keg(uma_zone_t zone)
{
uma_klink_t klink;
return (LIST_FIRST(&zone->uz_kegs)->kl_keg);
klink = LIST_FIRST(&zone->uz_kegs);
return (klink != NULL) ? klink->kl_keg : NULL;
}
#undef UMA_ALIGN
@ -357,13 +360,25 @@ void uma_large_free(uma_slab_t slab);
mtx_init(&(k)->uk_lock, (k)->uk_name, \
"UMA zone", MTX_DEF | MTX_DUPOK); \
} while (0)
#define KEG_LOCK_FINI(k) mtx_destroy(&(k)->uk_lock)
#define KEG_LOCK(k) mtx_lock(&(k)->uk_lock)
#define KEG_UNLOCK(k) mtx_unlock(&(k)->uk_lock)
#define ZONE_LOCK(z) mtx_lock((z)->uz_lock)
#define ZONE_TRYLOCK(z) mtx_trylock((z)->uz_lock)
#define ZONE_UNLOCK(z) mtx_unlock((z)->uz_lock)
#define ZONE_LOCK_INIT(z, lc) \
do { \
if ((lc)) \
mtx_init(&(z)->uz_lock, (z)->uz_name, \
(z)->uz_name, MTX_DEF | MTX_DUPOK); \
else \
mtx_init(&(z)->uz_lock, (z)->uz_name, \
"UMA zone", MTX_DEF | MTX_DUPOK); \
} while (0)
#define ZONE_LOCK(z) mtx_lock((z)->uz_lockptr)
#define ZONE_TRYLOCK(z) mtx_trylock((z)->uz_lockptr)
#define ZONE_UNLOCK(z) mtx_unlock((z)->uz_lockptr)
#define ZONE_LOCK_FINI(z) mtx_destroy(&(z)->uz_lock)
/*
* Find a slab within a hash table. This is used for OFFPAGE zones to lookup