Use a variant slab structure for offpage zones. This saves space in
embedded slabs but also is an opportunity to tidy up code and add
accessor inlines.

Reviewed by:	markj, rlibby
Differential Revision:	https://reviews.freebsd.org/D22609
Author:	Jeff Roberson
Date:	2019-12-08 01:15:06 +00:00
Commit:	1e0701e1e5 (parent 8e1906f700)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=355510

2 changed files with 87 additions and 57 deletions

--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -664,8 +664,7 @@ hash_alloc(struct uma_hash *hash, u_int size)
 	if (size > UMA_HASH_SIZE_INIT) {
 		hash->uh_hashsize = size;
 		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
-		hash->uh_slab_hash = (struct slabhead *)malloc(alloc,
-		    M_UMAHASH, M_NOWAIT);
+		hash->uh_slab_hash = malloc(alloc, M_UMAHASH, M_NOWAIT);
 	} else {
 		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
 		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
@@ -698,7 +697,7 @@ hash_alloc(struct uma_hash *hash, u_int size)
 static int
 hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
 {
-	uma_slab_t slab;
+	uma_hash_slab_t slab;
 	u_int hval;
 	u_int idx;
 
@@ -714,12 +713,12 @@ hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
 	 */
 
 	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
-		while (!SLIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
-			slab = SLIST_FIRST(&oldhash->uh_slab_hash[idx]);
-			SLIST_REMOVE_HEAD(&oldhash->uh_slab_hash[idx], us_hlink);
-			hval = UMA_HASH(newhash, slab->us_data);
-			SLIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
-			    slab, us_hlink);
+		while (!LIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
+			slab = LIST_FIRST(&oldhash->uh_slab_hash[idx]);
+			LIST_REMOVE(slab, uhs_hlink);
+			hval = UMA_HASH(newhash, slab->uhs_data);
+			LIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
+			    slab, uhs_hlink);
 		}
 
 	return (1);
@@ -992,7 +991,7 @@ keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
 	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
 	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
 
-	mem = slab->us_data;
+	mem = slab_data(slab, keg);
 	flags = slab->us_flags;
 	i = start;
 	if (keg->uk_fini != NULL) {
@@ -1006,11 +1005,10 @@ keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
 		 * albeit we don't make skip check for other init/fini
 		 * invocations.
 		 */
-		if (!uma_dbg_kskip(keg, slab->us_data + (keg->uk_rsize * i)) ||
+		if (!uma_dbg_kskip(keg, slab_item(slab, keg, i)) ||
 		    keg->uk_fini != trash_fini)
 #endif
-			keg->uk_fini(slab->us_data + (keg->uk_rsize * i),
-			    keg->uk_size);
+			keg->uk_fini(slab_item(slab, keg, i), keg->uk_size);
 	}
 	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
 		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
@@ -1057,18 +1055,17 @@ keg_drain(uma_keg_t keg)
 			keg->uk_free -= keg->uk_ipers;
 			if (keg->uk_flags & UMA_ZONE_HASH)
-				UMA_HASH_REMOVE(&keg->uk_hash, slab,
-				    slab->us_data);
-			SLIST_INSERT_HEAD(&freeslabs, slab, us_hlink);
+				UMA_HASH_REMOVE(&keg->uk_hash, slab);
+			LIST_INSERT_HEAD(&freeslabs, slab, us_link);
 		}
 	}
 finished:
 	KEG_UNLOCK(keg);
 
-	while ((slab = SLIST_FIRST(&freeslabs)) != NULL) {
-		SLIST_REMOVE(&freeslabs, slab, uma_slab, us_hlink);
+	while ((slab = LIST_FIRST(&freeslabs)) != NULL) {
+		LIST_REMOVE(slab, us_link);
 		keg_free_slab(keg, slab, keg->uk_ipers);
 	}
 }
@@ -1190,13 +1187,14 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
 	/* Point the slab into the allocated memory */
 	if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
 		slab = (uma_slab_t )(mem + keg->uk_pgoff);
+	else
+		((uma_hash_slab_t)slab)->uhs_data = mem;
 
 	if (keg->uk_flags & UMA_ZONE_VTOSLAB)
 		for (i = 0; i < keg->uk_ppera; i++)
 			vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
 			    zone, slab);
 
-	slab->us_data = mem;
 	slab->us_freecount = keg->uk_ipers;
 	slab->us_flags = sflags;
 	slab->us_domain = domain;
@@ -1207,7 +1205,7 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
 
 	if (keg->uk_init != NULL) {
 		for (i = 0; i < keg->uk_ipers; i++)
-			if (keg->uk_init(slab->us_data + (keg->uk_rsize * i),
+			if (keg->uk_init(slab_item(slab, keg, i),
 			    keg->uk_size, flags) != 0)
 				break;
 		if (i != keg->uk_ipers) {
@@ -2393,15 +2391,12 @@ uma_startup(void *mem, int npages)
 	zone_ctor(zones, zsize, &args, M_WAITOK);
 
 	/* Now make a zone for slab headers */
-	slabzone = uma_zcreate("UMA Slabs",
-				slab_sizeof(SLAB_MAX_SETSIZE),
-				NULL, NULL, NULL, NULL,
-				UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
+	slabzone = uma_zcreate("UMA Slabs", sizeof(struct uma_hash_slab),
+	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
 
 	hashzone = uma_zcreate("UMA Hash",
 	    sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
-	    NULL, NULL, NULL, NULL,
-	    UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
+	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
 
 	bucket_init();
 
@@ -3097,7 +3092,7 @@ slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
 
 	freei = BIT_FFS(keg->uk_ipers, &slab->us_free) - 1;
 	BIT_CLR(keg->uk_ipers, freei, &slab->us_free);
-	item = slab->us_data + (keg->uk_rsize * freei);
+	item = slab_item(slab, keg, freei);
 	slab->us_freecount--;
 	keg->uk_free--;
 
@@ -3609,7 +3604,7 @@ slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
 	}
 
 	/* Slab management. */
-	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
+	freei = slab_item_index(slab, keg, item);
 	BIT_SET(keg->uk_ipers, freei, &slab->us_free);
 	slab->us_freecount++;
 
@@ -4491,7 +4486,7 @@ uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
 		    item, zone->uz_name);
 	}
 	keg = zone->uz_keg;
-	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
+	freei = slab_item_index(slab, keg, item);
 
 	if (BIT_ISSET(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree))
 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
@@ -4519,13 +4514,13 @@ uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
 		    item, zone->uz_name);
 	}
 	keg = zone->uz_keg;
-	freei = ((uintptr_t)item - (uintptr_t)slab->us_data) / keg->uk_rsize;
+	freei = slab_item_index(slab, keg, item);
 
 	if (freei >= keg->uk_ipers)
 		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
 		    item, zone, zone->uz_name, slab, freei);
 
-	if (((freei * keg->uk_rsize) + slab->us_data) != item)
+	if (slab_item(slab, keg, freei) != item)
 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
 		    item, zone, zone->uz_name, slab, freei);
 
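Most of the churn above replaces open-coded pointer arithmetic on us_data with
the new slab_item()/slab_item_index() accessors. A self-contained sketch of
that arithmetic, using made-up values (uk_rsize is the keg's rounded item
size; "data" stands for what slab_data() returns):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uintptr_t data = 0x10000;	/* hypothetical slab data address */
	uintptr_t uk_rsize = 256;	/* hypothetical rounded item size */
	int freei = 3;

	/* slab_item(): index -> item address. */
	uintptr_t item = data + uk_rsize * freei;

	/* slab_item_index(): item address -> index. */
	int idx = (int)((item - data) / uk_rsize);

	assert(idx == freei);
	printf("index %d <-> item %#lx\n", idx, (unsigned long)item);
	return (0);
}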

--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -139,33 +139,28 @@
 /* Max waste percentage before going to off page slab management */
 #define UMA_MAX_WASTE	10
 
 /*
- * I doubt there will be many cases where this is exceeded. This is the initial
- * size of the hash table for uma_slabs that are managed off page. This hash
- * does expand by powers of two. Currently it doesn't get smaller.
+ * Hash table for freed address -> slab translation.
+ *
+ * Only zones with memory not touchable by the allocator use the
+ * hash table. Otherwise slabs are found with vtoslab().
  */
 #define UMA_HASH_SIZE_INIT	32
 
-/*
- * I should investigate other hashing algorithms. This should yield a low
- * number of collisions if the pages are relatively contiguous.
- */
 #define UMA_HASH(h, s) ((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)
 
 #define UMA_HASH_INSERT(h, s, mem)					\
-	SLIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),		\
-	    (mem))], (s), us_hlink)
-
-#define UMA_HASH_REMOVE(h, s, mem)					\
-	SLIST_REMOVE(&(h)->uh_slab_hash[UMA_HASH((h),			\
-	    (mem))], (s), uma_slab, us_hlink)
+	LIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),		\
+	    (mem))], (uma_hash_slab_t)(s), uhs_hlink)
 
-/* Hash table for freed address -> slab translation */
-
-SLIST_HEAD(slabhead, uma_slab);
+#define UMA_HASH_REMOVE(h, s)						\
+	LIST_REMOVE((uma_hash_slab_t)(s), uhs_hlink)
+
+LIST_HEAD(slabhashhead, uma_hash_slab);
 
 struct uma_hash {
-	struct slabhead	*uh_slab_hash;	/* Hash table for slabs */
+	struct slabhashhead *uh_slab_hash;	/* Hash table for slabs */
 	u_int		uh_hashsize;	/* Current size of the hash table */
 	u_int		uh_hashmask;	/* Mask used during hashing */
 };
@@ -202,13 +197,15 @@ struct uma_cache {
 typedef struct uma_cache * uma_cache_t;
 
+LIST_HEAD(slabhead, uma_slab);
+
 /*
  * Per-domain memory list. Embedded in the kegs.
  */
 struct uma_domain {
-	LIST_HEAD(,uma_slab)	ud_part_slab;	/* partially allocated slabs */
-	LIST_HEAD(,uma_slab)	ud_free_slab;	/* empty slab list */
-	LIST_HEAD(,uma_slab)	ud_full_slab;	/* full slabs */
+	struct slabhead	ud_part_slab;	/* partially allocated slabs */
+	struct slabhead	ud_free_slab;	/* completely unallocated slabs */
+	struct slabhead	ud_full_slab;	/* fully allocated slabs */
 };
 
 typedef struct uma_domain * uma_domain_t;
 
@@ -271,8 +268,6 @@ BITSET_DEFINE(noslabbits, 0);
  */
 struct uma_slab {
 	LIST_ENTRY(uma_slab)	us_link;	/* slabs in zone */
-	SLIST_ENTRY(uma_slab)	us_hlink;	/* Link for hash table */
-	uint8_t		*us_data;		/* First item */
 	uint16_t	us_freecount;		/* How many are free? */
 	uint8_t		us_flags;		/* Page flags see uma.h */
 	uint8_t		us_domain;		/* Backing NUMA domain. */
@@ -281,7 +276,6 @@ struct uma_slab {
 #endif
 	struct noslabbits us_free;		/* Free bitmask. */
 };
-
 #if MAXMEMDOM >= 255
 #error "Slab domain type insufficient"
 #endif
@@ -293,6 +287,47 @@ size_t slab_sizeof(int nitems);
 size_t slab_space(int nitems);
 int slab_ipers(size_t size, int align);
 
+/*
+ * Slab structure with a full sized bitset and hash link for both
+ * HASH and OFFPAGE zones.
+ */
+struct uma_hash_slab {
+	struct uma_slab		uhs_slab;	/* Must be first. */
+	struct slabbits		uhs_bits;	/* Must be second. */
+	LIST_ENTRY(uma_hash_slab) uhs_hlink;	/* Link for hash table */
+	uint8_t			*uhs_data;	/* First item */
+};
+
+typedef struct uma_hash_slab * uma_hash_slab_t;
+
+static inline void *
+slab_data(uma_slab_t slab, uma_keg_t keg)
+{
+
+	if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0)
+		return ((void *)((uintptr_t)slab - keg->uk_pgoff));
+	else
+		return (((uma_hash_slab_t)slab)->uhs_data);
+}
+
+static inline void *
+slab_item(uma_slab_t slab, uma_keg_t keg, int index)
+{
+	uintptr_t data;
+
+	data = (uintptr_t)slab_data(slab, keg);
+	return ((void *)(data + keg->uk_rsize * index));
+}
+
+static inline int
+slab_item_index(uma_slab_t slab, uma_keg_t keg, void *item)
+{
+	uintptr_t data;
+
+	data = (uintptr_t)slab_data(slab, keg);
+	return (((uintptr_t)item - data) / keg->uk_rsize);
+}
+
 TAILQ_HEAD(uma_bucketlist, uma_bucket);
 
 struct uma_zone_domain {
@@ -444,14 +479,14 @@ static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
 static __inline uma_slab_t
 hash_sfind(struct uma_hash *hash, uint8_t *data)
 {
-	uma_slab_t slab;
+	uma_hash_slab_t slab;
 	u_int hval;
 
 	hval = UMA_HASH(hash, data);
 
-	SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
-		if ((uint8_t *)slab->us_data == data)
-			return (slab);
+	LIST_FOREACH(slab, &hash->uh_slab_hash[hval], uhs_hlink) {
+		if ((uint8_t *)slab->uhs_data == data)
+			return (&slab->uhs_slab);
 	}
 	return (NULL);
 }
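For context on hash_sfind() above: UMA_HASH() hashes the page address of the
freed item, so every item in a slab's page lands in the bucket where
UMA_HASH_INSERT() filed the slab. A sketch of the bucket math, assuming
page-sized slabs with 4 KB pages (UMA_SLAB_SHIFT == 12) and the initial
32-bucket table; the addresses are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define UMA_SLAB_SHIFT	12	/* assumes 4 KB slabs/pages */
#define UMA_HASH(mask, s)	((((uintptr_t)(s)) >> UMA_SLAB_SHIFT) & (mask))

int
main(void)
{
	uintptr_t uh_hashmask = 32 - 1;		/* UMA_HASH_SIZE_INIT - 1 */
	uintptr_t slab_start = 0x7f0000400000;	/* hypothetical slab page */

	/* Items from the same page hash to the same bucket... */
	printf("bucket(item 0) = %lu\n",
	    (unsigned long)UMA_HASH(uh_hashmask, slab_start));
	printf("bucket(item 5) = %lu\n",
	    (unsigned long)UMA_HASH(uh_hashmask, slab_start + 5 * 256));

	/* ...while the next page maps to the next bucket. */
	printf("bucket(next page) = %lu\n",
	    (unsigned long)UMA_HASH(uh_hashmask, slab_start + 4096));
	return (0);
}

This is why relatively contiguous pages see few collisions: consecutive pages
walk the power-of-two bucket array in order.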