uma dbg: flexible size for slab debug bitset too

Recently (r355315) the size of the struct uma_slab bitset field us_free
became dynamic instead of conservative.  Now, make the debug bitset
size dynamic too.  The debug bitset is INVARIANTS-only, so in fact we
don't care too much about the space savings that result from this, but
enabling minimally sized slabs on INVARIANTS builds is still important
in order to be able to test new slab layouts effectively.

Reviewed by:	jeff, markj
Sponsored by:	Dell EMC Isilon
Differential Revision:	https://reviews.freebsd.org/D22759
commit 7508f15ff1 (parent 2006d590d6)
Author:	Ryan Libby
Date:	2019-12-13 09:31:59 +00:00

2 changed files with 34 additions and 13 deletions
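For orientation before the diff, here is a minimal userland sketch (not FreeBSD kernel code) of the sizing arithmetic this change adopts: the embedded slab header is followed by one allocation bitset sized to the keg's items-per-slab and, on INVARIANTS builds, by a second debug bitset of the same size laid out immediately after it. The BITSET_* macros and all toy_* names below are simplified stand-ins invented for illustration, not the kernel's definitions; build with -DINVARIANTS to include the debug bitset.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's bitset sizing macros. */
#define	BITSET_BITS		(sizeof(long) * 8)
#define	BITSET_WORDS(n)		(((n) + BITSET_BITS - 1) / BITSET_BITS)
#define	BITSET_SIZE(n)		(BITSET_WORDS(n) * sizeof(long))

#ifdef INVARIANTS
#define	SLAB_BITSETS	2	/* allocation bitset + debug bitset */
#else
#define	SLAB_BITSETS	1	/* allocation bitset only */
#endif

/* Toy slab header; the real struct uma_slab ends in a zero-size bitset. */
struct toy_slab {
	void		*link;		/* stand-in for the slab list linkage */
	unsigned short	freecount;
	unsigned char	flags;
	unsigned char	domain;
	/* Bitsets sized from the keg's items-per-slab follow here. */
};

/* Analog of slab_sizeof(): header plus SLAB_BITSETS bitsets for ipers items. */
static size_t
toy_slab_sizeof(int ipers)
{

	return (sizeof(struct toy_slab) + BITSET_SIZE(ipers) * SLAB_BITSETS);
}

/*
 * Analog of slab_dbg_bits(): the debug bitset begins one BITSET_SIZE(ipers)
 * past the start of the allocation bitset, i.e. just past the header.
 */
static void *
toy_slab_dbg_bits(struct toy_slab *slab, int ipers)
{

	return ((char *)(slab + 1) + BITSET_SIZE(ipers));
}

int
main(void)
{
	int ipers = 64;
	struct toy_slab *slab = malloc(toy_slab_sizeof(ipers));

	if (slab == NULL)
		return (1);
	printf("slab for %d items: %zu bytes, debug bits at offset %td\n",
	    ipers, toy_slab_sizeof(ipers),
	    (char *)toy_slab_dbg_bits(slab, ipers) - (char *)slab);
	free(slab);
	return (0);
}

With INVARIANTS defined, the slab grows by exactly one extra per-keg-sized bitset rather than a fixed SLAB_MAX_SETSIZE worth of bits.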

sys/vm/uma_core.c

@@ -292,6 +292,8 @@ static int sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS);
 static int sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS);
 #ifdef INVARIANTS
+static inline struct noslabbits *slab_dbg_bits(uma_slab_t slab, uma_keg_t keg);
 static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
 static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
 static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
@@ -1201,7 +1203,7 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
 	slab->us_domain = domain;
 	BIT_FILL(keg->uk_ipers, &slab->us_free);
 #ifdef INVARIANTS
-	BIT_ZERO(SLAB_MAX_SETSIZE, &slab->us_debugfree);
+	BIT_ZERO(keg->uk_ipers, slab_dbg_bits(slab, keg));
 #endif
 	if (keg->uk_init != NULL) {
@@ -1484,6 +1486,15 @@ zero_init(void *mem, int size, int flags)
 	return (0);
 }
 
+#ifdef INVARIANTS
+struct noslabbits *
+slab_dbg_bits(uma_slab_t slab, uma_keg_t keg)
+{
+
+	return ((void *)((char *)&slab->us_free + BITSET_SIZE(keg->uk_ipers)));
+}
+#endif
+
 /*
  * Actual size of embedded struct slab (!OFFPAGE).
  */
@@ -1492,7 +1503,7 @@ slab_sizeof(int nitems)
 {
 	size_t s;
 
-	s = sizeof(struct uma_slab) + BITSET_SIZE(nitems);
+	s = sizeof(struct uma_slab) + BITSET_SIZE(nitems) * SLAB_BITSETS;
 	return (roundup(s, UMA_ALIGN_PTR + 1));
 }
 
@@ -4514,12 +4525,10 @@ uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
 	keg = zone->uz_keg;
 	freei = slab_item_index(slab, keg, item);
 
-	if (BIT_ISSET(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree))
+	if (BIT_ISSET(keg->uk_ipers, freei, slab_dbg_bits(slab, keg)))
 		panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
 		    item, zone, zone->uz_name, slab, freei);
-	BIT_SET_ATOMIC(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree);
-
-	return;
+	BIT_SET_ATOMIC(keg->uk_ipers, freei, slab_dbg_bits(slab, keg));
 }
 
 /*
@@ -4550,11 +4559,11 @@ uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
 		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
 		    item, zone, zone->uz_name, slab, freei);
 
-	if (!BIT_ISSET(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree))
+	if (!BIT_ISSET(keg->uk_ipers, freei, slab_dbg_bits(slab, keg)))
 		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
 		    item, zone, zone->uz_name, slab, freei);
-	BIT_CLR_ATOMIC(SLAB_MAX_SETSIZE, freei, &slab->us_debugfree);
+	BIT_CLR_ATOMIC(keg->uk_ipers, freei, slab_dbg_bits(slab, keg));
 }
 
 #endif	/* INVARIANTS */
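The checks above now index a debug bitset sized to keg->uk_ipers instead of SLAB_MAX_SETSIZE. As a hedged sketch of what those checks do, the following standalone program reimplements the duplicate-alloc/duplicate-free logic with plain word arithmetic in place of the kernel's BIT_ISSET/BIT_SET_ATOMIC/BIT_CLR_ATOMIC macros; the dbg_* helper names are invented for illustration.

#include <assert.h>
#include <limits.h>
#include <stdbool.h>

/*
 * Invented dbg_* helpers mimicking the checks in uma_dbg_alloc() and
 * uma_dbg_free(): each item index owns one bit in a per-slab debug bitset;
 * the bit must be clear when the item is allocated and set when it is
 * freed, otherwise a duplicate alloc or duplicate free has occurred.
 */
#define	WORD_BITS	(sizeof(unsigned long) * CHAR_BIT)

static bool
dbg_isset(const unsigned long *bits, int idx)
{

	return ((bits[idx / WORD_BITS] >> (idx % WORD_BITS)) & 1UL);
}

static void
dbg_mark_alloc(unsigned long *bits, int idx)
{

	assert(!dbg_isset(bits, idx) && "duplicate alloc");
	bits[idx / WORD_BITS] |= 1UL << (idx % WORD_BITS);
}

static void
dbg_mark_free(unsigned long *bits, int idx)
{

	assert(dbg_isset(bits, idx) && "duplicate free");
	bits[idx / WORD_BITS] &= ~(1UL << (idx % WORD_BITS));
}

int
main(void)
{
	/* One word covers a slab of up to 64 items on LP64. */
	unsigned long dbg_bits[1] = { 0 };

	dbg_mark_alloc(dbg_bits, 3);	/* first alloc of item 3: OK */
	dbg_mark_free(dbg_bits, 3);	/* matching free: OK */
	/* A second dbg_mark_free(dbg_bits, 3) would trip the assertion. */
	return (0);
}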

sys/vm/uma_int.h

@@ -271,17 +271,26 @@ struct uma_slab {
 	uint16_t	us_freecount;		/* How many are free? */
 	uint8_t		us_flags;		/* Page flags see uma.h */
 	uint8_t		us_domain;		/* Backing NUMA domain. */
-#ifdef INVARIANTS
-	struct slabbits	us_debugfree;		/* Debug bitmask. */
-#endif
-	struct noslabbits us_free;		/* Free bitmask. */
+	struct noslabbits us_free;		/* Free bitmask, flexible. */
 };
+_Static_assert(sizeof(struct uma_slab) == offsetof(struct uma_slab, us_free),
+    "us_free field must be last");
 #if MAXMEMDOM >= 255
 #error "Slab domain type insufficient"
 #endif
 
 typedef struct uma_slab * uma_slab_t;
 
+/*
+ * On INVARIANTS builds, the slab contains a second bitset of the same size,
+ * "dbg_bits", which is laid out immediately after us_free.
+ */
+#ifdef INVARIANTS
+#define	SLAB_BITSETS	2
+#else
+#define	SLAB_BITSETS	1
+#endif
+
 /* These three functions are for embedded (!OFFPAGE) use only. */
 size_t slab_sizeof(int nitems);
 size_t slab_space(int nitems);
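The _Static_assert added above depends on us_free being a zero-size bitset placed last, so that sizeof(struct uma_slab) equals its offset and the run-time-sized bitsets can trail the header directly. Below is a minimal sketch of that pattern, assuming a zero-length array stands in for the kernel's BITSET_DEFINE(noslabbits, 0); the toy_* types are hypothetical.

#include <stddef.h>
#include <stdio.h>

/*
 * Zero-size trailing member standing in for the kernel's
 * BITSET_DEFINE(noslabbits, 0); the zero-length array is a GNU C extension.
 */
struct toy_noslabbits {
	unsigned long	bits[0];
};

struct toy_uma_slab {
	unsigned short		us_freecount;
	unsigned char		us_flags;
	unsigned char		us_domain;
	struct toy_noslabbits	us_free;	/* must be last */
};

/*
 * Because us_free occupies no space and is the last member, the struct ends
 * exactly where the run-time-sized bitsets begin; this is the property the
 * new _Static_assert pins down.
 */
_Static_assert(sizeof(struct toy_uma_slab) ==
    offsetof(struct toy_uma_slab, us_free),
    "us_free field must be last");

int
main(void)
{

	printf("sizeof = %zu, offsetof(us_free) = %zu\n",
	    sizeof(struct toy_uma_slab),
	    offsetof(struct toy_uma_slab, us_free));
	return (0);
}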
@@ -293,7 +302,10 @@ int slab_ipers(size_t size, int align);
  */
 struct uma_hash_slab {
 	struct uma_slab		uhs_slab;	/* Must be first. */
-	struct slabbits		uhs_bits;	/* Must be second. */
+	struct slabbits		uhs_bits1;	/* Must be second. */
+#ifdef INVARIANTS
+	struct slabbits		uhs_bits2;	/* Must be third. */
+#endif
 	LIST_ENTRY(uma_hash_slab) uhs_hlink;	/* Link for hash table */
 	uint8_t			*uhs_data;	/* First item */
 };