Convert UMA code to C99 uintXX_t types.

This commit is contained in:
Gleb Smirnoff 2013-04-09 17:43:48 +00:00
parent 04fc5741e0
commit 85dcf349c1
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=249313
4 changed files with 97 additions and 97 deletions

View File

@@ -167,7 +167,7 @@ typedef void (*uma_fini)(void *mem, int size);
*/
uma_zone_t uma_zcreate(const char *name, size_t size, uma_ctor ctor,
uma_dtor dtor, uma_init uminit, uma_fini fini,
int align, u_int32_t flags);
int align, uint32_t flags);
/*
* Create a secondary uma zone
@@ -359,7 +359,7 @@ uma_zfree(uma_zone_t zone, void *item)
* A pointer to the allocated memory or NULL on failure.
*/
typedef void *(*uma_alloc)(uma_zone_t zone, int size, u_int8_t *pflag, int wait);
typedef void *(*uma_alloc)(uma_zone_t zone, int size, uint8_t *pflag, int wait);
/*
* Backend page free routines
@@ -372,7 +372,7 @@ typedef void *(*uma_alloc)(uma_zone_t zone, int size, u_int8_t *pflag, int wait)
* Returns:
* None
*/
typedef void (*uma_free)(void *item, int size, u_int8_t pflag);
typedef void (*uma_free)(void *item, int size, uint8_t pflag);
@@ -590,9 +590,9 @@ void uma_prealloc(uma_zone_t zone, int itemcnt);
* item The address of the item for which we want a refcnt.
*
* Returns:
* A pointer to a u_int32_t reference counter.
* A pointer to a uint32_t reference counter.
*/
u_int32_t *uma_find_refcnt(uma_zone_t zone, void *item);
uint32_t *uma_find_refcnt(uma_zone_t zone, void *item);
/*
* Used to determine if a fixed-size zone is exhausted.
@@ -613,10 +613,10 @@ int uma_zone_exhausted_nolock(uma_zone_t zone);
*/
#define UMA_STREAM_VERSION 0x00000001
struct uma_stream_header {
u_int32_t ush_version; /* Stream format version. */
u_int32_t ush_maxcpus; /* Value of MAXCPU for stream. */
u_int32_t ush_count; /* Number of records. */
u_int32_t _ush_pad; /* Pad/reserved field. */
uint32_t ush_version; /* Stream format version. */
uint32_t ush_maxcpus; /* Value of MAXCPU for stream. */
uint32_t ush_count; /* Number of records. */
uint32_t _ush_pad; /* Pad/reserved field. */
};
#define UTH_MAX_NAME 32
@@ -626,32 +626,32 @@ struct uma_type_header {
* Static per-zone data, some extracted from the supporting keg.
*/
char uth_name[UTH_MAX_NAME];
u_int32_t uth_align; /* Keg: alignment. */
u_int32_t uth_size; /* Keg: requested size of item. */
u_int32_t uth_rsize; /* Keg: real size of item. */
u_int32_t uth_maxpages; /* Keg: maximum number of pages. */
u_int32_t uth_limit; /* Keg: max items to allocate. */
uint32_t uth_align; /* Keg: alignment. */
uint32_t uth_size; /* Keg: requested size of item. */
uint32_t uth_rsize; /* Keg: real size of item. */
uint32_t uth_maxpages; /* Keg: maximum number of pages. */
uint32_t uth_limit; /* Keg: max items to allocate. */
/*
* Current dynamic zone/keg-derived statistics.
*/
u_int32_t uth_pages; /* Keg: pages allocated. */
u_int32_t uth_keg_free; /* Keg: items free. */
u_int32_t uth_zone_free; /* Zone: items free. */
u_int32_t uth_bucketsize; /* Zone: desired bucket size. */
u_int32_t uth_zone_flags; /* Zone: flags. */
u_int64_t uth_allocs; /* Zone: number of allocations. */
u_int64_t uth_frees; /* Zone: number of frees. */
u_int64_t uth_fails; /* Zone: number of alloc failures. */
u_int64_t uth_sleeps; /* Zone: number of alloc sleeps. */
u_int64_t _uth_reserved1[2]; /* Reserved. */
uint32_t uth_pages; /* Keg: pages allocated. */
uint32_t uth_keg_free; /* Keg: items free. */
uint32_t uth_zone_free; /* Zone: items free. */
uint32_t uth_bucketsize; /* Zone: desired bucket size. */
uint32_t uth_zone_flags; /* Zone: flags. */
uint64_t uth_allocs; /* Zone: number of allocations. */
uint64_t uth_frees; /* Zone: number of frees. */
uint64_t uth_fails; /* Zone: number of alloc failures. */
uint64_t uth_sleeps; /* Zone: number of alloc sleeps. */
uint64_t _uth_reserved1[2]; /* Reserved. */
};
struct uma_percpu_stat {
u_int64_t ups_allocs; /* Cache: number of allocations. */
u_int64_t ups_frees; /* Cache: number of frees. */
u_int64_t ups_cache_free; /* Cache: free items in cache. */
u_int64_t _ups_reserved[5]; /* Reserved. */
uint64_t ups_allocs; /* Cache: number of allocations. */
uint64_t ups_frees; /* Cache: number of frees. */
uint64_t ups_cache_free; /* Cache: free items in cache. */
uint64_t _ups_reserved[5]; /* Reserved. */
};
#endif

View File

@@ -168,7 +168,7 @@ struct uma_zctor_args {
uma_fini fini;
uma_keg_t keg;
int align;
u_int32_t flags;
uint32_t flags;
};
struct uma_kctor_args {
@@ -177,7 +177,7 @@ struct uma_kctor_args {
uma_init uminit;
uma_fini fini;
int align;
u_int32_t flags;
uint32_t flags;
};
struct uma_bucket_zone {
@@ -215,10 +215,10 @@ enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI };
/* Prototypes.. */
static void *noobj_alloc(uma_zone_t, int, u_int8_t *, int);
static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
static void page_free(void *, int, u_int8_t);
static void *noobj_alloc(uma_zone_t, int, uint8_t *, int);
static void *page_alloc(uma_zone_t, int, uint8_t *, int);
static void *startup_alloc(uma_zone_t, int, uint8_t *, int);
static void page_free(void *, int, uint8_t);
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int);
static void cache_drain(uma_zone_t);
static void bucket_drain(uma_zone_t, uma_bucket_t);
@@ -250,7 +250,7 @@ static uma_slab_t zone_fetch_slab(uma_zone_t zone, uma_keg_t last, int flags);
static uma_slab_t zone_fetch_slab_multi(uma_zone_t zone, uma_keg_t last, int flags);
static void *slab_alloc_item(uma_zone_t zone, uma_slab_t slab);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
uma_fini fini, int align, u_int32_t flags);
uma_fini fini, int align, uint32_t flags);
static inline void zone_relock(uma_zone_t zone, uma_keg_t keg);
static inline void keg_relock(uma_keg_t keg, uma_zone_t zone);
@@ -695,8 +695,8 @@ keg_drain(uma_keg_t keg)
struct slabhead freeslabs = { 0 };
uma_slab_t slab;
uma_slab_t n;
u_int8_t flags;
u_int8_t *mem;
uint8_t flags;
uint8_t *mem;
int i;
/*
@@ -828,8 +828,8 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
uma_slabrefcnt_t slabref;
uma_alloc allocf;
uma_slab_t slab;
u_int8_t *mem;
u_int8_t flags;
uint8_t *mem;
uint8_t flags;
int i;
mtx_assert(&keg->uk_lock, MA_OWNED);
@@ -950,7 +950,7 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
* the VM is ready.
*/
static void *
startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
startup_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
{
uma_keg_t keg;
uma_slab_t tmps;
@@ -1010,7 +1010,7 @@ startup_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
* NULL if M_NOWAIT is set.
*/
static void *
page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
page_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
{
void *p; /* Returned page */
@@ -1032,7 +1032,7 @@ page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
* NULL if M_NOWAIT is set.
*/
static void *
noobj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
noobj_alloc(uma_zone_t zone, int bytes, uint8_t *flags, int wait)
{
TAILQ_HEAD(, vm_page) alloctail;
u_long npages;
@@ -1095,7 +1095,7 @@ noobj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
* Nothing
*/
static void
page_free(void *mem, int size, u_int8_t flags)
page_free(void *mem, int size, uint8_t flags)
{
vm_map_t map;
@@ -1752,8 +1752,8 @@ uma_startup(void *bootmem, int boot_pages)
printf("Filling boot free list.\n");
#endif
for (i = 0; i < boot_pages; i++) {
slab = (uma_slab_t)((u_int8_t *)bootmem + (i * UMA_SLAB_SIZE));
slab->us_data = (u_int8_t *)slab;
slab = (uma_slab_t)((uint8_t *)bootmem + (i * UMA_SLAB_SIZE));
slab->us_data = (uint8_t *)slab;
slab->us_flags = UMA_SLAB_BOOT;
LIST_INSERT_HEAD(&uma_boot_pages, slab, us_link);
}
@@ -1852,7 +1852,7 @@ uma_startup3(void)
static uma_keg_t
uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
int align, u_int32_t flags)
int align, uint32_t flags)
{
struct uma_kctor_args args;
@@ -1877,7 +1877,7 @@ uma_set_align(int align)
/* See uma.h */
uma_zone_t
uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
uma_init uminit, uma_fini fini, int align, u_int32_t flags)
uma_init uminit, uma_fini fini, int align, uint32_t flags)
{
struct uma_zctor_args args;
@@ -2404,7 +2404,7 @@ slab_alloc_item(uma_zone_t zone, uma_slab_t slab)
uma_keg_t keg;
uma_slabrefcnt_t slabref;
void *item;
u_int8_t freei;
uint8_t freei;
keg = slab->us_keg;
mtx_assert(&keg->uk_lock, MA_OWNED);
@@ -2808,8 +2808,8 @@ zone_free_item(uma_zone_t zone, void *item, void *udata,
uma_slab_t slab;
uma_slabrefcnt_t slabref;
uma_keg_t keg;
u_int8_t *mem;
u_int8_t freei;
uint8_t *mem;
uint8_t freei;
int clearfull;
if (skip < SKIP_DTOR && zone->uz_dtor)
@@ -2826,7 +2826,7 @@ zone_free_item(uma_zone_t zone, void *item, void *udata,
zone->uz_frees++;
if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
mem = (uint8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
keg = zone_first_keg(zone); /* Must only be one. */
if (zone->uz_flags & UMA_ZONE_HASH) {
slab = hash_sfind(&keg->uk_hash, mem);
@@ -3102,12 +3102,12 @@ uma_prealloc(uma_zone_t zone, int items)
}
/* See uma.h */
u_int32_t *
uint32_t *
uma_find_refcnt(uma_zone_t zone, void *item)
{
uma_slabrefcnt_t slabref;
uma_keg_t keg;
u_int32_t *refcnt;
uint32_t *refcnt;
int idx;
slabref = (uma_slabrefcnt_t)vtoslab((vm_offset_t)item &
@@ -3163,7 +3163,7 @@ uma_large_malloc(int size, int wait)
{
void *mem;
uma_slab_t slab;
u_int8_t flags;
uint8_t flags;
slab = zone_alloc_item(slabzone, NULL, wait);
if (slab == NULL)
@@ -3267,11 +3267,11 @@ uma_print_zone(uma_zone_t zone)
* directly so that we don't have to.
*/
static void
uma_zone_sumstat(uma_zone_t z, int *cachefreep, u_int64_t *allocsp,
u_int64_t *freesp, u_int64_t *sleepsp)
uma_zone_sumstat(uma_zone_t z, int *cachefreep, uint64_t *allocsp,
uint64_t *freesp, uint64_t *sleepsp)
{
uma_cache_t cache;
u_int64_t allocs, frees, sleeps;
uint64_t allocs, frees, sleeps;
int cachefree, cpu;
allocs = frees = sleeps = 0;
@@ -3422,7 +3422,7 @@ sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
#ifdef DDB
DB_SHOW_COMMAND(uma, db_show_uma)
{
u_int64_t allocs, frees, sleeps;
uint64_t allocs, frees, sleeps;
uma_bucket_t bucket;
uma_keg_t kz;
uma_zone_t z;

View File

@@ -49,7 +49,7 @@ __FBSDID("$FreeBSD$");
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>
static const u_int32_t uma_junk = 0xdeadc0de;
static const uint32_t uma_junk = 0xdeadc0de;
/*
* Checks an item to make sure it hasn't been overwritten since it was freed,
@@ -62,7 +62,7 @@ int
trash_ctor(void *mem, int size, void *arg, int flags)
{
int cnt;
u_int32_t *p;
uint32_t *p;
cnt = size / sizeof(uma_junk);
@@ -85,7 +85,7 @@ void
trash_dtor(void *mem, int size, void *arg)
{
int cnt;
u_int32_t *p;
uint32_t *p;
cnt = size / sizeof(uma_junk);
@@ -122,7 +122,7 @@ int
mtrash_ctor(void *mem, int size, void *arg, int flags)
{
struct malloc_type **ksp;
u_int32_t *p = mem;
uint32_t *p = mem;
int cnt;
size -= sizeof(struct malloc_type *);
@@ -150,7 +150,7 @@ void
mtrash_dtor(void *mem, int size, void *arg)
{
int cnt;
u_int32_t *p;
uint32_t *p;
size -= sizeof(struct malloc_type *);
cnt = size / sizeof(uma_junk);
@@ -196,9 +196,9 @@ uma_dbg_getslab(uma_zone_t zone, void *item)
{
uma_slab_t slab;
uma_keg_t keg;
u_int8_t *mem;
uint8_t *mem;
mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
mem = (uint8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
slab = vtoslab((vm_offset_t)mem);
} else {

View File

@@ -184,8 +184,8 @@ typedef struct uma_bucket * uma_bucket_t;
struct uma_cache {
uma_bucket_t uc_freebucket; /* Bucket we're freeing to */
uma_bucket_t uc_allocbucket; /* Bucket to allocate from */
u_int64_t uc_allocs; /* Count of allocations */
u_int64_t uc_frees; /* Count of frees */
uint64_t uc_allocs; /* Count of allocations */
uint64_t uc_frees; /* Count of frees */
} UMA_ALIGN;
typedef struct uma_cache * uma_cache_t;
@@ -205,13 +205,13 @@ struct uma_keg {
LIST_HEAD(,uma_slab) uk_free_slab; /* empty slab list */
LIST_HEAD(,uma_slab) uk_full_slab; /* full slabs */
u_int32_t uk_recurse; /* Allocation recursion count */
u_int32_t uk_align; /* Alignment mask */
u_int32_t uk_pages; /* Total page count */
u_int32_t uk_free; /* Count of items free in slabs */
u_int32_t uk_size; /* Requested size of each item */
u_int32_t uk_rsize; /* Real size of each item */
u_int32_t uk_maxpages; /* Maximum number of pages to alloc */
uint32_t uk_recurse; /* Allocation recursion count */
uint32_t uk_align; /* Alignment mask */
uint32_t uk_pages; /* Total page count */
uint32_t uk_free; /* Count of items free in slabs */
uint32_t uk_size; /* Requested size of each item */
uint32_t uk_rsize; /* Real size of each item */
uint32_t uk_maxpages; /* Maximum number of pages to alloc */
uma_init uk_init; /* Keg's init routine */
uma_fini uk_fini; /* Keg's fini routine */
@@ -222,11 +222,11 @@ struct uma_keg {
vm_offset_t uk_kva; /* Zone base KVA */
uma_zone_t uk_slabzone; /* Slab zone backing us, if OFFPAGE */
u_int16_t uk_slabsize; /* Slab size for this keg */
u_int16_t uk_pgoff; /* Offset to uma_slab struct */
u_int16_t uk_ppera; /* pages per allocation from backend */
u_int16_t uk_ipers; /* Items per slab */
u_int32_t uk_flags; /* Internal flags */
uint16_t uk_slabsize; /* Slab size for this keg */
uint16_t uk_pgoff; /* Offset to uma_slab struct */
uint16_t uk_ppera; /* pages per allocation from backend */
uint16_t uk_ipers; /* Items per slab */
uint32_t uk_flags; /* Internal flags */
/* Least used fields go to the last cache line. */
const char *uk_name; /* Name of creating zone. */
@@ -244,17 +244,17 @@ struct uma_slab_head {
unsigned long _us_size; /* Size of allocation */
} us_type;
SLIST_ENTRY(uma_slab) us_hlink; /* Link for hash table */
u_int8_t *us_data; /* First item */
u_int16_t us_freecount; /* How many are free? */
u_int8_t us_flags; /* Page flags see uma.h */
u_int8_t us_firstfree; /* First free item index */
uint8_t *us_data; /* First item */
uint16_t us_freecount; /* How many are free? */
uint8_t us_flags; /* Page flags see uma.h */
uint8_t us_firstfree; /* First free item index */
};
/* The standard slab structure */
struct uma_slab {
struct uma_slab_head us_head; /* slab header data */
struct {
u_int8_t us_item;
uint8_t us_item;
} us_freelist[1]; /* actual number bigger */
};
@@ -265,8 +265,8 @@ struct uma_slab {
struct uma_slab_refcnt {
struct uma_slab_head us_head; /* slab header data */
struct {
u_int8_t us_item;
u_int32_t us_refcnt;
uint8_t us_item;
uint32_t us_refcnt;
} us_freelist[1]; /* actual number bigger */
};
@@ -323,13 +323,13 @@ struct uma_zone {
uma_init uz_init; /* Initializer for each item */
uma_fini uz_fini; /* Discards memory */
u_int32_t uz_flags; /* Flags inherited from kegs */
u_int32_t uz_size; /* Size inherited from kegs */
uint32_t uz_flags; /* Flags inherited from kegs */
uint32_t uz_size; /* Size inherited from kegs */
u_int64_t uz_allocs UMA_ALIGN; /* Total number of allocations */
u_int64_t uz_frees; /* Total number of frees */
u_int64_t uz_fails; /* Total number of alloc failures */
u_int64_t uz_sleeps; /* Total number of alloc sleeps */
uint64_t uz_allocs UMA_ALIGN; /* Total number of allocations */
uint64_t uz_frees; /* Total number of frees */
uint64_t uz_fails; /* Total number of alloc failures */
uint64_t uz_sleeps; /* Total number of alloc sleeps */
uint16_t uz_fills; /* Outstanding bucket fills */
uint16_t uz_count; /* Highest amount of items in bucket */
@@ -362,7 +362,7 @@ struct uma_zone {
#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, u_int8_t *data);
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);
void *uma_large_malloc(int size, int wait);
void uma_large_free(uma_slab_t slab);
@@ -396,7 +396,7 @@ void uma_large_free(uma_slab_t slab);
* A pointer to a slab if successful, else NULL.
*/
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, u_int8_t *data)
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
uma_slab_t slab;
int hval;
@@ -404,7 +404,7 @@ hash_sfind(struct uma_hash *hash, u_int8_t *data)
hval = UMA_HASH(hash, data);
SLIST_FOREACH(slab, &hash->uh_slab_hash[hval], us_hlink) {
if ((u_int8_t *)slab->us_data == data)
if ((uint8_t *)slab->us_data == data)
return (slab);
}
return (NULL);
@@ -450,8 +450,8 @@ vsetobj(vm_offset_t va, vm_object_t obj)
* if they can provide more effecient allocation functions. This is useful
* for using direct mapped addresses.
*/
void *uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait);
void uma_small_free(void *mem, int size, u_int8_t flags);
void *uma_small_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait);
void uma_small_free(void *mem, int size, uint8_t flags);
#endif /* _KERNEL */
#endif /* VM_UMA_INT_H */