Add a new zone flag UMA_ZONE_MTXCLASS. This puts the zone in its own

mutex class.  Currently this is only used for kmapentzone because kmapents
are potentially allocated when freeing memory.  This is not dangerous
though because no other allocations will be done while holding the
kmapentzone lock.
This commit is contained in:
Jeff Roberson 2002-04-29 23:45:41 +00:00
parent d4aa427fd1
commit 28bc44195c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=95758
4 changed files with 34 additions and 10 deletions

View File

@ -172,6 +172,7 @@ uma_zone_t uma_zcreate(char *name, int size, uma_ctor ctor, uma_dtor dtor,
off of the real memory */
#define UMA_ZONE_MALLOC 0x0010 /* For use by malloc(9) only! */
#define UMA_ZONE_NOFREE 0x0020 /* Do not free slabs of this type! */
#define UMA_ZONE_MTXCLASS 0x0040 /* Create a new lock class */
/* Definitions for align */
#define UMA_ALIGN_PTR (sizeof(void *) - 1) /* Alignment fit for ptr */

View File

@ -959,6 +959,7 @@ zone_ctor(void *mem, int size, void *udata)
{
struct uma_zctor_args *arg = udata;
uma_zone_t zone = mem;
int privlc;
int cplen;
int cpu;
@ -992,6 +993,11 @@ zone_ctor(void *mem, int size, void *udata)
else
zone_small_init(zone);
if (arg->flags & UMA_ZONE_MTXCLASS)
privlc = 1;
else
privlc = 0;
/* We do this so that the per cpu lock name is unique for each zone */
memcpy(zone->uz_lname, "PCPU ", 5);
cplen = min(strlen(zone->uz_name) + 1, LOCKNAME_LEN - 6);
@ -1053,7 +1059,7 @@ zone_ctor(void *mem, int size, void *udata)
zone->uz_size, zone->uz_ipers,
zone->uz_ppera, zone->uz_pgoff);
#endif
ZONE_LOCK_INIT(zone);
ZONE_LOCK_INIT(zone, privlc);
mtx_lock(&uma_mtx);
LIST_INSERT_HEAD(&uma_zones, zone, uz_link);
@ -1073,7 +1079,7 @@ zone_ctor(void *mem, int size, void *udata)
zone->uz_count = UMA_BUCKET_SIZE - 1;
for (cpu = 0; cpu < maxcpu; cpu++)
CPU_LOCK_INIT(zone, cpu);
CPU_LOCK_INIT(zone, cpu, privlc);
}
/*
@ -1799,10 +1805,12 @@ uma_zone_set_max(uma_zone_t zone, int nitems)
ZONE_LOCK(zone);
if (zone->uz_ppera > 1)
zone->uz_maxpages = nitems * zone->uz_ppera;
else
else
zone->uz_maxpages = nitems / zone->uz_ipers;
if (zone->uz_maxpages * zone->uz_ipers < nitems)
zone->uz_maxpages++;
ZONE_UNLOCK(zone);
}

View File

@ -284,16 +284,31 @@ void uma_large_free(uma_slab_t slab);
/* Lock Macros */
#define ZONE_LOCK_INIT(z) \
mtx_init(&(z)->uz_lock, (z)->uz_name, "UMA zone", \
MTX_DEF | MTX_DUPOK)
#define ZONE_LOCK_INIT(z, lc) \
do { \
if ((lc)) \
mtx_init(&(z)->uz_lock, (z)->uz_name, \
(z)->uz_name, MTX_DEF | MTX_DUPOK); \
else \
mtx_init(&(z)->uz_lock, (z)->uz_name, \
"UMA zone", MTX_DEF | MTX_DUPOK); \
} while (0)
#define ZONE_LOCK_FINI(z) mtx_destroy(&(z)->uz_lock)
#define ZONE_LOCK(z) mtx_lock(&(z)->uz_lock)
#define ZONE_UNLOCK(z) mtx_unlock(&(z)->uz_lock)
#define CPU_LOCK_INIT(z, cpu) \
mtx_init(&(z)->uz_cpu[(cpu)].uc_lock, (z)->uz_lname, "UMA cpu", \
MTX_DEF | MTX_DUPOK)
#define CPU_LOCK_INIT(z, cpu, lc) \
do { \
if ((lc)) \
mtx_init(&(z)->uz_cpu[(cpu)].uc_lock, \
(z)->uz_lname, (z)->uz_lname, \
MTX_DEF | MTX_DUPOK); \
else \
mtx_init(&(z)->uz_cpu[(cpu)].uc_lock, \
(z)->uz_lname, "UMA cpu", \
MTX_DEF | MTX_DUPOK); \
} while (0)
#define CPU_LOCK_FINI(z, cpu) \
mtx_destroy(&(z)->uz_cpu[(cpu)].uc_lock)

View File

@ -159,7 +159,7 @@ vm_map_startup(void)
vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
uma_prealloc(mapzone, MAX_KMAP);
kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_MTXCLASS);
uma_prealloc(kmapentzone, MAX_KMAPENT);
mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);