- Introduce the new M_NOVM option, which tells uma to only check the
  currently allocated slabs and bucket caches for free items.  It will not
  go ask the vm for pages.  This differs from M_NOWAIT in that it not only
  doesn't block, it doesn't even ask.
- Add a new zcreate option ZONE_VM, which sets the BUCKETCACHE zflag.  This
  tells uma that it should only allocate buckets out of the bucket cache,
  and not from the VM.  It does this by passing the M_NOVM option to zalloc
  when getting a new bucket.  This is so that the VM doesn't recursively
  enter itself while trying to allocate buckets for vm_map_entry zones.  If
  there are already allocated buckets when we get here we'll still use them,
  but otherwise we'll skip it.
- Use the ZONE_VM flag on vm map entries and pv entries on x86.
commit 030d3fdb72
parent ba0c1d407b
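For context, a minimal sketch of how a consumer would use the new flags, based only on the uma(9) API visible in this diff (uma_zcreate, uma_zalloc, UMA_ZONE_VM, M_NOWAIT, M_NOVM). The zone name "FOO", struct foo, foo_zone, and the two helper functions are hypothetical and are not part of this commit.

/*
 * Hypothetical consumer of the new flags; names are illustrative only.
 */
#include <sys/param.h>
#include <sys/malloc.h>
#include <vm/uma.h>

struct foo {
	int	f_state;
};

static uma_zone_t foo_zone;

static void
foo_zone_setup(void)
{
	/*
	 * UMA_ZONE_VM sets UMA_ZFLAG_BUCKETCACHE on the zone, so UMA will
	 * allocate buckets for it with M_NOVM and reuse cached buckets
	 * instead of calling back into the VM.
	 */
	foo_zone = uma_zcreate("FOO", sizeof(struct foo), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
}

static struct foo *
foo_alloc(void)
{
	/*
	 * M_NOVM only checks already-allocated slabs and the bucket cache;
	 * unlike plain M_NOWAIT it never asks the VM for pages, so it may
	 * return NULL even when memory could have been obtained.
	 */
	return (uma_zalloc(foo_zone, M_NOWAIT | M_NOVM));
}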
@@ -485,7 +485,7 @@ pmap_init(phys_start, phys_end)
 	if (initial_pvs < MINPV)
 		initial_pvs = MINPV;
 	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
-	    NULL, NULL, UMA_ALIGN_PTR, 0);
+	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
 	uma_zone_set_allocf(pvzone, pmap_allocf);
 	uma_prealloc(pvzone, initial_pvs);
@@ -52,6 +52,7 @@
 #define	M_NOWAIT	0x0001		/* do not block */
 #define	M_USE_RESERVE	0x0002		/* can alloc out of reserve memory */
 #define	M_ZERO		0x0004		/* bzero the allocation */
+#define	M_NOVM		0x0008		/* Don't ask the VM for pages */
 
 #define	M_MAGIC		877983977	/* time when first defined :-) */
@@ -173,6 +173,7 @@ uma_zone_t uma_zcreate(char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
 #define UMA_ZONE_MALLOC		0x0010	/* For use by malloc(9) only! */
 #define UMA_ZONE_NOFREE		0x0020	/* Do not free slabs of this type! */
 #define UMA_ZONE_MTXCLASS	0x0040	/* Create a new lock class */
+#define UMA_ZONE_VM		0x0080	/* Used for internal vm datastructures */
 
 /* Definitions for align */
 #define UMA_ALIGN_PTR	(sizeof(void *) - 1)	/* Alignment fit for ptr */
@@ -1013,6 +1013,9 @@ zone_ctor(void *mem, int size, void *udata)
 	if (arg->flags & UMA_ZONE_NOFREE)
 		zone->uz_flags |= UMA_ZFLAG_NOFREE;
 
+	if (arg->flags & UMA_ZONE_VM)
+		zone->uz_flags |= UMA_ZFLAG_BUCKETCACHE;
+
 	if (zone->uz_size > UMA_SLAB_SIZE)
 		zone_large_init(zone);
 	else
@@ -1417,9 +1420,16 @@ uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
 	/* Now we no longer need the zone lock. */
 	ZONE_UNLOCK(zone);
 
-	if (bucket == NULL)
+	if (bucket == NULL) {
+		int bflags;
+
+		bflags = flags;
+		if (zone->uz_flags & UMA_ZFLAG_BUCKETCACHE)
+			bflags |= M_NOVM;
+
 		bucket = uma_zalloc_internal(bucketzone,
-		    NULL, flags, NULL);
+		    NULL, bflags, NULL);
+	}
 
 	if (bucket != NULL) {
 #ifdef INVARIANTS
@@ -1524,7 +1534,8 @@ uma_zalloc_internal(uma_zone_t zone, void *udata, int flags, uma_bucket_t bucket
 	 * and cause the vm to allocate vm_map_entries.  If we need new
 	 * buckets there too we will recurse in kmem_alloc and bad
 	 * things happen.  So instead we return a NULL bucket, and make
-	 * the code that allocates buckets smart enough to deal with it */
+	 * the code that allocates buckets smart enough to deal with it
+	 */
 	if (zone == bucketzone && zone->uz_recurse != 0) {
 		ZONE_UNLOCK(zone);
 		return (NULL);
@@ -1541,6 +1552,9 @@ uma_zalloc_internal(uma_zone_t zone, void *udata, int flags, uma_bucket_t bucket
 		goto new_slab;
 	}
 
+	if (flags & M_NOVM)
+		goto alloc_fail;
+
 	zone->uz_recurse++;
 	slab = slab_zalloc(zone, flags);
 	zone->uz_recurse--;
@@ -273,6 +273,7 @@ struct uma_zone {
 #define UMA_ZFLAG_MALLOC	0x0008		/* Zone created by malloc */
 #define UMA_ZFLAG_NOFREE	0x0010		/* Don't free data from this zone */
 #define UMA_ZFLAG_FULL		0x0020		/* This zone reached uz_maxpages */
+#define UMA_ZFLAG_BUCKETCACHE	0x0040		/* Only allocate buckets from cache */
 
 /* This lives in uflags */
 #define UMA_ZONE_INTERNAL	0x1000		/* Internal zone for uflags */
@@ -159,7 +159,8 @@ vm_map_startup(void)
 	    vm_map_zinit, vm_map_zfini, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
 	uma_prealloc(mapzone, MAX_KMAP);
 	kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
-	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_MTXCLASS);
+	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
+	    UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
 	uma_prealloc(kmapentzone, MAX_KMAPENT);
 	mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);