- Remove bogus use of kmem_alloc that was inherited from the old zone
  allocator.
- Properly set M_ZERO when talking to the back end page allocators for
  non-malloc zones.  This forces us to zero-fill pages when they are first
  brought into a cache.
- Properly handle M_ZERO in uma_zalloc_internal.  This fixes a problem where
  per-CPU buckets weren't always getting zeroed; the caller-visible effect
  is sketched below.
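
For context, a minimal sketch of the caller-visible contract these fixes
enforce.  The struct foo zone and the foo_* helpers are hypothetical;
uma_zcreate(), uma_zalloc(), and KASSERT() are the real kernel interfaces,
used here with the era's signatures as best understood:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <vm/uma.h>

/* Hypothetical fixed-size record backed by a UMA zone. */
struct foo {
	int	 refcnt;
	void	*data;
};

static uma_zone_t foo_zone;

static void
foo_zone_init(void)
{
	foo_zone = uma_zcreate("foo", sizeof(struct foo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

static struct foo *
foo_alloc(void)
{
	struct foo *fp;

	/*
	 * After this commit M_ZERO is honored on every path: pages are
	 * pre-zeroed when a slab first enters the cache, and items that
	 * bypass the per-CPU buckets are bzero'd in uma_zalloc_internal.
	 */
	fp = uma_zalloc(foo_zone, M_WAITOK | M_ZERO);
	KASSERT(fp->refcnt == 0, ("uma_zalloc(M_ZERO) returned a dirty item"));
	return (fp);
}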
Jeff Roberson, 2002-06-19 20:49:44 +00:00
commit 3370c5bfd7, parent 95f24639b7
2 changed files with 18 additions and 17 deletions

sys/vm/uma.h

@@ -402,7 +402,6 @@ void uma_zone_set_freef(uma_zone_t zone, uma_free freef);
  */
 #define UMA_SLAB_BOOT	0x01		/* Slab alloced from boot pages */
 #define UMA_SLAB_KMEM	0x02		/* Slab alloced from kmem_map */
-#define UMA_SLAB_KMAP	0x04		/* Slab alloced from kernel_map */
 #define UMA_SLAB_PRIV	0x08		/* Slab alloced from priv allocator */
 #define UMA_SLAB_OFFP	0x10		/* Slab is managed separately */
 #define UMA_SLAB_MALLOC	0x20		/* Slab is a large malloc slab */

sys/vm/uma_core.c

@@ -697,6 +697,18 @@ slab_zalloc(uma_zone_t zone, int wait)
 		}
 	}
+
+	/*
+	 * This reproduces the old vm_zone behavior of zero filling pages the
+	 * first time they are added to a zone.
+	 *
+	 * Malloced items are zeroed in uma_zalloc.
+	 */
+	if ((zone->uz_flags & UMA_ZFLAG_MALLOC) == 0)
+		wait |= M_ZERO;
+	else
+		wait &= ~M_ZERO;
+
 	if (booted || (zone->uz_flags & UMA_ZFLAG_PRIVALLOC)) {
 		mtx_lock(&Giant);
 		mem = zone->uz_allocf(zone,
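
Since slab_zalloc() now forces M_ZERO into the wait flags for non-malloc
zones before calling uz_allocf, every back end has to honor that flag.
page_alloc() in the next hunk is expected to get this from kmem_malloc(),
which receives the wait flags; a zone with a private allocator
(UMA_ZFLAG_PRIVALLOC) would have to zero for itself.  A hedged sketch of
such a hook, where obj_backend_alloc() is a hypothetical stand-in for the
zone's private page source and the signature mirrors page_alloc():

static void *
obj_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
{
	void *p;

	*pflag = UMA_SLAB_PRIV;			/* tag for the free hook */
	p = obj_backend_alloc(bytes, wait);	/* hypothetical page source */
	if (p != NULL && (wait & M_ZERO))
		bzero(p, bytes);		/* honor the forced M_ZERO */
	return (p);
}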
@@ -794,18 +806,8 @@ page_alloc(uma_zone_t zone, int bytes, u_int8_t *pflag, int wait)
 {
 	void *p;	/* Returned page */
 
-	/*
-	 * XXX The original zone allocator did this, but I don't think it's
-	 * necessary in current.
-	 */
-	if (lockstatus(&kernel_map->lock, NULL)) {
-		*pflag = UMA_SLAB_KMEM;
-		p = (void *) kmem_malloc(kmem_map, bytes, wait);
-	} else {
-		*pflag = UMA_SLAB_KMAP;
-		p = (void *) kmem_alloc(kernel_map, bytes);
-	}
-
+	*pflag = UMA_SLAB_KMEM;
+	p = (void *) kmem_malloc(kmem_map, bytes, wait);
 	return (p);
 }
@@ -874,10 +876,9 @@ static void
 page_free(void *mem, int size, u_int8_t flags)
 {
 	vm_map_t map;
 
 	if (flags & UMA_SLAB_KMEM)
 		map = kmem_map;
-	else if (flags & UMA_SLAB_KMAP)
-		map = kernel_map;
 	else
 		panic("UMA: page_free used with invalid flags %d\n", flags);
@@ -1620,8 +1621,9 @@ uma_zalloc_internal(uma_zone_t zone, void *udata, int flags, uma_bucket_t bucket
 	ZONE_UNLOCK(zone);
 
 	/* Only construct at this time if we're not filling a bucket */
-	if (bucket == NULL && zone->uz_ctor != NULL) {
-		zone->uz_ctor(item, zone->uz_size, udata);
+	if (bucket == NULL) {
+		if (zone->uz_ctor != NULL)
+			zone->uz_ctor(item, zone->uz_size, udata);
 		if (flags & M_ZERO)
 			bzero(item, zone->uz_size);
 	}
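
Reading that last hunk: in the deleted lines the M_ZERO bzero() sat under a
condition that also required a constructor, so a zone with no ctor never got
zeroed on this path; now M_ZERO is applied whenever an item is returned
directly rather than into a bucket.  A short usage sketch of the direct
path, assuming zone and udata are in scope as in the surrounding function:

	void *item;

	/*
	 * Direct internal allocation: no bucket is being refilled, so the
	 * ctor (if any) runs and M_ZERO is honored immediately.
	 */
	item = uma_zalloc_internal(zone, udata, M_WAITOK | M_ZERO, NULL);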