2005-01-07 02:29:27 +00:00
|
|
|
/*-
|
2017-11-27 15:23:17 +00:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
|
|
|
|
*
|
2019-11-28 07:49:25 +00:00
|
|
|
* Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
|
2005-07-16 09:51:52 +00:00
|
|
|
* Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
|
2006-10-26 12:55:32 +00:00
|
|
|
* Copyright (c) 2004-2006 Robert N. M. Watson
|
2005-07-16 09:51:52 +00:00
|
|
|
* All rights reserved.
|
2002-03-19 09:11:49 +00:00
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice unmodified, this list of conditions, and the following
|
|
|
|
* disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
|
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
|
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
|
|
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
|
|
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* uma_core.c Implementation of the Universal Memory allocator
|
|
|
|
*
|
|
|
|
* This allocator is intended to replace the multitude of similar object caches
|
|
|
|
* in the standard FreeBSD kernel. The intent is to be flexible as well as
|
2016-05-02 20:16:29 +00:00
|
|
|
* efficient. A primary design goal is to return unused memory to the rest of
|
2004-01-30 16:26:29 +00:00
|
|
|
* the system. This will make the system as a whole more flexible due to the
|
2002-03-19 09:11:49 +00:00
|
|
|
* ability to move memory to subsystems which most need it instead of leaving
|
|
|
|
* pools of reserved memory unused.
|
|
|
|
*
|
|
|
|
* The basic ideas stem from similar slab/zone based allocators whose algorithms
|
|
|
|
* are well known.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* TODO:
|
|
|
|
* - Improve memory usage for large allocations
|
|
|
|
* - Investigate cache size adjustments
|
|
|
|
*/
|
|
|
|
|
2003-06-11 23:50:51 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
2005-10-20 16:39:33 +00:00
|
|
|
#include "opt_ddb.h"
|
2002-03-19 09:11:49 +00:00
|
|
|
#include "opt_param.h"
|
2011-10-12 18:08:28 +00:00
|
|
|
#include "opt_vm.h"
|
2005-10-20 16:39:33 +00:00
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
2013-06-13 21:05:38 +00:00
|
|
|
#include <sys/bitset.h>
|
2018-10-24 16:41:47 +00:00
|
|
|
#include <sys/domainset.h>
|
2017-02-25 16:39:21 +00:00
|
|
|
#include <sys/eventhandler.h>
|
2002-03-19 09:11:49 +00:00
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/types.h>
|
2018-01-02 04:35:56 +00:00
|
|
|
#include <sys/limits.h>
|
2002-03-19 09:11:49 +00:00
|
|
|
#include <sys/queue.h>
|
|
|
|
#include <sys/malloc.h>
|
2004-08-06 21:52:38 +00:00
|
|
|
#include <sys/ktr.h>
|
2002-03-19 09:11:49 +00:00
|
|
|
#include <sys/lock.h>
|
|
|
|
#include <sys/sysctl.h>
|
|
|
|
#include <sys/mutex.h>
|
2002-05-20 17:54:48 +00:00
|
|
|
#include <sys/proc.h>
|
This is the much-discussed major upgrade to the random(4) device, known to you all as /dev/random.
This code has had an extensive rewrite and a good series of reviews, both by the author and other parties. This means a lot of code has been simplified. Pluggable structures for high-rate entropy generators are available, and it is most definitely not the case that /dev/random can be driven by only a hardware source any more. This has been designed out of the device. Hardware sources are stirred into the CSPRNG (Yarrow, Fortuna) like any other entropy source. Pluggable modules may be written by third parties for additional sources.
The harvesting structures and consequently the locking have been simplified. Entropy harvesting is done in a more general way (the documentation for this will follow). There is some GREAT entropy to be had in the UMA allocator, but it is disabled for now as messing with that is likely to annoy many people.
The venerable (but effective) Yarrow algorithm, which is no longer supported by its authors now has an alternative, Fortuna. For now, Yarrow is retained as the default algorithm, but this may be changed using a kernel option. It is intended to make Fortuna the default algorithm for 11.0. Interested parties are encouraged to read ISBN 978-0-470-47424-2 "Cryptography Engineering" By Ferguson, Schneier and Kohno for Fortuna's gory details. Heck, read it anyway.
Many thanks to Arthur Mesh who did early grunt work, and who got caught in the crossfire rather more than he deserved to.
My thanks also to folks who helped me thresh this out on whiteboards and in the odd "Hallway track", or otherwise.
My Nomex pants are on. Let the feedback commence!
Reviewed by: trasz,des(partial),imp(partial?),rwatson(partial?)
Approved by: so(des)
2014-10-30 21:21:53 +00:00
|
|
|
#include <sys/random.h>
|
2013-03-09 02:32:23 +00:00
|
|
|
#include <sys/rwlock.h>
|
Introduce a new sysctl, vm.zone_stats, which exports UMA(9) allocator
statistics via a binary structure stream:
- Add structure 'uma_stream_header', which defines a stream version,
definition of MAXCPUs used in the stream, and the number of zone
records in the stream.
- Add structure 'uma_type_header', which defines the name, alignment,
size, resource allocation limits, current pages allocated, preferred
bucket size, and central zone + keg statistics.
- Add structure 'uma_percpu_stat', which, for each per-CPU cache,
includes the number of allocations and frees, as well as the number
of free items in the cache.
- When the sysctl is queried, return a stream header, followed by a
series of type descriptions, each consisting of a type header
followed by a series of MAXCPUs uma_percpu_stat structures holding
per-CPU allocation information. Typical values of MAXCPU will be
1 (UP compiled kernel) and 16 (SMP compiled kernel).
This query mechanism allows user space monitoring tools to extract
memory allocation statistics in a machine-readable form, and to do so
at a per-CPU granularity, allowing monitoring of allocation patterns
across CPUs in order to better understand the distribution of work and
memory flow over multiple CPUs.
While here, also export the number of UMA zones as a sysctl
vm.uma_count, in order to assist in sizing user space buffers to
receive the stream.
A follow-up commit of libmemstat(3), a library to monitor kernel memory
allocation, will occur in the next few days. This change directly
supports converting netstat(1)'s "-mb" mode to using UMA-sourced stats
rather than separately maintained mbuf allocator statistics.
MFC after: 1 week
2005-07-14 16:35:13 +00:00
|
|
|
#include <sys/sbuf.h>
|
2013-11-19 10:51:46 +00:00
|
|
|
#include <sys/sched.h>
|
2002-03-19 09:11:49 +00:00
|
|
|
#include <sys/smp.h>
|
2016-02-03 23:30:17 +00:00
|
|
|
#include <sys/taskqueue.h>
|
2002-04-08 06:20:34 +00:00
|
|
|
#include <sys/vmmeter.h>
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
#include <vm/vm.h>
|
2018-10-24 16:41:47 +00:00
|
|
|
#include <vm/vm_domainset.h>
|
2002-03-19 09:11:49 +00:00
|
|
|
#include <vm/vm_object.h>
|
|
|
|
#include <vm/vm_page.h>
|
2013-02-26 23:35:27 +00:00
|
|
|
#include <vm/vm_pageout.h>
|
2002-03-19 09:11:49 +00:00
|
|
|
#include <vm/vm_param.h>
|
2018-01-12 23:25:05 +00:00
|
|
|
#include <vm/vm_phys.h>
|
2018-10-01 14:14:21 +00:00
|
|
|
#include <vm/vm_pagequeue.h>
|
2002-03-19 09:11:49 +00:00
|
|
|
#include <vm/vm_map.h>
|
|
|
|
#include <vm/vm_kern.h>
|
|
|
|
#include <vm/vm_extern.h>
|
|
|
|
#include <vm/uma.h>
|
|
|
|
#include <vm/uma_int.h>
|
2002-05-02 02:08:48 +00:00
|
|
|
#include <vm/uma_dbg.h>
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2005-10-20 16:39:33 +00:00
|
|
|
#include <ddb/ddb.h>
|
|
|
|
|
2011-10-12 18:08:28 +00:00
|
|
|
#ifdef DEBUG_MEMGUARD
|
|
|
|
#include <vm/memguard.h>
|
|
|
|
#endif
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
|
2018-01-12 23:25:05 +00:00
|
|
|
* This is the zone and keg from which all zones are spawned.
|
2002-03-19 09:11:49 +00:00
|
|
|
*/
|
2018-01-12 23:25:05 +00:00
|
|
|
static uma_zone_t kegs;
|
|
|
|
static uma_zone_t zones;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2018-01-12 23:25:05 +00:00
|
|
|
/* This is the zone from which all offpage uma_slab_ts are allocated. */
|
2002-03-19 09:11:49 +00:00
|
|
|
static uma_zone_t slabzone;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The initial hash tables come out of this zone so they can be allocated
|
|
|
|
* prior to malloc coming up.
|
|
|
|
*/
|
|
|
|
static uma_zone_t hashzone;
|
|
|
|
|
2007-02-11 20:13:52 +00:00
|
|
|
/* The boot-time adjusted value for cache line alignment. */
|
2011-03-21 09:40:01 +00:00
|
|
|
int uma_align_cache = 64 - 1;
|
2007-02-11 20:13:52 +00:00
|
|
|
|
2003-09-19 07:23:50 +00:00
|
|
|
static MALLOC_DEFINE(M_UMAHASH, "UMAHash", "UMA Hash Buckets");
|
2019-11-28 00:19:09 +00:00
|
|
|
static MALLOC_DEFINE(M_UMA, "UMA", "UMA Misc");
|
2003-09-19 07:23:50 +00:00
|
|
|
|
2002-04-08 06:20:34 +00:00
|
|
|
/*
|
|
|
|
* Are we allowed to allocate buckets?
|
|
|
|
*/
|
|
|
|
static int bucketdisable = 1;
|
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
/* Linked list of all kegs in the system */
|
2009-12-28 22:56:30 +00:00
|
|
|
static LIST_HEAD(,uma_keg) uma_kegs = LIST_HEAD_INITIALIZER(uma_kegs);
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2013-11-28 19:20:49 +00:00
|
|
|
/* Linked list of all cache-only zones in the system */
|
|
|
|
static LIST_HEAD(,uma_zone) uma_cachezones =
|
|
|
|
LIST_HEAD_INITIALIZER(uma_cachezones);
|
|
|
|
|
2014-10-05 21:34:56 +00:00
|
|
|
/* This RW lock protects the keg list */
|
2017-09-06 20:28:18 +00:00
|
|
|
static struct rwlock_padalign __exclusive_cache_line uma_rwlock;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2017-06-01 18:26:57 +00:00
|
|
|
/*
|
|
|
|
* Pointer and counter to pool of pages, that is preallocated at
|
2018-02-09 04:45:39 +00:00
|
|
|
* startup to bootstrap UMA.
|
2017-06-01 18:26:57 +00:00
|
|
|
*/
|
|
|
|
static char *bootmem;
|
|
|
|
static int boot_pages;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2019-09-01 22:22:43 +00:00
|
|
|
static struct sx uma_reclaim_lock;
|
2014-11-30 20:20:55 +00:00
|
|
|
|
2019-06-06 16:26:58 +00:00
|
|
|
/*
|
|
|
|
* kmem soft limit, initialized by uma_set_limit(). Ensure that early
|
|
|
|
* allocations don't trigger a wakeup of the reclaim thread.
|
|
|
|
*/
|
2019-11-29 03:14:10 +00:00
|
|
|
unsigned long uma_kmem_limit = LONG_MAX;
|
2019-06-06 16:26:58 +00:00
|
|
|
SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_limit, CTLFLAG_RD, &uma_kmem_limit, 0,
|
|
|
|
"UMA kernel memory soft limit");
|
2019-11-29 03:14:10 +00:00
|
|
|
unsigned long uma_kmem_total;
|
2019-06-06 16:26:58 +00:00
|
|
|
SYSCTL_ULONG(_vm, OID_AUTO, uma_kmem_total, CTLFLAG_RD, &uma_kmem_total, 0,
|
|
|
|
"UMA kernel memory usage");
|
2017-11-28 23:40:54 +00:00
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/* Is the VM done starting up? */
|
Followup on r302393 by cperciva, improving calculation of boot pages required
for UMA startup.
o Introduce another stage of UMA startup, which is entered after
vm_page_startup() finishes. After this stage we don't yet enable buckets,
but we can ask VM for pages. Rename stages to meaningful names while here.
New list of stages: BOOT_COLD, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
BOOT_RUNNING.
Enabling page alloc earlier allows us to dramatically reduce number of
boot pages required. What is more important number of zones becomes
consistent across different machines, as no MD allocations are done before
the BOOT_PAGEALLOC stage. Now only UMA internal zones actually need to use
startup_alloc(), however that may change, so vm_page_startup() provides
its need for early zones as argument.
o Introduce uma_startup_count() function, to avoid code duplication. The
function calculates sizes of zones zone and kegs zone, and calculates how
many pages UMA will need to bootstrap.
It counts not only of zone structures, but also of kegs, slabs and hashes.
o Hide uma_startup_foo() declarations from public file.
o Provide several DIAGNOSTIC printfs on boot_pages usage.
o Bugfix: when calculating zone of zones size use (mp_maxid + 1) instead of
mp_ncpus. Use resulting number not only in the size argument to zone_ctor()
but also as args.size.
Reviewed by: imp, gallatin (earlier version)
Differential Revision: https://reviews.freebsd.org/D14054
2018-02-06 04:16:00 +00:00
|
|
|
static enum { BOOT_COLD = 0, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
|
|
|
|
BOOT_RUNNING } booted = BOOT_COLD;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2003-09-19 23:27:46 +00:00
|
|
|
/*
|
|
|
|
* This is the handle used to schedule events that need to happen
|
|
|
|
* outside of the allocation fast path.
|
|
|
|
*/
|
2002-03-19 09:11:49 +00:00
|
|
|
static struct callout uma_callout;
|
2003-09-19 23:27:46 +00:00
|
|
|
#define UMA_TIMEOUT 20 /* Seconds for callout interval. */
|
2002-03-19 09:11:49 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* This structure is passed as the zone ctor arg so that I don't have to create
|
|
|
|
* a special allocation function just for zones.
|
|
|
|
*/
|
|
|
|
struct uma_zctor_args {
|
2012-10-26 17:51:05 +00:00
|
|
|
const char *name;
|
2002-05-02 07:36:30 +00:00
|
|
|
size_t size;
|
2002-03-19 09:11:49 +00:00
|
|
|
uma_ctor ctor;
|
|
|
|
uma_dtor dtor;
|
|
|
|
uma_init uminit;
|
|
|
|
uma_fini fini;
|
2013-06-17 03:43:47 +00:00
|
|
|
uma_import import;
|
|
|
|
uma_release release;
|
|
|
|
void *arg;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
uma_keg_t keg;
|
|
|
|
int align;
|
2013-04-09 17:43:48 +00:00
|
|
|
uint32_t flags;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
struct uma_kctor_args {
|
|
|
|
uma_zone_t zone;
|
|
|
|
size_t size;
|
|
|
|
uma_init uminit;
|
|
|
|
uma_fini fini;
|
2002-03-19 09:11:49 +00:00
|
|
|
int align;
|
2013-04-09 17:43:48 +00:00
|
|
|
uint32_t flags;
|
2002-03-19 09:11:49 +00:00
|
|
|
};
|
|
|
|
|
2003-09-19 06:26:45 +00:00
|
|
|
struct uma_bucket_zone {
|
|
|
|
uma_zone_t ubz_zone;
|
|
|
|
char *ubz_name;
|
2013-06-18 04:50:20 +00:00
|
|
|
int ubz_entries; /* Number of items it can hold. */
|
|
|
|
int ubz_maxsize; /* Maximum allocation size per-item. */
|
2003-09-19 06:26:45 +00:00
|
|
|
};
|
|
|
|
|
2013-06-18 04:50:20 +00:00
|
|
|
/*
|
|
|
|
* Compute the actual number of bucket entries to pack them in power
|
|
|
|
* of two sizes for more efficient space utilization.
|
|
|
|
*/
|
|
|
|
#define BUCKET_SIZE(n) \
|
|
|
|
(((sizeof(void *) * (n)) - sizeof(struct uma_bucket)) / sizeof(void *))
|
|
|
|
|
2014-06-12 11:57:07 +00:00
|
|
|
#define BUCKET_MAX BUCKET_SIZE(256)
|
2019-08-06 23:04:59 +00:00
|
|
|
#define BUCKET_MIN BUCKET_SIZE(4)
|
2003-09-19 06:26:45 +00:00
|
|
|
|
|
|
|
struct uma_bucket_zone bucket_zones[] = {
|
2013-06-26 00:57:38 +00:00
|
|
|
{ NULL, "4 Bucket", BUCKET_SIZE(4), 4096 },
|
Add two new UMA bucket zones to store 3 and 9 items per bucket.
These new buckets make bucket size self-tuning more soft and precise.
Without them there are buckets for 1, 5, 13, 29, ... items. While at
bigger sizes difference about 2x is fine, at smallest ones it is 5x and
2.6x respectively. New buckets make that line look like 1, 3, 5, 9, 13,
29, reducing jumps between steps, making algorithm work softer, allocating
and freeing memory in better fitting chunks. Otherwise there is quite a
big gap between allocating 128K and 5x128K of RAM at once.
2013-11-19 10:10:44 +00:00
|
|
|
{ NULL, "6 Bucket", BUCKET_SIZE(6), 3072 },
|
2013-06-26 00:57:38 +00:00
|
|
|
{ NULL, "8 Bucket", BUCKET_SIZE(8), 2048 },
|
Add two new UMA bucket zones to store 3 and 9 items per bucket.
These new buckets make bucket size self-tuning more soft and precise.
Without them there are buckets for 1, 5, 13, 29, ... items. While at
bigger sizes difference about 2x is fine, at smallest ones it is 5x and
2.6x respectively. New buckets make that line look like 1, 3, 5, 9, 13,
29, reducing jumps between steps, making algorithm work softer, allocating
and freeing memory in better fitting chunks. Otherwise there is quite a
big gap between allocating 128K and 5x128K of RAM at once.
2013-11-19 10:10:44 +00:00
|
|
|
{ NULL, "12 Bucket", BUCKET_SIZE(12), 1536 },
|
2013-06-26 00:57:38 +00:00
|
|
|
{ NULL, "16 Bucket", BUCKET_SIZE(16), 1024 },
|
2013-06-18 04:50:20 +00:00
|
|
|
{ NULL, "32 Bucket", BUCKET_SIZE(32), 512 },
|
|
|
|
{ NULL, "64 Bucket", BUCKET_SIZE(64), 256 },
|
|
|
|
{ NULL, "128 Bucket", BUCKET_SIZE(128), 128 },
|
2014-06-12 11:57:07 +00:00
|
|
|
{ NULL, "256 Bucket", BUCKET_SIZE(256), 64 },
|
2003-09-19 06:26:45 +00:00
|
|
|
{ NULL, NULL, 0}
|
|
|
|
};
|
|
|
|
|
2005-07-15 23:34:39 +00:00
|
|
|
/*
|
|
|
|
* Flags and enumerations to be passed to internal functions.
|
|
|
|
*/
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
enum zfreeskip {
|
|
|
|
SKIP_NONE = 0,
|
|
|
|
SKIP_CNT = 0x00000001,
|
|
|
|
SKIP_DTOR = 0x00010000,
|
|
|
|
SKIP_FINI = 0x00020000,
|
|
|
|
};
|
2004-08-02 00:18:36 +00:00
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/* Prototypes.. */
|
|
|
|
|
Followup on r302393 by cperciva, improving calculation of boot pages required
for UMA startup.
o Introduce another stage of UMA startup, which is entered after
vm_page_startup() finishes. After this stage we don't yet enable buckets,
but we can ask VM for pages. Rename stages to meaningful names while here.
New list of stages: BOOT_COLD, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
BOOT_RUNNING.
Enabling page alloc earlier allows us to dramatically reduce number of
boot pages required. What is more important number of zones becomes
consistent across different machines, as no MD allocations are done before
the BOOT_PAGEALLOC stage. Now only UMA internal zones actually need to use
startup_alloc(), however that may change, so vm_page_startup() provides
its need for early zones as argument.
o Introduce uma_startup_count() function, to avoid code duplication. The
function calculates sizes of zones zone and kegs zone, and calculates how
many pages UMA will need to bootstrap.
It counts not only of zone structures, but also of kegs, slabs and hashes.
o Hide uma_startup_foo() declarations from public file.
o Provide several DIAGNOSTIC printfs on boot_pages usage.
o Bugfix: when calculating zone of zones size use (mp_maxid + 1) instead of
mp_ncpus. Use resulting number not only in the size argument to zone_ctor()
but also as args.size.
Reviewed by: imp, gallatin (earlier version)
Differential Revision: https://reviews.freebsd.org/D14054
2018-02-06 04:16:00 +00:00
|
|
|
int uma_startup_count(int);
|
|
|
|
void uma_startup(void *, int);
|
|
|
|
void uma_startup1(void);
|
|
|
|
void uma_startup2(void);
|
|
|
|
|
2018-01-12 23:25:05 +00:00
|
|
|
static void *noobj_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
|
|
|
|
static void *page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
|
2018-07-06 02:06:03 +00:00
|
|
|
static void *pcpu_page_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
|
2018-01-12 23:25:05 +00:00
|
|
|
static void *startup_alloc(uma_zone_t, vm_size_t, int, uint8_t *, int);
|
2015-04-01 12:42:26 +00:00
|
|
|
static void page_free(void *, vm_size_t, uint8_t);
|
2018-07-06 02:06:03 +00:00
|
|
|
static void pcpu_page_free(void *, vm_size_t, uint8_t);
|
2019-01-23 18:58:15 +00:00
|
|
|
static uma_slab_t keg_alloc_slab(uma_keg_t, uma_zone_t, int, int, int);
|
2003-09-19 23:27:46 +00:00
|
|
|
static void cache_drain(uma_zone_t);
|
2002-03-19 09:11:49 +00:00
|
|
|
static void bucket_drain(uma_zone_t, uma_bucket_t);
|
2019-09-01 22:22:43 +00:00
|
|
|
static void bucket_cache_reclaim(uma_zone_t zone, bool);
|
2004-08-02 00:18:36 +00:00
|
|
|
static int keg_ctor(void *, int, void *, int);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
static void keg_dtor(void *, int, void *);
|
2004-08-02 00:18:36 +00:00
|
|
|
static int zone_ctor(void *, int, void *, int);
|
2002-04-08 04:48:58 +00:00
|
|
|
static void zone_dtor(void *, int, void *);
|
2004-08-02 00:18:36 +00:00
|
|
|
static int zero_init(void *, int, int);
|
2009-01-25 09:11:24 +00:00
|
|
|
static void keg_small_init(uma_keg_t keg);
|
|
|
|
static void keg_large_init(uma_keg_t keg);
|
2019-11-28 00:19:09 +00:00
|
|
|
static void zone_foreach(void (*zfunc)(uma_zone_t, void *), void *);
|
|
|
|
static void zone_timeout(uma_zone_t zone, void *);
|
2019-06-06 23:57:28 +00:00
|
|
|
static int hash_alloc(struct uma_hash *, u_int);
|
2002-05-13 04:39:28 +00:00
|
|
|
static int hash_expand(struct uma_hash *, struct uma_hash *);
|
|
|
|
static void hash_free(struct uma_hash *hash);
|
2002-03-19 09:11:49 +00:00
|
|
|
static void uma_timeout(void *);
|
|
|
|
static void uma_startup3(void);
|
2018-01-12 23:25:05 +00:00
|
|
|
static void *zone_alloc_item(uma_zone_t, void *, int, int);
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
static void *zone_alloc_item_locked(uma_zone_t, void *, int, int);
|
2013-06-17 03:43:47 +00:00
|
|
|
static void zone_free_item(uma_zone_t, void *, void *, enum zfreeskip);
|
2002-04-08 06:20:34 +00:00
|
|
|
static void bucket_enable(void);
|
2003-09-19 06:26:45 +00:00
|
|
|
static void bucket_init(void);
|
2013-06-26 00:57:38 +00:00
|
|
|
static uma_bucket_t bucket_alloc(uma_zone_t zone, void *, int);
|
|
|
|
static void bucket_free(uma_zone_t zone, uma_bucket_t, void *);
|
2003-09-19 06:26:45 +00:00
|
|
|
static void bucket_zone_drain(void);
|
2019-11-26 22:17:02 +00:00
|
|
|
static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
|
2013-06-17 03:43:47 +00:00
|
|
|
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
|
2009-01-25 09:11:24 +00:00
|
|
|
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
|
2013-04-09 17:43:48 +00:00
|
|
|
uma_fini fini, int align, uint32_t flags);
|
2019-12-04 18:40:05 +00:00
|
|
|
static int zone_import(void *, void **, int, int, int);
|
|
|
|
static void zone_release(void *, void **, int);
|
2018-01-12 23:25:05 +00:00
|
|
|
static void uma_zero_item(void *, uma_zone_t);
|
2019-11-26 22:17:02 +00:00
|
|
|
static bool cache_alloc(uma_zone_t, uma_cache_t, void *, int);
|
2019-11-27 23:19:06 +00:00
|
|
|
static bool cache_free(uma_zone_t, uma_cache_t, void *, void *, int);
|
2002-10-24 07:59:03 +00:00
|
|
|
|
Introduce a new sysctl, vm.zone_stats, which exports UMA(9) allocator
statistics via a binary structure stream:
- Add structure 'uma_stream_header', which defines a stream version,
definition of MAXCPUs used in the stream, and the number of zone
records in the stream.
- Add structure 'uma_type_header', which defines the name, alignment,
size, resource allocation limits, current pages allocated, preferred
bucket size, and central zone + keg statistics.
- Add structure 'uma_percpu_stat', which, for each per-CPU cache,
includes the number of allocations and frees, as well as the number
of free items in the cache.
- When the sysctl is queried, return a stream header, followed by a
series of type descriptions, each consisting of a type header
followed by a series of MAXCPUs uma_percpu_stat structures holding
per-CPU allocation information. Typical values of MAXCPU will be
1 (UP compiled kernel) and 16 (SMP compiled kernel).
This query mechanism allows user space monitoring tools to extract
memory allocation statistics in a machine-readable form, and to do so
at a per-CPU granularity, allowing monitoring of allocation patterns
across CPUs in order to better understand the distribution of work and
memory flow over multiple CPUs.
While here, also export the number of UMA zones as a sysctl
vm.uma_count, in order to assist in sizing user space buffers to
receive the stream.
A follow-up commit of libmemstat(3), a library to monitor kernel memory
allocation, will occur in the next few days. This change directly
supports converting netstat(1)'s "-mb" mode to using UMA-sourced stats
rather than separately maintained mbuf allocator statistics.
MFC after: 1 week
2005-07-14 16:35:13 +00:00
|
|
|
static int sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS);
|
|
|
|
static int sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS);
|
2019-11-28 00:19:09 +00:00
|
|
|
static int sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS);
|
|
|
|
static int sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS);
|
2019-12-11 06:50:55 +00:00
|
|
|
static int sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS);
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2016-02-03 22:02:36 +00:00
|
|
|
#ifdef INVARIANTS
|
2019-12-13 09:31:59 +00:00
|
|
|
static inline struct noslabbits *slab_dbg_bits(uma_slab_t slab, uma_keg_t keg);
|
|
|
|
|
2018-06-08 00:15:08 +00:00
|
|
|
static bool uma_dbg_kskip(uma_keg_t keg, void *mem);
|
|
|
|
static bool uma_dbg_zskip(uma_zone_t zone, void *mem);
|
2016-02-03 22:02:36 +00:00
|
|
|
static void uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item);
|
|
|
|
static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
|
2018-06-08 00:15:08 +00:00
|
|
|
|
|
|
|
static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
|
|
|
|
"Memory allocation debugging");
|
|
|
|
|
|
|
|
static u_int dbg_divisor = 1;
|
|
|
|
SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
|
|
|
|
CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
|
|
|
|
"Debug & thrash every this item in memory allocator");
|
|
|
|
|
|
|
|
static counter_u64_t uma_dbg_cnt = EARLY_COUNTER;
|
|
|
|
static counter_u64_t uma_skip_cnt = EARLY_COUNTER;
|
|
|
|
SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
|
|
|
|
&uma_dbg_cnt, "memory items debugged");
|
|
|
|
SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
|
|
|
|
&uma_skip_cnt, "memory items skipped, not debugged");
|
2016-02-03 22:02:36 +00:00
|
|
|
#endif
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
|
|
|
|
|
2019-11-28 04:15:16 +00:00
|
|
|
SYSCTL_NODE(_vm, OID_AUTO, uma, CTLFLAG_RW, 0, "Universal Memory Allocator");
|
|
|
|
|
Introduce a new sysctl, vm.zone_stats, which exports UMA(9) allocator
statistics via a binary structure stream:
- Add structure 'uma_stream_header', which defines a stream version,
definition of MAXCPUs used in the stream, and the number of zone
records in the stream.
- Add structure 'uma_type_header', which defines the name, alignment,
size, resource allocation limits, current pages allocated, preferred
bucket size, and central zone + keg statistics.
- Add structure 'uma_percpu_stat', which, for each per-CPU cache,
includes the number of allocations and frees, as well as the number
of free items in the cache.
- When the sysctl is queried, return a stream header, followed by a
series of type descriptions, each consisting of a type header
followed by a series of MAXCPUs uma_percpu_stat structures holding
per-CPU allocation information. Typical values of MAXCPU will be
1 (UP compiled kernel) and 16 (SMP compiled kernel).
This query mechanism allows user space monitoring tools to extract
memory allocation statistics in a machine-readable form, and to do so
at a per-CPU granularity, allowing monitoring of allocation patterns
across CPUs in order to better understand the distribution of work and
memory flow over multiple CPUs.
While here, also export the number of UMA zones as a sysctl
vm.uma_count, in order to assist in sizing user space buffers to
receive the stream.
A follow-up commit of libmemstat(3), a library to monitor kernel memory
allocation, will occur in the next few days. This change directly
supports converting netstat(1)'s "-mb" mode to using UMA-sourced stats
rather than separately maintained mbuf allocator statistics.
MFC after: 1 week
2005-07-14 16:35:13 +00:00
|
|
|
SYSCTL_PROC(_vm, OID_AUTO, zone_count, CTLFLAG_RD|CTLTYPE_INT,
|
|
|
|
0, 0, sysctl_vm_zone_count, "I", "Number of UMA zones");
|
|
|
|
|
|
|
|
SYSCTL_PROC(_vm, OID_AUTO, zone_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
|
|
|
|
0, 0, sysctl_vm_zone_stats, "s,struct uma_type_header", "Zone Stats");
|
|
|
|
|
2012-12-07 22:27:13 +00:00
|
|
|
static int zone_warnings = 1;
|
2014-06-28 03:56:17 +00:00
|
|
|
SYSCTL_INT(_vm, OID_AUTO, zone_warnings, CTLFLAG_RWTUN, &zone_warnings, 0,
|
2012-12-07 22:27:13 +00:00
|
|
|
"Warn when UMA zones becomes full");
|
|
|
|
|
2002-04-08 06:20:34 +00:00
|
|
|
/*
|
|
|
|
* This routine checks to see whether or not it's safe to enable buckets.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
bucket_enable(void)
|
|
|
|
{
|
2012-05-23 18:56:29 +00:00
|
|
|
bucketdisable = vm_page_count_min();
|
2002-04-08 06:20:34 +00:00
|
|
|
}
|
|
|
|
|
2004-11-06 11:43:30 +00:00
|
|
|
/*
|
|
|
|
* Initialize bucket_zones, the array of zones of buckets of various sizes.
|
|
|
|
*
|
|
|
|
* For each zone, calculate the memory required for each bucket, consisting
|
2013-06-18 04:50:20 +00:00
|
|
|
* of the header and an array of pointers.
|
2004-11-06 11:43:30 +00:00
|
|
|
*/
|
2003-09-19 06:26:45 +00:00
|
|
|
static void
|
|
|
|
bucket_init(void)
|
|
|
|
{
|
|
|
|
struct uma_bucket_zone *ubz;
|
2013-06-18 04:50:20 +00:00
|
|
|
int size;
|
2003-09-19 06:26:45 +00:00
|
|
|
|
2015-04-20 16:48:21 +00:00
|
|
|
for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++) {
|
2003-09-19 06:26:45 +00:00
|
|
|
size = roundup(sizeof(struct uma_bucket), sizeof(void *));
|
|
|
|
size += sizeof(void *) * ubz->ubz_entries;
|
|
|
|
ubz->ubz_zone = uma_zcreate(ubz->ubz_name, size,
|
2009-01-25 09:11:24 +00:00
|
|
|
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
|
2018-01-12 23:25:05 +00:00
|
|
|
UMA_ZONE_MTXCLASS | UMA_ZFLAG_BUCKET | UMA_ZONE_NUMA);
|
2003-09-19 06:26:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2004-11-06 11:43:30 +00:00
|
|
|
/*
|
|
|
|
* Given a desired number of entries for a bucket, return the zone from which
|
|
|
|
* to allocate the bucket.
|
|
|
|
*/
|
|
|
|
static struct uma_bucket_zone *
|
|
|
|
bucket_zone_lookup(int entries)
|
|
|
|
{
|
2013-06-18 04:50:20 +00:00
|
|
|
struct uma_bucket_zone *ubz;
|
2004-11-06 11:43:30 +00:00
|
|
|
|
2013-06-18 04:50:20 +00:00
|
|
|
for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
|
|
|
|
if (ubz->ubz_entries >= entries)
|
|
|
|
return (ubz);
|
|
|
|
ubz--;
|
|
|
|
return (ubz);
|
|
|
|
}
|
|
|
|
|
2019-11-22 16:30:47 +00:00
|
|
|
static struct uma_bucket_zone *
|
|
|
|
bucket_zone_max(uma_zone_t zone, int nitems)
|
|
|
|
{
|
|
|
|
struct uma_bucket_zone *ubz;
|
|
|
|
int bpcpu;
|
|
|
|
|
|
|
|
bpcpu = 2;
|
|
|
|
#ifdef UMA_XDOMAIN
|
|
|
|
if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
|
|
|
|
/* Count the cross-domain bucket. */
|
|
|
|
bpcpu++;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
|
|
|
|
if (ubz->ubz_entries * bpcpu * mp_ncpus > nitems)
|
|
|
|
break;
|
|
|
|
if (ubz == &bucket_zones[0])
|
|
|
|
ubz = NULL;
|
|
|
|
else
|
|
|
|
ubz--;
|
|
|
|
return (ubz);
|
|
|
|
}
|
|
|
|
|
2013-06-18 04:50:20 +00:00
|
|
|
static int
|
|
|
|
bucket_select(int size)
|
|
|
|
{
|
|
|
|
struct uma_bucket_zone *ubz;
|
|
|
|
|
|
|
|
ubz = &bucket_zones[0];
|
|
|
|
if (size > ubz->ubz_maxsize)
|
|
|
|
return MAX((ubz->ubz_maxsize * ubz->ubz_entries) / size, 1);
|
|
|
|
|
|
|
|
for (; ubz->ubz_entries != 0; ubz++)
|
|
|
|
if (ubz->ubz_maxsize < size)
|
|
|
|
break;
|
|
|
|
ubz--;
|
|
|
|
return (ubz->ubz_entries);
|
2004-11-06 11:43:30 +00:00
|
|
|
}
|
|
|
|
|
2003-09-19 06:26:45 +00:00
|
|
|
static uma_bucket_t
|
2013-06-26 00:57:38 +00:00
|
|
|
bucket_alloc(uma_zone_t zone, void *udata, int flags)
|
2003-09-19 06:26:45 +00:00
|
|
|
{
|
|
|
|
struct uma_bucket_zone *ubz;
|
|
|
|
uma_bucket_t bucket;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is to stop us from allocating per cpu buckets while we're
|
2005-10-08 21:03:54 +00:00
|
|
|
* running out of vm.boot_pages. Otherwise, we would exhaust the
|
2003-09-19 06:26:45 +00:00
|
|
|
* boot pages. This also prevents us from allocating buckets in
|
|
|
|
* low memory situations.
|
|
|
|
*/
|
|
|
|
if (bucketdisable)
|
|
|
|
return (NULL);
|
2013-06-26 00:57:38 +00:00
|
|
|
/*
|
|
|
|
* To limit bucket recursion we store the original zone flags
|
|
|
|
* in a cookie passed via zalloc_arg/zfree_arg. This allows the
|
|
|
|
* NOVM flag to persist even through deep recursions. We also
|
|
|
|
* store ZFLAG_BUCKET once we have recursed attempting to allocate
|
|
|
|
* a bucket for a bucket zone so we do not allow infinite bucket
|
|
|
|
* recursion. This cookie will even persist to frees of unused
|
|
|
|
* buckets via the allocation path or bucket allocations in the
|
|
|
|
* free path.
|
|
|
|
*/
|
|
|
|
if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
|
|
|
|
udata = (void *)(uintptr_t)zone->uz_flags;
|
2013-11-27 19:55:42 +00:00
|
|
|
else {
|
|
|
|
if ((uintptr_t)udata & UMA_ZFLAG_BUCKET)
|
|
|
|
return (NULL);
|
2013-06-26 00:57:38 +00:00
|
|
|
udata = (void *)((uintptr_t)udata | UMA_ZFLAG_BUCKET);
|
2013-11-27 19:55:42 +00:00
|
|
|
}
|
2013-06-26 00:57:38 +00:00
|
|
|
if ((uintptr_t)udata & UMA_ZFLAG_CACHEONLY)
|
2013-06-20 19:08:12 +00:00
|
|
|
flags |= M_NOVM;
|
2019-11-28 00:19:09 +00:00
|
|
|
ubz = bucket_zone_lookup(zone->uz_bucket_size);
|
2014-06-12 11:36:22 +00:00
|
|
|
if (ubz->ubz_zone == zone && (ubz + 1)->ubz_entries != 0)
|
|
|
|
ubz++;
|
2013-06-26 00:57:38 +00:00
|
|
|
bucket = uma_zalloc_arg(ubz->ubz_zone, udata, flags);
|
2003-09-19 06:26:45 +00:00
|
|
|
if (bucket) {
|
|
|
|
#ifdef INVARIANTS
|
|
|
|
bzero(bucket->ub_bucket, sizeof(void *) * ubz->ubz_entries);
|
|
|
|
#endif
|
|
|
|
bucket->ub_cnt = 0;
|
|
|
|
bucket->ub_entries = ubz->ubz_entries;
|
|
|
|
}
|
|
|
|
|
|
|
|
return (bucket);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2013-06-26 00:57:38 +00:00
|
|
|
bucket_free(uma_zone_t zone, uma_bucket_t bucket, void *udata)
|
2003-09-19 06:26:45 +00:00
|
|
|
{
|
|
|
|
struct uma_bucket_zone *ubz;
|
|
|
|
|
2013-06-18 04:50:20 +00:00
|
|
|
KASSERT(bucket->ub_cnt == 0,
|
|
|
|
("bucket_free: Freeing a non free bucket."));
|
2013-06-26 00:57:38 +00:00
|
|
|
if ((zone->uz_flags & UMA_ZFLAG_BUCKET) == 0)
|
|
|
|
udata = (void *)(uintptr_t)zone->uz_flags;
|
2004-11-06 11:43:30 +00:00
|
|
|
ubz = bucket_zone_lookup(bucket->ub_entries);
|
2013-06-26 00:57:38 +00:00
|
|
|
uma_zfree_arg(ubz->ubz_zone, bucket, udata);
|
2003-09-19 06:26:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
bucket_zone_drain(void)
|
|
|
|
{
|
|
|
|
struct uma_bucket_zone *ubz;
|
|
|
|
|
|
|
|
for (ubz = &bucket_zones[0]; ubz->ubz_entries != 0; ubz++)
|
2019-09-01 22:22:43 +00:00
|
|
|
uma_zone_reclaim(ubz->ubz_zone, UMA_RECLAIM_DRAIN);
|
2003-09-19 06:26:45 +00:00
|
|
|
}
|
|
|
|
|
2019-09-01 22:22:43 +00:00
|
|
|
/*
|
|
|
|
* Attempt to satisfy an allocation by retrieving a full bucket from one of the
|
|
|
|
* zone's caches.
|
|
|
|
*/
|
2018-11-13 19:44:40 +00:00
|
|
|
static uma_bucket_t
|
2019-09-01 22:22:43 +00:00
|
|
|
zone_fetch_bucket(uma_zone_t zone, uma_zone_domain_t zdom)
|
2018-11-13 19:44:40 +00:00
|
|
|
{
|
|
|
|
uma_bucket_t bucket;
|
|
|
|
|
|
|
|
ZONE_LOCK_ASSERT(zone);
|
|
|
|
|
2019-09-01 22:22:43 +00:00
|
|
|
if ((bucket = TAILQ_FIRST(&zdom->uzd_buckets)) != NULL) {
|
2018-11-13 19:44:40 +00:00
|
|
|
MPASS(zdom->uzd_nitems >= bucket->ub_cnt);
|
2019-09-01 22:22:43 +00:00
|
|
|
TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
|
2018-11-13 19:44:40 +00:00
|
|
|
zdom->uzd_nitems -= bucket->ub_cnt;
|
2019-09-01 22:22:43 +00:00
|
|
|
if (zdom->uzd_imin > zdom->uzd_nitems)
|
2018-11-13 19:44:40 +00:00
|
|
|
zdom->uzd_imin = zdom->uzd_nitems;
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
zone->uz_bkt_count -= bucket->ub_cnt;
|
2018-11-13 19:44:40 +00:00
|
|
|
}
|
|
|
|
return (bucket);
|
|
|
|
}
|
|
|
|
|
2019-09-01 22:22:43 +00:00
|
|
|
/*
|
|
|
|
* Insert a full bucket into the specified cache. The "ws" parameter indicates
|
|
|
|
* whether the bucket's contents should be counted as part of the zone's working
|
|
|
|
* set.
|
|
|
|
*/
|
2018-11-13 19:44:40 +00:00
|
|
|
static void
|
|
|
|
zone_put_bucket(uma_zone_t zone, uma_zone_domain_t zdom, uma_bucket_t bucket,
|
|
|
|
const bool ws)
|
|
|
|
{
|
|
|
|
|
|
|
|
ZONE_LOCK_ASSERT(zone);
|
2019-11-10 09:25:19 +00:00
|
|
|
KASSERT(!ws || zone->uz_bkt_count < zone->uz_bkt_max,
|
|
|
|
("%s: zone %p overflow", __func__, zone));
|
2018-11-13 19:44:40 +00:00
|
|
|
|
2019-09-01 22:22:43 +00:00
|
|
|
if (ws)
|
|
|
|
TAILQ_INSERT_HEAD(&zdom->uzd_buckets, bucket, ub_link);
|
|
|
|
else
|
|
|
|
TAILQ_INSERT_TAIL(&zdom->uzd_buckets, bucket, ub_link);
|
2018-11-13 19:44:40 +00:00
|
|
|
zdom->uzd_nitems += bucket->ub_cnt;
|
|
|
|
if (ws && zdom->uzd_imax < zdom->uzd_nitems)
|
|
|
|
zdom->uzd_imax = zdom->uzd_nitems;
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
zone->uz_bkt_count += bucket->ub_cnt;
|
2018-11-13 19:44:40 +00:00
|
|
|
}
|
|
|
|
|
2012-12-07 22:27:13 +00:00
|
|
|
static void
|
|
|
|
zone_log_warning(uma_zone_t zone)
|
|
|
|
{
|
|
|
|
static const struct timeval warninterval = { 300, 0 };
|
|
|
|
|
|
|
|
if (!zone_warnings || zone->uz_warning == NULL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (ratecheck(&zone->uz_ratecheck, &warninterval))
|
|
|
|
printf("[zone: %s] %s\n", zone->uz_name, zone->uz_warning);
|
|
|
|
}
|
|
|
|
|
2015-12-20 02:05:33 +00:00
|
|
|
static inline void
|
|
|
|
zone_maxaction(uma_zone_t zone)
|
|
|
|
{
|
2016-02-03 23:30:17 +00:00
|
|
|
|
|
|
|
if (zone->uz_maxaction.ta_func != NULL)
|
|
|
|
taskqueue_enqueue(taskqueue_thread, &zone->uz_maxaction);
|
2015-12-20 02:05:33 +00:00
|
|
|
}
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
|
|
|
|
* Routine called by timeout which is used to fire off some time interval
|
2003-09-19 23:27:46 +00:00
|
|
|
* based calculations. (stats, hash size, etc.)
|
2002-03-19 09:11:49 +00:00
|
|
|
*
|
|
|
|
* Arguments:
|
|
|
|
* arg Unused
|
2004-01-30 16:26:29 +00:00
|
|
|
*
|
2002-03-19 09:11:49 +00:00
|
|
|
* Returns:
|
|
|
|
* Nothing
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
uma_timeout(void *unused)
|
|
|
|
{
|
2002-04-08 06:20:34 +00:00
|
|
|
bucket_enable();
|
2019-11-28 00:19:09 +00:00
|
|
|
zone_foreach(zone_timeout, NULL);
|
2002-03-19 09:11:49 +00:00
|
|
|
|
|
|
|
/* Reschedule this event */
|
2003-09-19 23:27:46 +00:00
|
|
|
callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
2018-11-13 19:44:40 +00:00
|
|
|
/*
|
|
|
|
* Update the working set size estimate for the zone's bucket cache.
|
|
|
|
* The constants chosen here are somewhat arbitrary. With an update period of
|
|
|
|
* 20s (UMA_TIMEOUT), this estimate is dominated by zone activity over the
|
|
|
|
* last 100s.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
zone_domain_update_wss(uma_zone_domain_t zdom)
|
|
|
|
{
|
|
|
|
long wss;
|
|
|
|
|
|
|
|
MPASS(zdom->uzd_imax >= zdom->uzd_imin);
|
|
|
|
wss = zdom->uzd_imax - zdom->uzd_imin;
|
|
|
|
zdom->uzd_imax = zdom->uzd_imin = zdom->uzd_nitems;
|
2019-09-01 22:22:43 +00:00
|
|
|
zdom->uzd_wss = (4 * wss + zdom->uzd_wss) / 5;
|
2018-11-13 19:44:40 +00:00
|
|
|
}
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
|
2003-09-19 23:27:46 +00:00
|
|
|
* Routine to perform timeout driven calculations. This expands the
|
|
|
|
* hashes and does per cpu statistics aggregation.
|
2002-03-19 09:11:49 +00:00
|
|
|
*
|
2009-01-25 09:11:24 +00:00
|
|
|
* Returns nothing.
|
2002-03-19 09:11:49 +00:00
|
|
|
*/
|
|
|
|
static void
|
2019-11-28 00:19:09 +00:00
|
|
|
zone_timeout(uma_zone_t zone, void *unused)
|
2002-03-19 09:11:49 +00:00
|
|
|
{
|
2019-11-10 09:25:19 +00:00
|
|
|
uma_keg_t keg;
|
2019-06-06 23:57:28 +00:00
|
|
|
u_int slabs;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2019-11-20 01:57:33 +00:00
|
|
|
if ((zone->uz_flags & UMA_ZONE_HASH) == 0)
|
2019-11-10 09:25:19 +00:00
|
|
|
goto update_wss;
|
|
|
|
|
|
|
|
keg = zone->uz_keg;
|
2009-01-25 09:11:24 +00:00
|
|
|
KEG_LOCK(keg);
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
|
2009-01-25 09:11:24 +00:00
|
|
|
* Expand the keg hash table.
|
2004-01-30 16:26:29 +00:00
|
|
|
*
|
2002-03-19 09:11:49 +00:00
|
|
|
* This is done if the number of slabs is larger than the hash size.
|
|
|
|
* What I'm trying to do here is completely reduce collisions. This
|
|
|
|
* may be a little aggressive. Should I allow for two collisions max?
|
|
|
|
*/
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
if (keg->uk_flags & UMA_ZONE_HASH &&
|
2019-06-06 23:57:28 +00:00
|
|
|
(slabs = keg->uk_pages / keg->uk_ppera) >
|
|
|
|
keg->uk_hash.uh_hashsize) {
|
2002-09-18 08:26:30 +00:00
|
|
|
struct uma_hash newhash;
|
|
|
|
struct uma_hash oldhash;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/*
|
2004-01-30 16:26:29 +00:00
|
|
|
* This is so involved because allocating and freeing
|
2009-01-25 09:11:24 +00:00
|
|
|
* while the keg lock is held will lead to deadlock.
|
2002-09-18 08:26:30 +00:00
|
|
|
* I have to do everything in stages and check for
|
|
|
|
* races.
|
|
|
|
*/
|
2009-01-25 09:11:24 +00:00
|
|
|
KEG_UNLOCK(keg);
|
2019-06-06 23:57:28 +00:00
|
|
|
ret = hash_alloc(&newhash, 1 << fls(slabs));
|
2009-01-25 09:11:24 +00:00
|
|
|
KEG_LOCK(keg);
|
2002-09-18 08:26:30 +00:00
|
|
|
if (ret) {
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
if (hash_expand(&keg->uk_hash, &newhash)) {
|
|
|
|
oldhash = keg->uk_hash;
|
|
|
|
keg->uk_hash = newhash;
|
2002-09-18 08:26:30 +00:00
|
|
|
} else
|
|
|
|
oldhash = newhash;
|
2002-05-13 04:39:28 +00:00
|
|
|
|
2009-01-25 09:11:24 +00:00
|
|
|
KEG_UNLOCK(keg);
|
2002-09-18 08:26:30 +00:00
|
|
|
hash_free(&oldhash);
|
2013-06-28 21:13:19 +00:00
|
|
|
return;
|
2002-04-14 13:47:10 +00:00
|
|
|
}
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
2019-09-01 22:22:43 +00:00
|
|
|
KEG_UNLOCK(keg);
|
2018-11-13 19:44:40 +00:00
|
|
|
|
2019-11-10 09:25:19 +00:00
|
|
|
update_wss:
|
2019-09-01 22:22:43 +00:00
|
|
|
ZONE_LOCK(zone);
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
for (int i = 0; i < vm_ndomains; i++)
|
2018-11-13 19:44:40 +00:00
|
|
|
zone_domain_update_wss(&zone->uz_domain[i]);
|
2019-09-01 22:22:43 +00:00
|
|
|
ZONE_UNLOCK(zone);
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
2002-04-14 13:47:10 +00:00
|
|
|
/*
 * Allocate and zero fill the next sized hash table from the appropriate
 * backing store.
 *
 * Arguments:
 *	hash  The hash structure to populate; uh_hashsize, uh_hashmask and
 *	      uh_slab_hash are (re)initialized here.
 *	size  Requested number of buckets; must be a power of 2.
 *
 * Returns:
 *	1 on success and 0 on failure.
 */
static int
hash_alloc(struct uma_hash *hash, u_int size)
{
	size_t alloc;

	KASSERT(powerof2(size), ("hash size must be power of 2"));
	if (size > UMA_HASH_SIZE_INIT) {
		hash->uh_hashsize = size;
		alloc = sizeof(hash->uh_slab_hash[0]) * hash->uh_hashsize;
		/*
		 * Large tables come from malloc(9).  M_NOWAIT: this runs
		 * with a keg unlocked in the timeout path and failure is
		 * tolerable — we simply keep the old (smaller) table.
		 */
		hash->uh_slab_hash = malloc(alloc, M_UMAHASH, M_NOWAIT);
	} else {
		/*
		 * The initial-size table is carved from the dedicated
		 * hashzone instead of malloc(9).
		 */
		alloc = sizeof(hash->uh_slab_hash[0]) * UMA_HASH_SIZE_INIT;
		hash->uh_slab_hash = zone_alloc_item(hashzone, NULL,
		    UMA_ANYDOMAIN, M_WAITOK);
		hash->uh_hashsize = UMA_HASH_SIZE_INIT;
	}
	if (hash->uh_slab_hash) {
		/* Zero the bucket heads so every list starts empty. */
		bzero(hash->uh_slab_hash, alloc);
		/* hashsize is a power of 2, so size - 1 is a valid mask. */
		hash->uh_hashmask = hash->uh_hashsize - 1;
		return (1);
	}

	return (0);
}
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
 * Expands the hash table for HASH zones.  This is done from zone_timeout
 * to reduce collisions.  This must not be done in the regular allocation
 * path, otherwise, we can recurse on the vm while allocating pages.
 *
 * Arguments:
 *	oldhash  The hash you want to expand
 *	newhash  The hash structure for the new table
 *
 * Returns:
 *	1 if the entries were migrated into newhash, 0 if newhash was
 *	unusable (no table, or not actually larger than oldhash).
 *
 * Discussion:
 */
static int
hash_expand(struct uma_hash *oldhash, struct uma_hash *newhash)
{
	uma_hash_slab_t slab;
	u_int hval;
	u_int idx;

	/* Nothing to migrate into if the new table allocation failed. */
	if (!newhash->uh_slab_hash)
		return (0);

	/* Only ever grow; an equal or smaller table is not an expansion. */
	if (oldhash->uh_hashsize >= newhash->uh_hashsize)
		return (0);

	/*
	 * I need to investigate hash algorithms for resizing without a
	 * full rehash.
	 */

	/*
	 * Full rehash: unlink each slab from its old bucket and re-insert
	 * at the head of the bucket computed with the new (larger) mask.
	 */
	for (idx = 0; idx < oldhash->uh_hashsize; idx++)
		while (!LIST_EMPTY(&oldhash->uh_slab_hash[idx])) {
			slab = LIST_FIRST(&oldhash->uh_slab_hash[idx]);
			LIST_REMOVE(slab, uhs_hlink);
			hval = UMA_HASH(newhash, slab->uhs_data);
			LIST_INSERT_HEAD(&newhash->uh_slab_hash[hval],
			    slab, uhs_hlink);
		}

	return (1);
}
|
|
|
|
|
2002-04-14 13:47:10 +00:00
|
|
|
/*
 * Free the hash bucket to the appropriate backing store.
 *
 * Arguments:
 *	hash  The hash whose uh_slab_hash table is being freed; the
 *	      uh_hashsize field selects the backing store it came from.
 *
 * Returns:
 *	Nothing
 */
static void
hash_free(struct uma_hash *hash)
{
	if (hash->uh_slab_hash == NULL)
		return;
	/*
	 * Initial-size tables come from hashzone (see hash_alloc());
	 * anything larger came from malloc(9).  Free to the matching store.
	 */
	if (hash->uh_hashsize == UMA_HASH_SIZE_INIT)
		zone_free_item(hashzone, hash->uh_slab_hash, NULL, SKIP_NONE);
	else
		free(hash->uh_slab_hash, M_UMAHASH);
}
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
 * Frees all outstanding items in a bucket
 *
 * Arguments:
 *	zone   The zone to free to, must be unlocked.
 *	bucket The free/alloc bucket with items, cpu queue must be locked.
 *
 * Returns:
 *	Nothing
 */
static void
bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
{
	int i;

	if (bucket == NULL)
		return;

	/* Run the zone's fini on each cached item before releasing it. */
	if (zone->uz_fini)
		for (i = 0; i < bucket->ub_cnt; i++)
			zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
	zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
	/*
	 * Zones with an item limit track uz_items under the zone lock;
	 * give the freed count back and wake one limit-sleeper if we
	 * just dropped below the cap.
	 */
	if (zone->uz_max_items > 0) {
		ZONE_LOCK(zone);
		zone->uz_items -= bucket->ub_cnt;
		if (zone->uz_sleepers && zone->uz_items < zone->uz_max_items)
			wakeup_one(zone);
		ZONE_UNLOCK(zone);
	}
	bucket->ub_cnt = 0;
}
|
|
|
|
|
|
|
|
/*
 * Drains the per cpu caches for a zone.
 *
 * NOTE: This may only be called while the zone is being torn down, and not
 * during normal operation.  This is necessary in order that we do not have
 * to migrate CPUs to drain the per-CPU caches.
 *
 * Arguments:
 *	zone     The zone to drain, must be unlocked.
 *
 * Returns:
 *	Nothing
 */
static void
cache_drain(uma_zone_t zone)
{
	uma_cache_t cache;
	int cpu;

	/*
	 * XXX: It is safe to not lock the per-CPU caches, because we're
	 * tearing down the zone anyway.  I.e., there will be no further use
	 * of the caches at this point.
	 *
	 * XXX: It would be good to be able to assert that the zone is being
	 * torn down to prevent improper use of cache_drain().
	 *
	 * XXX: We lock the zone before passing into bucket_cache_reclaim() as
	 * it is used elsewhere.  Should the tear-down path be made special
	 * there in some form?
	 */
	CPU_FOREACH(cpu) {
		cache = &zone->uz_cpu[cpu];
		/* Drain then free each of the three per-CPU buckets. */
		bucket_drain(zone, cache->uc_allocbucket);
		if (cache->uc_allocbucket != NULL)
			bucket_free(zone, cache->uc_allocbucket, NULL);
		cache->uc_allocbucket = NULL;
		bucket_drain(zone, cache->uc_freebucket);
		if (cache->uc_freebucket != NULL)
			bucket_free(zone, cache->uc_freebucket, NULL);
		cache->uc_freebucket = NULL;
		bucket_drain(zone, cache->uc_crossbucket);
		if (cache->uc_crossbucket != NULL)
			bucket_free(zone, cache->uc_crossbucket, NULL);
		cache->uc_crossbucket = NULL;
	}
	/* Finally empty the per-domain bucket caches as well. */
	ZONE_LOCK(zone);
	bucket_cache_reclaim(zone, true);
	ZONE_UNLOCK(zone);
}
|
|
|
|
|
2013-11-19 10:51:46 +00:00
|
|
|
/*
 * Halve the distance between the zone's current bucket size and its
 * minimum, politely reducing per-CPU caching.  zone_foreach() callback;
 * the second argument is unused.
 */
static void
cache_shrink(uma_zone_t zone, void *unused)
{

	/* Internal zones manage their own bucket sizing. */
	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	ZONE_LOCK(zone);
	zone->uz_bucket_size =
	    (zone->uz_bucket_size_min + zone->uz_bucket_size) / 2;
	ZONE_UNLOCK(zone);
}
|
|
|
|
|
|
|
|
/*
 * Push the current CPU's cached buckets of a zone back into the
 * per-domain bucket cache (non-empty buckets) or free them (empty ones).
 * Runs in a critical section so the per-CPU cache cannot be raced;
 * must be executed while bound to the target CPU (see
 * pcpu_cache_drain_safe()).  zone_foreach() callback; second arg unused.
 */
static void
cache_drain_safe_cpu(uma_zone_t zone, void *unused)
{
	uma_cache_t cache;
	uma_bucket_t b1, b2, b3;
	int domain;

	if (zone->uz_flags & UMA_ZFLAG_INTERNAL)
		return;

	b1 = b2 = b3 = NULL;
	ZONE_LOCK(zone);
	critical_enter();
	/* NUMA zones return buckets to this CPU's domain; others use 0. */
	if (zone->uz_flags & UMA_ZONE_NUMA)
		domain = PCPU_GET(domain);
	else
		domain = 0;
	cache = &zone->uz_cpu[curcpu];
	if (cache->uc_allocbucket) {
		if (cache->uc_allocbucket->ub_cnt != 0)
			zone_put_bucket(zone, &zone->uz_domain[domain],
			    cache->uc_allocbucket, false);
		else
			b1 = cache->uc_allocbucket;
		cache->uc_allocbucket = NULL;
	}
	if (cache->uc_freebucket) {
		if (cache->uc_freebucket->ub_cnt != 0)
			zone_put_bucket(zone, &zone->uz_domain[domain],
			    cache->uc_freebucket, false);
		else
			b2 = cache->uc_freebucket;
		cache->uc_freebucket = NULL;
	}
	/*
	 * The cross bucket holds items from other domains; it is always
	 * drained and freed below rather than returned to this domain.
	 */
	b3 = cache->uc_crossbucket;
	cache->uc_crossbucket = NULL;
	critical_exit();
	ZONE_UNLOCK(zone);
	/* Free the detached buckets only after dropping the zone lock. */
	if (b1)
		bucket_free(zone, b1, NULL);
	if (b2)
		bucket_free(zone, b2, NULL);
	if (b3) {
		bucket_drain(zone, b3);
		bucket_free(zone, b3, NULL);
	}
}
|
|
|
|
|
|
|
|
/*
 * Safely drain per-CPU caches of a zone (or of all zones when zone is
 * NULL) to the per-domain alloc bucket caches.
 * This is an expensive call because it needs to bind to all CPUs
 * one by one and enter a critical section on each of them in order
 * to safely access their cache buckets.
 * Zone lock must not be held when calling this function.
 */
static void
pcpu_cache_drain_safe(uma_zone_t zone)
{
	int cpu;

	/*
	 * Polite bucket size shrinking was not enough, shrink aggressively.
	 */
	if (zone)
		cache_shrink(zone, NULL);
	else
		zone_foreach(cache_shrink, NULL);

	/*
	 * Visit every CPU in turn: bind this thread there so
	 * cache_drain_safe_cpu() runs against that CPU's cache.
	 */
	CPU_FOREACH(cpu) {
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);

		if (zone)
			cache_drain_safe_cpu(zone, NULL);
		else
			zone_foreach(cache_drain_safe_cpu, NULL);
	}
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
}
|
|
|
|
|
2004-02-01 06:15:17 +00:00
|
|
|
/*
 * Reclaim cached buckets from a zone.  All buckets are reclaimed if the caller
 * requested a drain, otherwise the per-domain caches are trimmed to their
 * estimated working set size.
 *
 * Called with the zone locked; the lock is dropped and re-taken around
 * each bucket's drain/free.
 */
static void
bucket_cache_reclaim(uma_zone_t zone, bool drain)
{
	uma_zone_domain_t zdom;
	uma_bucket_t bucket;
	long target, tofree;
	int i;

	for (i = 0; i < vm_ndomains; i++) {
		zdom = &zone->uz_domain[i];

		/*
		 * If we were asked to drain the zone, we are done only once
		 * this bucket cache is empty.  Otherwise, we reclaim items in
		 * excess of the zone's estimated working set size.  If the
		 * difference nitems - imin is larger than the WSS estimate,
		 * then the estimate will grow at the end of this interval and
		 * we ignore the historical average.
		 */
		target = drain ? 0 : lmax(zdom->uzd_wss, zdom->uzd_nitems -
		    zdom->uzd_imin);
		while (zdom->uzd_nitems > target) {
			/* Reclaim from the tail: the coldest bucket. */
			bucket = TAILQ_LAST(&zdom->uzd_buckets, uma_bucketlist);
			if (bucket == NULL)
				break;
			tofree = bucket->ub_cnt;
			TAILQ_REMOVE(&zdom->uzd_buckets, bucket, ub_link);
			zdom->uzd_nitems -= tofree;

			/*
			 * Shift the bounds of the current WSS interval to avoid
			 * perturbing the estimate.
			 */
			zdom->uzd_imax -= lmin(zdom->uzd_imax, tofree);
			zdom->uzd_imin -= lmin(zdom->uzd_imin, tofree);

			/*
			 * Draining the bucket calls back into the keg and
			 * may sleep; never hold the zone lock across it.
			 */
			ZONE_UNLOCK(zone);
			bucket_drain(zone, bucket);
			bucket_free(zone, bucket, NULL);
			ZONE_LOCK(zone);
		}
	}

	/*
	 * Shrink the zone bucket size to ensure that the per-CPU caches
	 * don't grow too large.
	 */
	if (zone->uz_bucket_size > zone->uz_bucket_size_min)
		zone->uz_bucket_size--;
}
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2013-06-18 04:50:20 +00:00
|
|
|
/*
 * Release a slab back to the keg's backing store.  Items [0, start) are
 * first passed through the keg's fini (in reverse order); the slab header
 * is freed to the slab zone for OFFPAGE kegs, and the pages are returned
 * via the keg's free function.
 *
 * Arguments:
 *	keg    The keg the slab belongs to.
 *	slab   The slab being freed.
 *	start  Number of initialized items in the slab.
 */
static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
	uint8_t *mem;
	int i;
	uint8_t flags;

	CTR4(KTR_UMA, "keg_free_slab keg %s(%p) slab %p, returning %d bytes",
	    keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);

	mem = slab_data(slab, keg);
	flags = slab->us_flags;
	i = start;
	if (keg->uk_fini != NULL) {
		/* Finalize items in reverse: start - 1 down to 0. */
		for (i--; i > -1; i--)
#ifdef INVARIANTS
		/*
		 * trash_fini implies that dtor was trash_dtor. trash_fini
		 * would check that memory hasn't been modified since free,
		 * which executed trash_dtor.
		 * That's why we need to run uma_dbg_kskip() check here,
		 * albeit we don't make skip check for other init/fini
		 * invocations.
		 */
		if (!uma_dbg_kskip(keg, slab_item(slab, keg, i)) ||
		    keg->uk_fini != trash_fini)
#endif
			keg->uk_fini(slab_item(slab, keg, i), keg->uk_size);
	}
	/* OFFPAGE kegs keep the slab header outside the slab pages. */
	if (keg->uk_flags & UMA_ZONE_OFFPAGE)
		zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
	keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
	/* Account the released pages in the global UMA total. */
	uma_total_dec(PAGE_SIZE * keg->uk_ppera);
}
|
|
|
|
|
|
|
|
/*
 * Frees pages from a keg back to the system.  This is done on demand from
 * the pageout daemon.
 *
 * Returns nothing.
 */
static void
keg_drain(uma_keg_t keg)
{
	struct slabhead freeslabs = { 0 };
	uma_domain_t dom;
	uma_slab_t slab, tmp;
	int i;

	/*
	 * We don't want to take pages from statically allocated kegs at this
	 * time
	 */
	if (keg->uk_flags & UMA_ZONE_NOFREE || keg->uk_freef == NULL)
		return;

	CTR3(KTR_UMA, "keg_drain %s(%p) free items: %u",
	    keg->uk_name, keg, keg->uk_free);
	KEG_LOCK(keg);
	if (keg->uk_free == 0)
		goto finished;

	/*
	 * Walk every domain's free-slab list and collect eligible slabs on
	 * the local freeslabs list while the keg lock is held.  The actual
	 * page release is deferred until after the lock is dropped below.
	 */
	for (i = 0; i < vm_ndomains; i++) {
		dom = &keg->uk_domain[i];
		LIST_FOREACH_SAFE(slab, &dom->ud_free_slab, us_link, tmp) {
			/* We have nowhere to free these to. */
			if (slab->us_flags & UMA_SLAB_BOOT)
				continue;

			LIST_REMOVE(slab, us_link);
			keg->uk_pages -= keg->uk_ppera;
			keg->uk_free -= keg->uk_ipers;

			/* Drop the hash table's reference to the slab too. */
			if (keg->uk_flags & UMA_ZONE_HASH)
				UMA_HASH_REMOVE(&keg->uk_hash, slab);

			LIST_INSERT_HEAD(&freeslabs, slab, us_link);
		}
	}

finished:
	KEG_UNLOCK(keg);

	/*
	 * Release the collected slabs without the keg lock held, since
	 * keg_free_slab() runs item finalizers and calls back into the
	 * page allocator / slab zone.
	 */
	while ((slab = LIST_FIRST(&freeslabs)) != NULL) {
		LIST_REMOVE(slab, us_link);
		keg_free_slab(keg, slab, keg->uk_ipers);
	}
}
|
|
|
|
|
2009-01-25 09:11:24 +00:00
|
|
|
/*
 * Reclaim cached items from a zone: flush (or trim, per 'drain') the
 * per-CPU bucket caches and, for keg-backed zones, return free slabs
 * to the system.  'waitok' (M_WAITOK/M_NOWAIT) controls whether we wait
 * for a concurrent reclaim to finish or bail out immediately.
 */
static void
zone_reclaim(uma_zone_t zone, int waitok, bool drain)
{

	/*
	 * Set draining to interlock with zone_dtor() so we can release our
	 * locks as we go.  Only dtor() should do a WAITOK call since it
	 * is the only call that knows the structure will still be available
	 * when it wakes up.
	 */
	ZONE_LOCK(zone);
	while (zone->uz_flags & UMA_ZFLAG_RECLAIMING) {
		if (waitok == M_NOWAIT)
			goto out;
		/* Sleep briefly and re-check; woken by the wakeup() below. */
		msleep(zone, zone->uz_lockptr, PVM, "zonedrain", 1);
	}
	zone->uz_flags |= UMA_ZFLAG_RECLAIMING;
	bucket_cache_reclaim(zone, drain);
	ZONE_UNLOCK(zone);

	/*
	 * The DRAINING flag protects us from being freed while
	 * we're running.  Normally the uma_rwlock would protect us but we
	 * must be able to release and acquire the right lock for each keg.
	 */
	if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0)
		keg_drain(zone->uz_keg);
	ZONE_LOCK(zone);
	zone->uz_flags &= ~UMA_ZFLAG_RECLAIMING;
	/* Wake any thread waiting in the msleep() loop above. */
	wakeup(zone);
out:
	ZONE_UNLOCK(zone);
}
|
|
|
|
|
2019-09-01 22:22:43 +00:00
|
|
|
/*
 * Fully drain a zone's caches without sleeping; zone_foreach() callback
 * signature (the second argument is unused).
 */
static void
zone_drain(uma_zone_t zone, void *unused)
{

	zone_reclaim(zone, M_NOWAIT, true);
}
|
|
|
|
|
|
|
|
/*
 * Trim (rather than fully drain) a zone's caches without sleeping;
 * zone_foreach() callback signature (the second argument is unused).
 */
static void
zone_trim(uma_zone_t zone, void *unused)
{

	zone_reclaim(zone, M_NOWAIT, false);
}
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
|
2009-01-25 09:11:24 +00:00
|
|
|
* Allocate a new slab for a keg. This does not insert the slab onto a list.
|
2018-10-24 16:41:47 +00:00
|
|
|
* If the allocation was successful, the keg lock will be held upon return,
|
|
|
|
* otherwise the keg will be left unlocked.
|
2002-03-19 09:11:49 +00:00
|
|
|
*
|
|
|
|
* Arguments:
|
2019-01-23 18:58:15 +00:00
|
|
|
* flags Wait flags for the item initialization routine
|
|
|
|
* aflags Wait flags for the slab allocation
|
2002-03-19 09:11:49 +00:00
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* The slab that was allocated or NULL if there is no memory and the
|
|
|
|
* caller specified M_NOWAIT.
|
|
|
|
*/
|
2004-01-30 16:26:29 +00:00
|
|
|
static uma_slab_t
|
2019-01-23 18:58:15 +00:00
|
|
|
keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
|
|
|
|
int aflags)
|
2002-03-19 09:11:49 +00:00
|
|
|
{
|
2009-01-25 09:11:24 +00:00
|
|
|
uma_alloc allocf;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
uma_slab_t slab;
|
2017-11-28 23:40:54 +00:00
|
|
|
unsigned long size;
|
2013-04-09 17:43:48 +00:00
|
|
|
uint8_t *mem;
|
2019-01-23 18:58:15 +00:00
|
|
|
uint8_t sflags;
|
2002-03-19 09:11:49 +00:00
|
|
|
int i;
|
|
|
|
|
2018-01-12 23:25:05 +00:00
|
|
|
KASSERT(domain >= 0 && domain < vm_ndomains,
|
|
|
|
("keg_alloc_slab: domain %d out of range", domain));
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
KEG_LOCK_ASSERT(keg);
|
|
|
|
MPASS(zone->uz_lockptr == &keg->uk_lock);
|
2002-04-08 02:42:55 +00:00
|
|
|
|
2009-01-25 09:11:24 +00:00
|
|
|
allocf = keg->uk_allocf;
|
|
|
|
KEG_UNLOCK(keg);
|
2002-04-08 02:42:55 +00:00
|
|
|
|
2018-10-24 16:41:47 +00:00
|
|
|
slab = NULL;
|
|
|
|
mem = NULL;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
if (keg->uk_flags & UMA_ZONE_OFFPAGE) {
|
2019-01-23 18:58:15 +00:00
|
|
|
slab = zone_alloc_item(keg->uk_slabzone, NULL, domain, aflags);
|
2013-06-18 04:50:20 +00:00
|
|
|
if (slab == NULL)
|
|
|
|
goto out;
|
2002-04-08 02:42:55 +00:00
|
|
|
}
|
|
|
|
|
2002-06-19 20:49:44 +00:00
|
|
|
/*
|
|
|
|
* This reproduces the old vm_zone behavior of zero filling pages the
|
|
|
|
* first time they are added to a zone.
|
|
|
|
*
|
|
|
|
* Malloced items are zeroed in uma_zalloc.
|
|
|
|
*/
|
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
if ((keg->uk_flags & UMA_ZONE_MALLOC) == 0)
|
2019-01-23 18:58:15 +00:00
|
|
|
aflags |= M_ZERO;
|
2002-06-19 20:49:44 +00:00
|
|
|
else
|
2019-01-23 18:58:15 +00:00
|
|
|
aflags &= ~M_ZERO;
|
2002-06-19 20:49:44 +00:00
|
|
|
|
2012-01-27 20:18:31 +00:00
|
|
|
if (keg->uk_flags & UMA_ZONE_NODUMP)
|
2019-01-23 18:58:15 +00:00
|
|
|
aflags |= M_NODUMP;
|
2012-01-27 20:18:31 +00:00
|
|
|
|
2009-01-25 09:11:24 +00:00
|
|
|
/* zone is passed for legacy reasons. */
|
2018-10-24 16:41:47 +00:00
|
|
|
size = keg->uk_ppera * PAGE_SIZE;
|
2019-01-23 18:58:15 +00:00
|
|
|
mem = allocf(zone, size, domain, &sflags, aflags);
|
2003-09-19 08:53:33 +00:00
|
|
|
if (mem == NULL) {
|
2004-08-02 00:18:36 +00:00
|
|
|
if (keg->uk_flags & UMA_ZONE_OFFPAGE)
|
2013-06-17 03:43:47 +00:00
|
|
|
zone_free_item(keg->uk_slabzone, slab, NULL, SKIP_NONE);
|
2013-06-18 04:50:20 +00:00
|
|
|
slab = NULL;
|
|
|
|
goto out;
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
2017-11-28 23:40:54 +00:00
|
|
|
uma_total_inc(size);
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2002-06-25 21:04:50 +00:00
|
|
|
/* Point the slab into the allocated memory */
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
if (!(keg->uk_flags & UMA_ZONE_OFFPAGE))
|
|
|
|
slab = (uma_slab_t )(mem + keg->uk_pgoff);
|
2019-12-08 01:15:06 +00:00
|
|
|
else
|
|
|
|
((uma_hash_slab_t)slab)->uhs_data = mem;
|
2002-06-25 21:04:50 +00:00
|
|
|
|
2009-01-25 09:11:24 +00:00
|
|
|
if (keg->uk_flags & UMA_ZONE_VTOSLAB)
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
for (i = 0; i < keg->uk_ppera; i++)
|
2019-11-28 07:49:25 +00:00
|
|
|
vsetzoneslab((vm_offset_t)mem + (i * PAGE_SIZE),
|
|
|
|
zone, slab);
|
2002-03-19 09:11:49 +00:00
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
slab->us_freecount = keg->uk_ipers;
|
2019-01-23 18:58:15 +00:00
|
|
|
slab->us_flags = sflags;
|
2018-01-12 23:25:05 +00:00
|
|
|
slab->us_domain = domain;
|
2019-12-02 22:44:34 +00:00
|
|
|
BIT_FILL(keg->uk_ipers, &slab->us_free);
|
2013-06-13 21:05:38 +00:00
|
|
|
#ifdef INVARIANTS
|
2019-12-13 09:31:59 +00:00
|
|
|
BIT_ZERO(keg->uk_ipers, slab_dbg_bits(slab, keg));
|
2013-06-13 21:05:38 +00:00
|
|
|
#endif
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
2004-08-02 00:18:36 +00:00
|
|
|
if (keg->uk_init != NULL) {
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
for (i = 0; i < keg->uk_ipers; i++)
|
2019-12-08 01:15:06 +00:00
|
|
|
if (keg->uk_init(slab_item(slab, keg, i),
|
2019-01-23 18:58:15 +00:00
|
|
|
keg->uk_size, flags) != 0)
|
2004-08-02 00:18:36 +00:00
|
|
|
break;
|
|
|
|
if (i != keg->uk_ipers) {
|
2013-06-18 04:50:20 +00:00
|
|
|
keg_free_slab(keg, slab, i);
|
|
|
|
slab = NULL;
|
|
|
|
goto out;
|
2004-08-02 00:18:36 +00:00
|
|
|
}
|
|
|
|
}
|
2009-01-25 09:11:24 +00:00
|
|
|
KEG_LOCK(keg);
|
2002-06-25 21:04:50 +00:00
|
|
|
|
2017-06-01 18:36:52 +00:00
|
|
|
CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
|
|
|
|
slab, keg->uk_name, keg);
|
|
|
|
|
2018-10-24 16:41:47 +00:00
|
|
|
if (keg->uk_flags & UMA_ZONE_HASH)
|
|
|
|
UMA_HASH_INSERT(&keg->uk_hash, slab, mem);
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2018-10-24 16:41:47 +00:00
|
|
|
keg->uk_pages += keg->uk_ppera;
|
|
|
|
keg->uk_free += keg->uk_ipers;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2018-10-24 16:41:47 +00:00
|
|
|
out:
|
2002-03-19 09:11:49 +00:00
|
|
|
return (slab);
|
|
|
|
}
|
|
|
|
|
2003-09-21 07:39:16 +00:00
|
|
|
/*
|
|
|
|
* This function is intended to be used early on in place of page_alloc() so
|
|
|
|
* that we may use the boot time page cache to satisfy allocations before
|
|
|
|
* the VM is ready.
|
|
|
|
*/
|
|
|
|
static void *
|
2018-01-12 23:25:05 +00:00
|
|
|
startup_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
|
|
|
|
int wait)
|
2003-09-21 07:39:16 +00:00
|
|
|
{
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
uma_keg_t keg;
|
2017-06-01 18:26:57 +00:00
|
|
|
void *mem;
|
|
|
|
int pages;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
keg = zone->uz_keg;
|
2003-09-21 07:39:16 +00:00
|
|
|
/*
|
2018-02-09 04:45:39 +00:00
|
|
|
* If we are in BOOT_BUCKETS or higher, than switch to real
|
|
|
|
* allocator. Zones with page sized slabs switch at BOOT_PAGEALLOC.
|
2003-09-21 07:39:16 +00:00
|
|
|
*/
|
2018-02-09 04:45:39 +00:00
|
|
|
switch (booted) {
|
|
|
|
case BOOT_COLD:
|
|
|
|
case BOOT_STRAPPED:
|
|
|
|
break;
|
|
|
|
case BOOT_PAGEALLOC:
|
|
|
|
if (keg->uk_ppera > 1)
|
|
|
|
break;
|
|
|
|
case BOOT_BUCKETS:
|
|
|
|
case BOOT_RUNNING:
|
|
|
|
#ifdef UMA_MD_SMALL_ALLOC
|
|
|
|
keg->uk_allocf = (keg->uk_ppera > 1) ?
|
|
|
|
page_alloc : uma_small_alloc;
|
|
|
|
#else
|
|
|
|
keg->uk_allocf = page_alloc;
|
Followup on r302393 by cperciva, improving calculation of boot pages required
for UMA startup.
o Introduce another stage of UMA startup, which is entered after
vm_page_startup() finishes. After this stage we don't yet enable buckets,
but we can ask VM for pages. Rename stages to meaningful names while here.
New list of stages: BOOT_COLD, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
BOOT_RUNNING.
Enabling page alloc earlier allows us to dramatically reduce number of
boot pages required. What is more important number of zones becomes
consistent across different machines, as no MD allocations are done before
the BOOT_PAGEALLOC stage. Now only UMA internal zones actually need to use
startup_alloc(), however that may change, so vm_page_startup() provides
its need for early zones as argument.
o Introduce uma_startup_count() function, to avoid code duplication. The
functions calculates sizes of zones zone and kegs zone, and calculates how
many pages UMA will need to bootstrap.
It counts not only of zone structures, but also of kegs, slabs and hashes.
o Hide uma_startup_foo() declarations from public file.
o Provide several DIAGNOSTIC printfs on boot_pages usage.
o Bugfix: when calculating zone of zones size use (mp_maxid + 1) instead of
mp_ncpus. Use resulting number not only in the size argument to zone_ctor()
but also as args.size.
Reviewed by: imp, gallatin (earlier version)
Differential Revision: https://reviews.freebsd.org/D14054
2018-02-06 04:16:00 +00:00
|
|
|
#endif
|
2018-02-09 04:45:39 +00:00
|
|
|
return keg->uk_allocf(zone, bytes, domain, pflag, wait);
|
2003-09-21 07:39:16 +00:00
|
|
|
}
|
2018-02-09 04:45:39 +00:00
|
|
|
|
2003-09-21 07:39:16 +00:00
|
|
|
/*
|
2018-02-09 04:45:39 +00:00
|
|
|
* Check our small startup cache to see if it has pages remaining.
|
2003-09-21 07:39:16 +00:00
|
|
|
*/
|
2018-02-09 04:45:39 +00:00
|
|
|
pages = howmany(bytes, PAGE_SIZE);
|
|
|
|
KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
|
|
|
|
if (pages > boot_pages)
|
|
|
|
panic("UMA zone \"%s\": Increase vm.boot_pages", zone->uz_name);
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
printf("%s from \"%s\", %d boot pages left\n", __func__, zone->uz_name,
|
|
|
|
boot_pages);
|
2003-09-21 07:39:16 +00:00
|
|
|
#endif
|
2018-02-09 04:45:39 +00:00
|
|
|
mem = bootmem;
|
|
|
|
boot_pages -= pages;
|
|
|
|
bootmem += pages * PAGE_SIZE;
|
|
|
|
*pflag = UMA_SLAB_BOOT;
|
|
|
|
|
|
|
|
return (mem);
|
2003-09-21 07:39:16 +00:00
|
|
|
}
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
|
|
|
|
* Allocates a number of pages from the system
|
|
|
|
*
|
|
|
|
* Arguments:
|
|
|
|
* bytes The number of bytes requested
|
|
|
|
* wait Shall we wait?
|
|
|
|
*
|
|
|
|
* Returns:
|
2004-01-30 16:26:29 +00:00
|
|
|
* A pointer to the alloced memory or possibly
|
2002-03-19 09:11:49 +00:00
|
|
|
* NULL if M_NOWAIT is set.
|
|
|
|
*/
|
|
|
|
static void *
|
2018-01-12 23:25:05 +00:00
|
|
|
page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
|
|
|
|
int wait)
|
2002-03-19 09:11:49 +00:00
|
|
|
{
|
|
|
|
void *p; /* Returned page */
|
|
|
|
|
2017-11-28 23:40:54 +00:00
|
|
|
*pflag = UMA_SLAB_KERNEL;
|
2018-10-30 18:26:34 +00:00
|
|
|
p = (void *)kmem_malloc_domainset(DOMAINSET_FIXED(domain), bytes, wait);
|
2004-01-30 16:26:29 +00:00
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
return (p);
|
|
|
|
}
|
|
|
|
|
2018-07-06 02:06:03 +00:00
|
|
|
static void *
|
|
|
|
pcpu_page_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
|
|
|
|
int wait)
|
|
|
|
{
|
|
|
|
struct pglist alloctail;
|
|
|
|
vm_offset_t addr, zkva;
|
|
|
|
int cpu, flags;
|
|
|
|
vm_page_t p, p_next;
|
|
|
|
#ifdef NUMA
|
|
|
|
struct pcpu *pc;
|
|
|
|
#endif
|
|
|
|
|
Fix pre-SI_SUB_CPU initialization of per-CPU counters.
r336020 introduced pcpu_page_alloc(), replacing page_alloc() as the
backend allocator for PCPU UMA zones. Unlike page_alloc(), it does
not honour malloc(9) flags such as M_ZERO or M_NODUMP, so fix that.
r336020 also changed counter(9) to initialize each counter using a
CPU_FOREACH() loop instead of an SMP rendezvous. Before SI_SUB_CPU,
smp_rendezvous() will only execute the callback on the current CPU
(i.e., CPU 0), so only one counter gets zeroed. The rest are zeroed
by virtue of the fact that UMA gratuitously zeroes slabs when importing
them into a zone.
Prior to SI_SUB_CPU, all_cpus is clear, so with r336020 we weren't
zeroing vm_cnt counters during boot: the CPU_FOREACH() loop had no
effect, and pcpu_page_alloc() didn't honour M_ZERO. Fix this by
iterating over the full range of CPU IDs when zeroing counters,
ignoring whether the corresponding bits in all_cpus are set.
Reported and tested by: pho (previous version)
Reviewed by: kib (previous version)
Differential Revision: https://reviews.freebsd.org/D16190
2018-07-10 00:18:12 +00:00
|
|
|
MPASS(bytes == (mp_maxid + 1) * PAGE_SIZE);
|
2018-07-06 02:06:03 +00:00
|
|
|
|
Fix pre-SI_SUB_CPU initialization of per-CPU counters.
r336020 introduced pcpu_page_alloc(), replacing page_alloc() as the
backend allocator for PCPU UMA zones. Unlike page_alloc(), it does
not honour malloc(9) flags such as M_ZERO or M_NODUMP, so fix that.
r336020 also changed counter(9) to initialize each counter using a
CPU_FOREACH() loop instead of an SMP rendezvous. Before SI_SUB_CPU,
smp_rendezvous() will only execute the callback on the current CPU
(i.e., CPU 0), so only one counter gets zeroed. The rest are zeroed
by virtue of the fact that UMA gratuitously zeroes slabs when importing
them into a zone.
Prior to SI_SUB_CPU, all_cpus is clear, so with r336020 we weren't
zeroing vm_cnt counters during boot: the CPU_FOREACH() loop had no
effect, and pcpu_page_alloc() didn't honour M_ZERO. Fix this by
iterating over the full range of CPU IDs when zeroing counters,
ignoring whether the corresponding bits in all_cpus are set.
Reported and tested by: pho (previous version)
Reviewed by: kib (previous version)
Differential Revision: https://reviews.freebsd.org/D16190
2018-07-10 00:18:12 +00:00
|
|
|
TAILQ_INIT(&alloctail);
|
2018-07-06 02:06:03 +00:00
|
|
|
flags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
|
Fix pre-SI_SUB_CPU initialization of per-CPU counters.
r336020 introduced pcpu_page_alloc(), replacing page_alloc() as the
backend allocator for PCPU UMA zones. Unlike page_alloc(), it does
not honour malloc(9) flags such as M_ZERO or M_NODUMP, so fix that.
r336020 also changed counter(9) to initialize each counter using a
CPU_FOREACH() loop instead of an SMP rendezvous. Before SI_SUB_CPU,
smp_rendezvous() will only execute the callback on the current CPU
(i.e., CPU 0), so only one counter gets zeroed. The rest are zeroed
by virtue of the fact that UMA gratuitously zeroes slabs when importing
them into a zone.
Prior to SI_SUB_CPU, all_cpus is clear, so with r336020 we weren't
zeroing vm_cnt counters during boot: the CPU_FOREACH() loop had no
effect, and pcpu_page_alloc() didn't honour M_ZERO. Fix this by
iterating over the full range of CPU IDs when zeroing counters,
ignoring whether the corresponding bits in all_cpus are set.
Reported and tested by: pho (previous version)
Reviewed by: kib (previous version)
Differential Revision: https://reviews.freebsd.org/D16190
2018-07-10 00:18:12 +00:00
|
|
|
malloc2vm_flags(wait);
|
|
|
|
*pflag = UMA_SLAB_KERNEL;
|
2018-07-06 02:06:03 +00:00
|
|
|
for (cpu = 0; cpu <= mp_maxid; cpu++) {
|
|
|
|
if (CPU_ABSENT(cpu)) {
|
|
|
|
p = vm_page_alloc(NULL, 0, flags);
|
|
|
|
} else {
|
|
|
|
#ifndef NUMA
|
|
|
|
p = vm_page_alloc(NULL, 0, flags);
|
|
|
|
#else
|
|
|
|
pc = pcpu_find(cpu);
|
|
|
|
p = vm_page_alloc_domain(NULL, 0, pc->pc_domain, flags);
|
|
|
|
if (__predict_false(p == NULL))
|
|
|
|
p = vm_page_alloc(NULL, 0, flags);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
if (__predict_false(p == NULL))
|
|
|
|
goto fail;
|
|
|
|
TAILQ_INSERT_TAIL(&alloctail, p, listq);
|
|
|
|
}
|
|
|
|
if ((addr = kva_alloc(bytes)) == 0)
|
|
|
|
goto fail;
|
|
|
|
zkva = addr;
|
|
|
|
TAILQ_FOREACH(p, &alloctail, listq) {
|
|
|
|
pmap_qenter(zkva, &p, 1);
|
|
|
|
zkva += PAGE_SIZE;
|
|
|
|
}
|
|
|
|
return ((void*)addr);
|
2019-06-07 18:23:29 +00:00
|
|
|
fail:
|
2018-07-06 02:06:03 +00:00
|
|
|
TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
|
2019-06-07 18:23:29 +00:00
|
|
|
vm_page_unwire_noq(p);
|
2018-07-06 02:06:03 +00:00
|
|
|
vm_page_free(p);
|
|
|
|
}
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
|
|
|
|
* Allocates a number of pages from within an object
|
|
|
|
*
|
|
|
|
* Arguments:
|
|
|
|
* bytes The number of bytes requested
|
|
|
|
* wait Shall we wait?
|
|
|
|
*
|
|
|
|
* Returns:
|
2004-01-30 16:26:29 +00:00
|
|
|
* A pointer to the alloced memory or possibly
|
2002-03-19 09:11:49 +00:00
|
|
|
* NULL if M_NOWAIT is set.
|
|
|
|
*/
|
|
|
|
static void *
|
2018-01-12 23:25:05 +00:00
|
|
|
noobj_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *flags,
|
|
|
|
int wait)
|
2002-03-19 09:11:49 +00:00
|
|
|
{
|
2013-02-26 23:35:27 +00:00
|
|
|
TAILQ_HEAD(, vm_page) alloctail;
|
|
|
|
u_long npages;
|
2003-08-03 06:08:48 +00:00
|
|
|
vm_offset_t retkva, zkva;
|
2013-02-26 23:35:27 +00:00
|
|
|
vm_page_t p, p_next;
|
2009-01-25 09:11:24 +00:00
|
|
|
uma_keg_t keg;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2013-02-26 23:35:27 +00:00
|
|
|
TAILQ_INIT(&alloctail);
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
keg = zone->uz_keg;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2013-02-26 23:35:27 +00:00
|
|
|
npages = howmany(bytes, PAGE_SIZE);
|
|
|
|
while (npages > 0) {
|
2018-01-12 23:25:05 +00:00
|
|
|
p = vm_page_alloc_domain(NULL, 0, domain, VM_ALLOC_INTERRUPT |
|
2017-11-08 02:39:37 +00:00
|
|
|
VM_ALLOC_WIRED | VM_ALLOC_NOOBJ |
|
2017-11-08 23:25:05 +00:00
|
|
|
((wait & M_WAITOK) != 0 ? VM_ALLOC_WAITOK :
|
|
|
|
VM_ALLOC_NOWAIT));
|
2013-02-26 23:35:27 +00:00
|
|
|
if (p != NULL) {
|
|
|
|
/*
|
|
|
|
* Since the page does not belong to an object, its
|
|
|
|
* listq is unused.
|
|
|
|
*/
|
|
|
|
TAILQ_INSERT_TAIL(&alloctail, p, listq);
|
|
|
|
npages--;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Page allocation failed, free intermediate pages and
|
|
|
|
* exit.
|
|
|
|
*/
|
|
|
|
TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
|
2019-06-07 18:23:29 +00:00
|
|
|
vm_page_unwire_noq(p);
|
2013-02-26 23:35:27 +00:00
|
|
|
vm_page_free(p);
|
|
|
|
}
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
*flags = UMA_SLAB_PRIV;
|
|
|
|
zkva = keg->uk_kva +
|
|
|
|
atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
|
|
|
|
retkva = zkva;
|
|
|
|
TAILQ_FOREACH(p, &alloctail, listq) {
|
2003-08-03 06:08:48 +00:00
|
|
|
pmap_qenter(zkva, &p, 1);
|
|
|
|
zkva += PAGE_SIZE;
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return ((void *)retkva);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Frees a number of pages to the system
|
2004-01-30 16:26:29 +00:00
|
|
|
*
|
2002-03-19 09:11:49 +00:00
|
|
|
* Arguments:
|
|
|
|
* mem A pointer to the memory to be freed
|
|
|
|
* size The size of the memory being freed
|
|
|
|
* flags The original p->us_flags field
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* Nothing
|
|
|
|
*/
|
|
|
|
static void
|
2015-04-01 12:42:26 +00:00
|
|
|
page_free(void *mem, vm_size_t size, uint8_t flags)
|
2002-03-19 09:11:49 +00:00
|
|
|
{
|
2002-06-19 20:49:44 +00:00
|
|
|
|
2018-08-25 19:38:08 +00:00
|
|
|
if ((flags & UMA_SLAB_KERNEL) == 0)
|
2017-01-02 16:50:52 +00:00
|
|
|
panic("UMA: page_free used with invalid flags %x", flags);
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2018-08-25 19:38:08 +00:00
|
|
|
kmem_free((vm_offset_t)mem, size);
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
2018-07-06 02:06:03 +00:00
|
|
|
/*
|
|
|
|
* Frees pcpu zone allocations
|
|
|
|
*
|
|
|
|
* Arguments:
|
|
|
|
* mem A pointer to the memory to be freed
|
|
|
|
* size The size of the memory being freed
|
|
|
|
* flags The original p->us_flags field
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* Nothing
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
pcpu_page_free(void *mem, vm_size_t size, uint8_t flags)
|
|
|
|
{
|
|
|
|
vm_offset_t sva, curva;
|
|
|
|
vm_paddr_t paddr;
|
|
|
|
vm_page_t m;
|
|
|
|
|
|
|
|
MPASS(size == (mp_maxid+1)*PAGE_SIZE);
|
|
|
|
sva = (vm_offset_t)mem;
|
|
|
|
for (curva = sva; curva < sva + size; curva += PAGE_SIZE) {
|
|
|
|
paddr = pmap_kextract(curva);
|
|
|
|
m = PHYS_TO_VM_PAGE(paddr);
|
2019-06-07 18:23:29 +00:00
|
|
|
vm_page_unwire_noq(m);
|
2018-07-06 02:06:03 +00:00
|
|
|
vm_page_free(m);
|
|
|
|
}
|
|
|
|
pmap_qremove(sva, size >> PAGE_SHIFT);
|
|
|
|
kva_free(sva, size);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
 * Zero fill initializer
 *
 * Arguments/Returns follow uma_init specifications
 */
static int
zero_init(void *mem, int size, int flags)
{
	/* Clear the item; this initializer cannot fail. */
	memset(mem, 0, size);
	return (0);
}
|
|
|
|
|
2019-12-13 09:31:59 +00:00
|
|
|
#ifdef INVARIANTS
struct noslabbits *
slab_dbg_bits(uma_slab_t slab, uma_keg_t keg)
{

	/* The debug bitset lives immediately after the free bitset. */
	return ((struct noslabbits *)((char *)&slab->us_free +
	    BITSET_SIZE(keg->uk_ipers)));
}
#endif
|
|
|
|
|
2019-12-02 22:44:34 +00:00
|
|
|
/*
|
|
|
|
* Actual size of embedded struct slab (!OFFPAGE).
|
|
|
|
*/
|
|
|
|
size_t
|
|
|
|
slab_sizeof(int nitems)
|
|
|
|
{
|
|
|
|
size_t s;
|
|
|
|
|
2019-12-13 09:31:59 +00:00
|
|
|
s = sizeof(struct uma_slab) + BITSET_SIZE(nitems) * SLAB_BITSETS;
|
2019-12-02 22:44:34 +00:00
|
|
|
return (roundup(s, UMA_ALIGN_PTR + 1));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Size of memory for embedded slabs (!OFFPAGE).
|
|
|
|
*/
|
|
|
|
size_t
|
|
|
|
slab_space(int nitems)
|
|
|
|
{
|
|
|
|
return (UMA_SLAB_SIZE - slab_sizeof(nitems));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Compute the number of items that will fit in an embedded (!OFFPAGE) slab
|
|
|
|
* with a given size and alignment.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
slab_ipers(size_t size, int align)
|
|
|
|
{
|
|
|
|
int rsize;
|
|
|
|
int nitems;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Compute the ideal number of items that will fit in a page and
|
|
|
|
* then compute the actual number based on a bitset nitems wide.
|
|
|
|
*/
|
|
|
|
rsize = roundup(size, align + 1);
|
|
|
|
nitems = UMA_SLAB_SIZE / rsize;
|
|
|
|
return (slab_space(nitems) / rsize);
|
|
|
|
}
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
|
2009-01-25 09:11:24 +00:00
|
|
|
* Finish creating a small uma keg. This calculates ipers, and the keg size.
|
2002-03-19 09:11:49 +00:00
|
|
|
*
|
|
|
|
* Arguments
|
2009-01-25 09:11:24 +00:00
|
|
|
* keg The zone we should initialize
|
2002-03-19 09:11:49 +00:00
|
|
|
*
|
|
|
|
* Returns
|
|
|
|
* Nothing
|
|
|
|
*/
|
|
|
|
static void
|
2009-01-25 09:11:24 +00:00
|
|
|
keg_small_init(uma_keg_t keg)
|
2002-03-19 09:11:49 +00:00
|
|
|
{
|
2004-07-29 15:25:40 +00:00
|
|
|
u_int rsize;
|
|
|
|
u_int memused;
|
|
|
|
u_int wastedspace;
|
|
|
|
u_int shsize;
|
2017-03-11 16:35:36 +00:00
|
|
|
u_int slabsize;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2013-04-08 19:10:45 +00:00
|
|
|
if (keg->uk_flags & UMA_ZONE_PCPU) {
|
2016-07-06 14:09:49 +00:00
|
|
|
u_int ncpus = (mp_maxid + 1) ? (mp_maxid + 1) : MAXCPU;
|
2013-07-23 11:16:40 +00:00
|
|
|
|
2018-07-06 02:06:03 +00:00
|
|
|
slabsize = UMA_PCPU_ALLOC_SIZE;
|
|
|
|
keg->uk_ppera = ncpus;
|
2013-04-08 19:10:45 +00:00
|
|
|
} else {
|
2017-03-11 16:35:36 +00:00
|
|
|
slabsize = UMA_SLAB_SIZE;
|
2013-04-08 19:10:45 +00:00
|
|
|
keg->uk_ppera = 1;
|
|
|
|
}
|
|
|
|
|
2013-06-13 21:05:38 +00:00
|
|
|
/*
|
|
|
|
* Calculate the size of each allocation (rsize) according to
|
|
|
|
* alignment. If the requested size is smaller than we have
|
|
|
|
* allocation bits for we round it up.
|
|
|
|
*/
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
rsize = keg->uk_size;
|
2019-12-02 22:44:34 +00:00
|
|
|
if (rsize < slabsize / SLAB_MAX_SETSIZE)
|
|
|
|
rsize = slabsize / SLAB_MAX_SETSIZE;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
if (rsize & keg->uk_align)
|
2019-12-02 22:44:34 +00:00
|
|
|
rsize = roundup(rsize, keg->uk_align + 1);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
keg->uk_rsize = rsize;
|
2013-04-08 19:10:45 +00:00
|
|
|
|
|
|
|
KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0 ||
|
2018-07-06 02:06:03 +00:00
|
|
|
keg->uk_rsize < UMA_PCPU_ALLOC_SIZE,
|
2013-04-08 19:10:45 +00:00
|
|
|
("%s: size %u too large", __func__, keg->uk_rsize));
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2019-12-02 22:44:34 +00:00
|
|
|
/*
|
|
|
|
* Use a pessimistic bit count for shsize. It may be possible to
|
|
|
|
* squeeze one more item in for very particular sizes if we were
|
|
|
|
* to loop and reduce the bitsize if there is waste.
|
|
|
|
*/
|
2013-06-13 21:05:38 +00:00
|
|
|
if (keg->uk_flags & UMA_ZONE_OFFPAGE)
|
2012-09-18 20:28:55 +00:00
|
|
|
shsize = 0;
|
2013-06-13 21:05:38 +00:00
|
|
|
else
|
2019-12-02 22:44:34 +00:00
|
|
|
shsize = slab_sizeof(slabsize / rsize);
|
2004-07-29 15:25:40 +00:00
|
|
|
|
Handle a special case when a slab can fit only one allocation,
and zone has a large alignment. With alignment taken into
account uk_rsize will be greater than space in a slab. However,
since we have only one item per slab, it is always naturally
aligned.
Code that will panic before this change with 4k page:
z = uma_zcreate("test", 3984, NULL, NULL, NULL, NULL, 31, 0);
uma_zalloc(z, M_WAITOK);
A practical scenario to hit the panic is a machine with 56 CPUs
and 2 NUMA domains, which yields in zone size of 3984.
PR: 227116
MFC after: 2 weeks
2018-04-02 05:11:59 +00:00
|
|
|
if (rsize <= slabsize - shsize)
|
|
|
|
keg->uk_ipers = (slabsize - shsize) / rsize;
|
|
|
|
else {
|
|
|
|
/* Handle special case when we have 1 item per slab, so
|
|
|
|
* alignment requirement can be relaxed. */
|
|
|
|
KASSERT(keg->uk_size <= slabsize - shsize,
|
|
|
|
("%s: size %u greater than slab", __func__, keg->uk_size));
|
|
|
|
keg->uk_ipers = 1;
|
|
|
|
}
|
2019-12-02 22:44:34 +00:00
|
|
|
KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_MAX_SETSIZE,
|
2013-04-08 19:10:45 +00:00
|
|
|
("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
|
|
|
|
|
2004-07-29 15:25:40 +00:00
|
|
|
memused = keg->uk_ipers * rsize + shsize;
|
2017-03-11 16:35:36 +00:00
|
|
|
wastedspace = slabsize - memused;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2004-07-29 15:25:40 +00:00
|
|
|
/*
|
|
|
|
* We can't do OFFPAGE if we're internal or if we've been
|
|
|
|
* asked to not go to the VM for buckets. If we do this we
|
2013-06-26 00:57:38 +00:00
|
|
|
* may end up going to the VM for slabs which we do not
|
|
|
|
* want to do if we're UMA_ZFLAG_CACHEONLY as a result
|
|
|
|
* of UMA_ZONE_VM, which clearly forbids it.
|
2004-07-29 15:25:40 +00:00
|
|
|
*/
|
|
|
|
if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) ||
|
|
|
|
(keg->uk_flags & UMA_ZFLAG_CACHEONLY))
|
|
|
|
return;
|
|
|
|
|
2013-06-13 21:05:38 +00:00
|
|
|
/*
|
|
|
|
* See if using an OFFPAGE slab will limit our waste. Only do
|
|
|
|
* this if it permits more items per-slab.
|
|
|
|
*
|
|
|
|
* XXX We could try growing slabsize to limit max waste as well.
|
|
|
|
* Historically this was not done because the VM could not
|
|
|
|
* efficiently handle contiguous allocations.
|
|
|
|
*/
|
2017-03-11 16:35:36 +00:00
|
|
|
if ((wastedspace >= slabsize / UMA_MAX_WASTE) &&
|
|
|
|
(keg->uk_ipers < (slabsize / keg->uk_rsize))) {
|
|
|
|
keg->uk_ipers = slabsize / keg->uk_rsize;
|
2019-12-02 22:44:34 +00:00
|
|
|
KASSERT(keg->uk_ipers > 0 && keg->uk_ipers <= SLAB_MAX_SETSIZE,
|
2013-04-08 19:10:45 +00:00
|
|
|
("%s: keg->uk_ipers %u", __func__, keg->uk_ipers));
|
2017-06-01 18:36:52 +00:00
|
|
|
CTR6(KTR_UMA, "UMA decided we need offpage slab headers for "
|
|
|
|
"keg: %s(%p), calculated wastedspace = %d, "
|
2004-07-29 15:25:40 +00:00
|
|
|
"maximum wasted space allowed = %d, "
|
|
|
|
"calculated ipers = %d, "
|
2017-06-01 18:36:52 +00:00
|
|
|
"new wasted space = %d\n", keg->uk_name, keg, wastedspace,
|
2017-03-11 16:35:36 +00:00
|
|
|
slabsize / UMA_MAX_WASTE, keg->uk_ipers,
|
|
|
|
slabsize - keg->uk_ipers * keg->uk_rsize);
|
2019-11-20 01:57:33 +00:00
|
|
|
/*
|
|
|
|
* If we had access to memory to embed a slab header we
|
|
|
|
* also have a page structure to use vtoslab() instead of
|
|
|
|
* hash to find slabs. If the zone was explicitly created
|
|
|
|
* OFFPAGE we can't necessarily touch the memory.
|
|
|
|
*/
|
|
|
|
if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0)
|
|
|
|
keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
2013-04-08 19:10:45 +00:00
|
|
|
|
|
|
|
if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
|
|
|
|
(keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
|
|
|
|
keg->uk_flags |= UMA_ZONE_HASH;
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2009-01-25 09:11:24 +00:00
|
|
|
* Finish creating a large (> UMA_SLAB_SIZE) uma kegs. Just give in and do
|
2002-03-19 09:11:49 +00:00
|
|
|
* OFFPAGE for now. When I can allow for more dynamic slab sizes this will be
|
|
|
|
* more complicated.
|
|
|
|
*
|
|
|
|
* Arguments
|
2009-01-25 09:11:24 +00:00
|
|
|
* keg The keg we should initialize
|
2002-03-19 09:11:49 +00:00
|
|
|
*
|
|
|
|
* Returns
|
|
|
|
* Nothing
|
|
|
|
*/
|
|
|
|
static void
|
2009-01-25 09:11:24 +00:00
|
|
|
keg_large_init(uma_keg_t keg)
|
2004-01-30 16:26:29 +00:00
|
|
|
{
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2009-01-25 09:11:24 +00:00
|
|
|
KASSERT(keg != NULL, ("Keg is null in keg_large_init"));
|
2013-04-08 19:10:45 +00:00
|
|
|
KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
|
|
|
|
("%s: Cannot large-init a UMA_ZONE_PCPU keg", __func__));
|
2003-08-11 19:39:45 +00:00
|
|
|
|
2013-04-08 19:10:45 +00:00
|
|
|
keg->uk_ppera = howmany(keg->uk_size, PAGE_SIZE);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
keg->uk_ipers = 1;
|
2010-11-04 15:33:50 +00:00
|
|
|
keg->uk_rsize = keg->uk_size;
|
|
|
|
|
2013-11-27 20:56:10 +00:00
|
|
|
/* Check whether we have enough space to not do OFFPAGE. */
|
2018-11-28 19:17:27 +00:00
|
|
|
if ((keg->uk_flags & UMA_ZONE_OFFPAGE) == 0 &&
|
2019-12-02 22:44:34 +00:00
|
|
|
PAGE_SIZE * keg->uk_ppera - keg->uk_rsize <
|
|
|
|
slab_sizeof(SLAB_MIN_SETSIZE)) {
|
2018-11-28 19:17:27 +00:00
|
|
|
/*
|
|
|
|
* We can't do OFFPAGE if we're internal, in which case
|
|
|
|
* we need an extra page per allocation to contain the
|
|
|
|
* slab header.
|
|
|
|
*/
|
|
|
|
if ((keg->uk_flags & UMA_ZFLAG_INTERNAL) == 0)
|
2019-11-20 01:57:33 +00:00
|
|
|
keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
|
2018-11-28 19:17:27 +00:00
|
|
|
else
|
|
|
|
keg->uk_ppera++;
|
2013-11-27 20:56:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if ((keg->uk_flags & UMA_ZONE_OFFPAGE) &&
|
|
|
|
(keg->uk_flags & UMA_ZONE_VTOSLAB) == 0)
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
keg->uk_flags |= UMA_ZONE_HASH;
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
2009-01-25 09:11:24 +00:00
|
|
|
static void
|
|
|
|
keg_cachespread_init(uma_keg_t keg)
|
|
|
|
{
|
|
|
|
int alignsize;
|
|
|
|
int trailer;
|
|
|
|
int pages;
|
|
|
|
int rsize;
|
|
|
|
|
2013-04-08 19:10:45 +00:00
|
|
|
KASSERT((keg->uk_flags & UMA_ZONE_PCPU) == 0,
|
|
|
|
("%s: Cannot cachespread-init a UMA_ZONE_PCPU keg", __func__));
|
|
|
|
|
2009-01-25 09:11:24 +00:00
|
|
|
alignsize = keg->uk_align + 1;
|
|
|
|
rsize = keg->uk_size;
|
|
|
|
/*
|
|
|
|
* We want one item to start on every align boundary in a page. To
|
|
|
|
* do this we will span pages. We will also extend the item by the
|
|
|
|
* size of align if it is an even multiple of align. Otherwise, it
|
|
|
|
* would fall on the same boundary every time.
|
|
|
|
*/
|
|
|
|
if (rsize & keg->uk_align)
|
|
|
|
rsize = (rsize & ~keg->uk_align) + alignsize;
|
|
|
|
if ((rsize & alignsize) == 0)
|
|
|
|
rsize += alignsize;
|
|
|
|
trailer = rsize - keg->uk_size;
|
|
|
|
pages = (rsize * (PAGE_SIZE / alignsize)) / PAGE_SIZE;
|
|
|
|
pages = MIN(pages, (128 * 1024) / PAGE_SIZE);
|
|
|
|
keg->uk_rsize = rsize;
|
|
|
|
keg->uk_ppera = pages;
|
|
|
|
keg->uk_ipers = ((pages * PAGE_SIZE) + trailer) / rsize;
|
|
|
|
keg->uk_flags |= UMA_ZONE_OFFPAGE | UMA_ZONE_VTOSLAB;
|
2019-12-02 22:44:34 +00:00
|
|
|
KASSERT(keg->uk_ipers <= SLAB_MAX_SETSIZE,
|
2012-08-26 09:54:11 +00:00
|
|
|
("%s: keg->uk_ipers too high(%d) increase max_ipers", __func__,
|
2009-01-25 09:11:24 +00:00
|
|
|
keg->uk_ipers));
|
|
|
|
}
|
|
|
|
|
2004-01-30 16:26:29 +00:00
|
|
|
/*
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
* Keg header ctor. This initializes all fields, locks, etc. And inserts
|
|
|
|
* the keg onto the global keg list.
|
2002-03-19 09:11:49 +00:00
|
|
|
*
|
|
|
|
* Arguments/Returns follow uma_ctor specifications
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
* udata Actually uma_kctor_args
|
2002-03-19 09:11:49 +00:00
|
|
|
*/
|
2004-08-02 00:18:36 +00:00
|
|
|
static int
|
|
|
|
keg_ctor(void *mem, int size, void *udata, int flags)
|
2002-03-19 09:11:49 +00:00
|
|
|
{
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
struct uma_kctor_args *arg = udata;
|
|
|
|
uma_keg_t keg = mem;
|
|
|
|
uma_zone_t zone;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
bzero(keg, size);
|
|
|
|
keg->uk_size = arg->size;
|
|
|
|
keg->uk_init = arg->uminit;
|
|
|
|
keg->uk_fini = arg->fini;
|
|
|
|
keg->uk_align = arg->align;
|
|
|
|
keg->uk_free = 0;
|
2013-06-26 00:57:38 +00:00
|
|
|
keg->uk_reserve = 0;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
keg->uk_pages = 0;
|
|
|
|
keg->uk_flags = arg->flags;
|
|
|
|
keg->uk_slabzone = NULL;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2018-10-24 16:41:47 +00:00
|
|
|
/*
|
|
|
|
* We use a global round-robin policy by default. Zones with
|
|
|
|
* UMA_ZONE_NUMA set will use first-touch instead, in which case the
|
|
|
|
* iterator is never run.
|
|
|
|
*/
|
|
|
|
keg->uk_dr.dr_policy = DOMAINSET_RR();
|
|
|
|
keg->uk_dr.dr_iter = 0;
|
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
/*
|
|
|
|
* The master zone is passed to us at keg-creation time.
|
|
|
|
*/
|
|
|
|
zone = arg->zone;
|
2009-01-25 09:11:24 +00:00
|
|
|
keg->uk_name = zone->uz_name;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2002-06-17 22:02:41 +00:00
|
|
|
if (arg->flags & UMA_ZONE_VM)
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
keg->uk_flags |= UMA_ZFLAG_CACHEONLY;
|
|
|
|
|
|
|
|
if (arg->flags & UMA_ZONE_ZINIT)
|
|
|
|
keg->uk_init = zero_init;
|
2002-06-17 22:02:41 +00:00
|
|
|
|
2016-03-01 00:33:32 +00:00
|
|
|
if (arg->flags & UMA_ZONE_MALLOC)
|
2009-01-25 09:11:24 +00:00
|
|
|
keg->uk_flags |= UMA_ZONE_VTOSLAB;
|
|
|
|
|
2013-04-08 19:10:45 +00:00
|
|
|
if (arg->flags & UMA_ZONE_PCPU)
|
|
|
|
#ifdef SMP
|
|
|
|
keg->uk_flags |= UMA_ZONE_OFFPAGE;
|
|
|
|
#else
|
|
|
|
keg->uk_flags &= ~UMA_ZONE_PCPU;
|
|
|
|
#endif
|
|
|
|
|
2013-06-13 21:05:38 +00:00
|
|
|
if (keg->uk_flags & UMA_ZONE_CACHESPREAD) {
|
|
|
|
keg_cachespread_init(keg);
|
2004-07-29 15:25:40 +00:00
|
|
|
} else {
|
2019-12-02 22:44:34 +00:00
|
|
|
if (keg->uk_size > slab_space(SLAB_MIN_SETSIZE))
|
2009-01-25 09:11:24 +00:00
|
|
|
keg_large_init(keg);
|
2004-07-29 15:25:40 +00:00
|
|
|
else
|
2009-01-25 09:11:24 +00:00
|
|
|
keg_small_init(keg);
|
2004-07-29 15:25:40 +00:00
|
|
|
}
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
2016-03-01 00:33:32 +00:00
|
|
|
if (keg->uk_flags & UMA_ZONE_OFFPAGE)
|
|
|
|
keg->uk_slabzone = slabzone;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
2003-09-21 07:39:16 +00:00
|
|
|
/*
|
|
|
|
* If we haven't booted yet we need allocations to go through the
|
|
|
|
* startup cache until the vm is ready.
|
|
|
|
*/
|
Followup on r302393 by cperciva, improving calculation of boot pages required
for UMA startup.
o Introduce another stage of UMA startup, which is entered after
vm_page_startup() finishes. After this stage we don't yet enable buckets,
but we can ask VM for pages. Rename stages to meaningful names while here.
New list of stages: BOOT_COLD, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
BOOT_RUNNING.
Enabling page alloc earlier allows us to dramatically reduce number of
boot pages required. What is more important number of zones becomes
consistent across different machines, as no MD allocations are done before
the BOOT_PAGEALLOC stage. Now only UMA internal zones actually need to use
startup_alloc(), however that may change, so vm_page_startup() provides
its need for early zones as argument.
o Introduce uma_startup_count() function, to avoid code duplication. The
functions calculates sizes of zones zone and kegs zone, and calculates how
many pages UMA will need to bootstrap.
It counts not only of zone structures, but also of kegs, slabs and hashes.
o Hide uma_startup_foo() declarations from public file.
o Provide several DIAGNOSTIC printfs on boot_pages usage.
o Bugfix: when calculating zone of zones size use (mp_maxid + 1) instead of
mp_ncpus. Use resulting number not only in the size argument to zone_ctor()
but also as args.size.
Reviewed by: imp, gallatin (earlier version)
Differential Revision: https://reviews.freebsd.org/D14054
2018-02-06 04:16:00 +00:00
|
|
|
if (booted < BOOT_PAGEALLOC)
|
2017-06-08 21:33:19 +00:00
|
|
|
keg->uk_allocf = startup_alloc;
|
2003-09-21 07:39:16 +00:00
|
|
|
#ifdef UMA_MD_SMALL_ALLOC
|
2017-06-08 21:33:19 +00:00
|
|
|
else if (keg->uk_ppera == 1)
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
keg->uk_allocf = uma_small_alloc;
|
2017-06-08 21:33:19 +00:00
|
|
|
#endif
|
2018-07-06 02:06:03 +00:00
|
|
|
else if (keg->uk_flags & UMA_ZONE_PCPU)
|
|
|
|
keg->uk_allocf = pcpu_page_alloc;
|
2017-06-08 21:33:19 +00:00
|
|
|
else
|
|
|
|
keg->uk_allocf = page_alloc;
|
|
|
|
#ifdef UMA_MD_SMALL_ALLOC
|
|
|
|
if (keg->uk_ppera == 1)
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
keg->uk_freef = uma_small_free;
|
2017-06-08 21:33:19 +00:00
|
|
|
else
|
2011-05-22 17:46:16 +00:00
|
|
|
#endif
|
2018-07-06 02:06:03 +00:00
|
|
|
if (keg->uk_flags & UMA_ZONE_PCPU)
|
|
|
|
keg->uk_freef = pcpu_page_free;
|
|
|
|
else
|
2017-06-08 21:33:19 +00:00
|
|
|
keg->uk_freef = page_free;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
|
|
|
/*
|
2013-06-20 19:08:12 +00:00
|
|
|
* Initialize keg's lock
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
*/
|
2013-06-20 19:08:12 +00:00
|
|
|
KEG_LOCK_INIT(keg, (arg->flags & UMA_ZONE_MTXCLASS));
|
2002-04-29 23:45:41 +00:00
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
|
|
|
|
* If we're putting the slab header in the actual page we need to
|
2019-12-02 22:44:34 +00:00
|
|
|
* figure out where in each page it goes. See slab_sizeof
|
|
|
|
* definition.
|
2002-03-19 09:11:49 +00:00
|
|
|
*/
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
if (!(keg->uk_flags & UMA_ZONE_OFFPAGE)) {
|
2019-12-02 22:44:34 +00:00
|
|
|
size_t shsize;
|
|
|
|
|
|
|
|
shsize = slab_sizeof(keg->uk_ipers);
|
|
|
|
keg->uk_pgoff = (PAGE_SIZE * keg->uk_ppera) - shsize;
|
2004-07-29 15:25:40 +00:00
|
|
|
/*
|
|
|
|
* The only way the following is possible is if with our
|
|
|
|
* UMA_ALIGN_PTR adjustments we are now bigger than
|
|
|
|
* UMA_SLAB_SIZE. I haven't checked whether this is
|
|
|
|
* mathematically possible for all cases, so we make
|
|
|
|
* sure here anyway.
|
|
|
|
*/
|
2019-12-02 22:44:34 +00:00
|
|
|
KASSERT(keg->uk_pgoff + shsize <= PAGE_SIZE * keg->uk_ppera,
|
2018-11-28 19:17:27 +00:00
|
|
|
("zone %s ipers %d rsize %d size %d slab won't fit",
|
|
|
|
zone->uz_name, keg->uk_ipers, keg->uk_rsize, keg->uk_size));
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
if (keg->uk_flags & UMA_ZONE_HASH)
|
2019-06-06 23:57:28 +00:00
|
|
|
hash_alloc(&keg->uk_hash, 0);
|
2002-09-18 08:26:30 +00:00
|
|
|
|
2017-06-01 18:36:52 +00:00
|
|
|
CTR5(KTR_UMA, "keg_ctor %p zone %s(%p) out %d free %d\n",
|
|
|
|
keg, zone->uz_name, zone,
|
2017-03-11 16:43:38 +00:00
|
|
|
(keg->uk_pages / keg->uk_ppera) * keg->uk_ipers - keg->uk_free,
|
|
|
|
keg->uk_free);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
|
|
|
LIST_INSERT_HEAD(&keg->uk_zones, zone, uz_link);
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2014-10-05 21:34:56 +00:00
|
|
|
rw_wlock(&uma_rwlock);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
LIST_INSERT_HEAD(&uma_kegs, keg, uk_link);
|
2014-10-05 21:34:56 +00:00
|
|
|
rw_wunlock(&uma_rwlock);
|
2004-08-02 00:18:36 +00:00
|
|
|
return (0);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
}
|
|
|
|
|
2019-01-15 18:24:34 +00:00
|
|
|
static void
|
2019-11-28 00:19:09 +00:00
|
|
|
zone_alloc_counters(uma_zone_t zone, void *unused)
|
2019-01-15 18:24:34 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
zone->uz_allocs = counter_u64_alloc(M_WAITOK);
|
|
|
|
zone->uz_frees = counter_u64_alloc(M_WAITOK);
|
|
|
|
zone->uz_fails = counter_u64_alloc(M_WAITOK);
|
|
|
|
}
|
|
|
|
|
2019-11-28 00:19:09 +00:00
|
|
|
static void
|
|
|
|
zone_alloc_sysctl(uma_zone_t zone, void *unused)
|
|
|
|
{
|
|
|
|
uma_zone_domain_t zdom;
|
|
|
|
uma_keg_t keg;
|
|
|
|
struct sysctl_oid *oid, *domainoid;
|
2019-12-08 01:55:23 +00:00
|
|
|
int domains, i, cnt;
|
2019-11-28 00:19:09 +00:00
|
|
|
static const char *nokeg = "cache zone";
|
|
|
|
char *c;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Make a sysctl safe copy of the zone name by removing
|
|
|
|
* any special characters and handling dups by appending
|
|
|
|
* an index.
|
|
|
|
*/
|
|
|
|
if (zone->uz_namecnt != 0) {
|
2019-12-08 01:55:23 +00:00
|
|
|
/* Count the number of decimal digits and '_' separator. */
|
|
|
|
for (i = 1, cnt = zone->uz_namecnt; cnt != 0; i++)
|
|
|
|
cnt /= 10;
|
|
|
|
zone->uz_ctlname = malloc(strlen(zone->uz_name) + i + 1,
|
|
|
|
M_UMA, M_WAITOK);
|
2019-11-28 00:19:09 +00:00
|
|
|
sprintf(zone->uz_ctlname, "%s_%d", zone->uz_name,
|
|
|
|
zone->uz_namecnt);
|
|
|
|
} else
|
|
|
|
zone->uz_ctlname = strdup(zone->uz_name, M_UMA);
|
|
|
|
for (c = zone->uz_ctlname; *c != '\0'; c++)
|
|
|
|
if (strchr("./\\ -", *c) != NULL)
|
|
|
|
*c = '_';
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Basic parameters at the root.
|
|
|
|
*/
|
|
|
|
zone->uz_oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_vm_uma),
|
|
|
|
OID_AUTO, zone->uz_ctlname, CTLFLAG_RD, NULL, "");
|
|
|
|
oid = zone->uz_oid;
|
|
|
|
SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"size", CTLFLAG_RD, &zone->uz_size, 0, "Allocation size");
|
2019-12-11 06:50:55 +00:00
|
|
|
SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"flags", CTLFLAG_RD | CTLTYPE_STRING | CTLFLAG_MPSAFE,
|
|
|
|
zone, 0, sysctl_handle_uma_zone_flags, "A",
|
2019-11-28 00:19:09 +00:00
|
|
|
"Allocator configuration flags");
|
|
|
|
SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"bucket_size", CTLFLAG_RD, &zone->uz_bucket_size, 0,
|
|
|
|
"Desired per-cpu cache size");
|
|
|
|
SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"bucket_size_max", CTLFLAG_RD, &zone->uz_bucket_size_max, 0,
|
|
|
|
"Maximum allowed per-cpu cache size");
|
|
|
|
|
|
|
|
/*
|
|
|
|
* keg if present.
|
|
|
|
*/
|
|
|
|
oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
|
|
|
|
"keg", CTLFLAG_RD, NULL, "");
|
|
|
|
keg = zone->uz_keg;
|
2019-12-08 01:55:23 +00:00
|
|
|
if ((zone->uz_flags & UMA_ZFLAG_CACHE) == 0) {
|
2019-11-28 00:19:09 +00:00
|
|
|
SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"name", CTLFLAG_RD, keg->uk_name, "Keg name");
|
|
|
|
SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"rsize", CTLFLAG_RD, &keg->uk_rsize, 0,
|
|
|
|
"Real object size with alignment");
|
|
|
|
SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"ppera", CTLFLAG_RD, &keg->uk_ppera, 0,
|
|
|
|
"pages per-slab allocation");
|
|
|
|
SYSCTL_ADD_U16(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"ipers", CTLFLAG_RD, &keg->uk_ipers, 0,
|
|
|
|
"items available per-slab");
|
|
|
|
SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"align", CTLFLAG_RD, &keg->uk_align, 0,
|
|
|
|
"item alignment mask");
|
|
|
|
SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"pages", CTLFLAG_RD, &keg->uk_pages, 0,
|
|
|
|
"Total pages currently allocated from VM");
|
|
|
|
SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"free", CTLFLAG_RD, &keg->uk_free, 0,
|
|
|
|
"items free in the slab layer");
|
|
|
|
} else
|
|
|
|
SYSCTL_ADD_CONST_STRING(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"name", CTLFLAG_RD, nokeg, "Keg name");
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Information about zone limits.
|
|
|
|
*/
|
|
|
|
oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
|
|
|
|
"limit", CTLFLAG_RD, NULL, "");
|
|
|
|
SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"items", CTLFLAG_RD, &zone->uz_items, 0,
|
|
|
|
"current number of cached items");
|
|
|
|
SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"max_items", CTLFLAG_RD, &zone->uz_max_items, 0,
|
|
|
|
"Maximum number of cached items");
|
|
|
|
SYSCTL_ADD_U32(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"sleepers", CTLFLAG_RD, &zone->uz_sleepers, 0,
|
|
|
|
"Number of threads sleeping at limit");
|
|
|
|
SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"sleeps", CTLFLAG_RD, &zone->uz_sleeps, 0,
|
|
|
|
"Total zone limit sleeps");
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Per-domain information.
|
|
|
|
*/
|
|
|
|
if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
|
|
|
|
domains = vm_ndomains;
|
|
|
|
else
|
|
|
|
domains = 1;
|
|
|
|
domainoid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid),
|
|
|
|
OID_AUTO, "domain", CTLFLAG_RD, NULL, "");
|
|
|
|
for (i = 0; i < domains; i++) {
|
|
|
|
zdom = &zone->uz_domain[i];
|
|
|
|
oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(domainoid),
|
|
|
|
OID_AUTO, VM_DOMAIN(i)->vmd_name, CTLFLAG_RD, NULL, "");
|
|
|
|
SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"nitems", CTLFLAG_RD, &zdom->uzd_nitems,
|
|
|
|
"number of items in this domain");
|
|
|
|
SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"imax", CTLFLAG_RD, &zdom->uzd_imax,
|
|
|
|
"maximum item count in this period");
|
|
|
|
SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"imin", CTLFLAG_RD, &zdom->uzd_imin,
|
|
|
|
"minimum item count in this period");
|
|
|
|
SYSCTL_ADD_LONG(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"wss", CTLFLAG_RD, &zdom->uzd_wss,
|
|
|
|
"Working set size");
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* General statistics.
|
|
|
|
*/
|
|
|
|
oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(zone->uz_oid), OID_AUTO,
|
|
|
|
"stats", CTLFLAG_RD, NULL, "");
|
|
|
|
SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"current", CTLFLAG_RD | CTLTYPE_INT | CTLFLAG_MPSAFE,
|
|
|
|
zone, 1, sysctl_handle_uma_zone_cur, "I",
|
|
|
|
"Current number of allocated items");
|
|
|
|
SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"allocs", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
|
|
|
|
zone, 0, sysctl_handle_uma_zone_allocs, "QU",
|
|
|
|
"Total allocation calls");
|
|
|
|
SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"frees", CTLFLAG_RD | CTLTYPE_U64 | CTLFLAG_MPSAFE,
|
|
|
|
zone, 0, sysctl_handle_uma_zone_frees, "QU",
|
|
|
|
"Total free calls");
|
|
|
|
SYSCTL_ADD_COUNTER_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"fails", CTLFLAG_RD, &zone->uz_fails,
|
|
|
|
"Number of allocation failures");
|
|
|
|
SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
|
|
|
|
"xdomain", CTLFLAG_RD, &zone->uz_xdomain, 0,
|
|
|
|
"Free calls from the wrong domain");
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Callback argument for zone_count(): scan all zones for duplicates of
 * "name" and report the next free duplicate index in "count".
 */
struct uma_zone_count {
|
|
|
|
	const char *name;	/* zone name to look for */
|
|
|
|
	int count;	/* out: one past the highest uz_namecnt seen */
|
|
|
|
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
zone_count(uma_zone_t zone, void *arg)
|
|
|
|
{
|
|
|
|
struct uma_zone_count *cnt;
|
|
|
|
|
|
|
|
cnt = arg;
|
2019-12-08 01:55:23 +00:00
|
|
|
/*
|
|
|
|
* Some zones are rapidly created with identical names and
|
|
|
|
* destroyed out of order. This can lead to gaps in the count.
|
|
|
|
* Use one greater than the maximum observed for this name.
|
|
|
|
*/
|
2019-11-28 00:19:09 +00:00
|
|
|
if (strcmp(zone->uz_name, cnt->name) == 0)
|
2019-12-08 01:55:23 +00:00
|
|
|
cnt->count = MAX(cnt->count,
|
|
|
|
zone->uz_namecnt + 1);
|
2019-11-28 00:19:09 +00:00
|
|
|
}
|
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
/*
|
|
|
|
* Zone header ctor. This initializes all fields, locks, etc.
|
|
|
|
*
|
|
|
|
* Arguments/Returns follow uma_ctor specifications
|
|
|
|
* udata Actually uma_zctor_args
|
|
|
|
*/
|
2004-08-02 00:18:36 +00:00
|
|
|
static int
|
|
|
|
zone_ctor(void *mem, int size, void *udata, int flags)
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
{
|
2019-11-28 00:19:09 +00:00
|
|
|
struct uma_zone_count cnt;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
struct uma_zctor_args *arg = udata;
|
|
|
|
uma_zone_t zone = mem;
|
|
|
|
uma_zone_t z;
|
|
|
|
uma_keg_t keg;
|
2019-09-01 22:22:43 +00:00
|
|
|
int i;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
|
|
|
bzero(zone, size);
|
|
|
|
zone->uz_name = arg->name;
|
|
|
|
zone->uz_ctor = arg->ctor;
|
|
|
|
zone->uz_dtor = arg->dtor;
|
|
|
|
zone->uz_init = NULL;
|
|
|
|
zone->uz_fini = NULL;
|
2010-06-15 19:28:37 +00:00
|
|
|
zone->uz_sleeps = 0;
|
2019-08-06 21:50:34 +00:00
|
|
|
zone->uz_xdomain = 0;
|
2019-11-28 00:19:09 +00:00
|
|
|
zone->uz_bucket_size = 0;
|
|
|
|
zone->uz_bucket_size_min = 0;
|
|
|
|
zone->uz_bucket_size_max = BUCKET_MAX;
|
2009-01-25 09:11:24 +00:00
|
|
|
zone->uz_flags = 0;
|
2012-12-07 22:27:13 +00:00
|
|
|
zone->uz_warning = NULL;
|
2018-01-12 23:25:05 +00:00
|
|
|
/* The domain structures follow the cpu structures. */
|
|
|
|
zone->uz_domain = (struct uma_zone_domain *)&zone->uz_cpu[mp_ncpus];
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
zone->uz_bkt_max = ULONG_MAX;
|
2012-12-07 22:27:13 +00:00
|
|
|
timevalclear(&zone->uz_ratecheck);
|
2013-06-20 19:08:12 +00:00
|
|
|
|
2019-11-28 00:19:09 +00:00
|
|
|
/* Count the number of duplicate names. */
|
|
|
|
cnt.name = arg->name;
|
|
|
|
cnt.count = 0;
|
|
|
|
zone_foreach(zone_count, &cnt);
|
|
|
|
zone->uz_namecnt = cnt.count;
|
2019-01-15 18:24:34 +00:00
|
|
|
|
2019-09-01 22:22:43 +00:00
|
|
|
for (i = 0; i < vm_ndomains; i++)
|
|
|
|
TAILQ_INIT(&zone->uz_domain[i].uzd_buckets);
|
|
|
|
|
2019-11-27 19:49:55 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
if (arg->uminit == trash_init && arg->fini == trash_fini)
|
|
|
|
zone->uz_flags |= UMA_ZFLAG_TRASH;
|
|
|
|
#endif
|
|
|
|
|
2013-06-17 03:43:47 +00:00
|
|
|
/*
|
|
|
|
* This is a pure cache zone, no kegs.
|
|
|
|
*/
|
|
|
|
if (arg->import) {
|
2013-06-26 00:57:38 +00:00
|
|
|
if (arg->flags & UMA_ZONE_VM)
|
|
|
|
arg->flags |= UMA_ZFLAG_CACHEONLY;
|
|
|
|
zone->uz_flags = arg->flags;
|
2013-06-20 19:08:12 +00:00
|
|
|
zone->uz_size = arg->size;
|
2013-06-17 03:43:47 +00:00
|
|
|
zone->uz_import = arg->import;
|
|
|
|
zone->uz_release = arg->release;
|
|
|
|
zone->uz_arg = arg->arg;
|
2013-06-20 19:08:12 +00:00
|
|
|
zone->uz_lockptr = &zone->uz_lock;
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
ZONE_LOCK_INIT(zone, (arg->flags & UMA_ZONE_MTXCLASS));
|
2014-10-05 21:34:56 +00:00
|
|
|
rw_wlock(&uma_rwlock);
|
2013-11-28 19:20:49 +00:00
|
|
|
LIST_INSERT_HEAD(&uma_cachezones, zone, uz_link);
|
2014-10-05 21:34:56 +00:00
|
|
|
rw_wunlock(&uma_rwlock);
|
2013-06-20 19:08:12 +00:00
|
|
|
goto out;
|
2013-06-17 03:43:47 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Use the regular zone/keg/slab allocator.
|
|
|
|
*/
|
2019-12-04 18:40:05 +00:00
|
|
|
zone->uz_import = zone_import;
|
|
|
|
zone->uz_release = zone_release;
|
2013-06-17 03:43:47 +00:00
|
|
|
zone->uz_arg = zone;
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
keg = arg->keg;
|
2013-06-17 03:43:47 +00:00
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
if (arg->flags & UMA_ZONE_SECONDARY) {
|
2019-11-28 00:19:09 +00:00
|
|
|
KASSERT((zone->uz_flags & UMA_ZONE_SECONDARY) == 0,
|
|
|
|
("Secondary zone requested UMA_ZFLAG_INTERNAL"));
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
KASSERT(arg->keg != NULL, ("Secondary zone on zero'd keg"));
|
|
|
|
zone->uz_init = arg->uminit;
|
|
|
|
zone->uz_fini = arg->fini;
|
2013-06-20 19:08:12 +00:00
|
|
|
zone->uz_lockptr = &keg->uk_lock;
|
2009-01-25 09:11:24 +00:00
|
|
|
zone->uz_flags |= UMA_ZONE_SECONDARY;
|
2014-10-05 21:34:56 +00:00
|
|
|
rw_wlock(&uma_rwlock);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
ZONE_LOCK(zone);
|
|
|
|
LIST_FOREACH(z, &keg->uk_zones, uz_link) {
|
|
|
|
if (LIST_NEXT(z, uz_link) == NULL) {
|
|
|
|
LIST_INSERT_AFTER(z, zone, uz_link);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ZONE_UNLOCK(zone);
|
2014-10-05 21:34:56 +00:00
|
|
|
rw_wunlock(&uma_rwlock);
|
2009-01-25 09:11:24 +00:00
|
|
|
} else if (keg == NULL) {
|
|
|
|
if ((keg = uma_kcreate(zone, arg->size, arg->uminit, arg->fini,
|
|
|
|
arg->align, arg->flags)) == NULL)
|
2004-08-02 00:18:36 +00:00
|
|
|
return (ENOMEM);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
} else {
|
|
|
|
struct uma_kctor_args karg;
|
2004-08-02 00:18:36 +00:00
|
|
|
int error;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
|
|
|
/* We should only be here from uma_startup() */
|
|
|
|
karg.size = arg->size;
|
|
|
|
karg.uminit = arg->uminit;
|
|
|
|
karg.fini = arg->fini;
|
|
|
|
karg.align = arg->align;
|
|
|
|
karg.flags = arg->flags;
|
|
|
|
karg.zone = zone;
|
2004-08-02 00:18:36 +00:00
|
|
|
error = keg_ctor(arg->keg, sizeof(struct uma_keg), &karg,
|
|
|
|
flags);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
}
|
2013-06-17 03:43:47 +00:00
|
|
|
|
2019-11-28 00:19:09 +00:00
|
|
|
/* Inherit properties from the keg. */
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
zone->uz_keg = keg;
|
2009-01-25 09:11:24 +00:00
|
|
|
zone->uz_size = keg->uk_size;
|
|
|
|
zone->uz_flags |= (keg->uk_flags &
|
|
|
|
(UMA_ZONE_INHERIT | UMA_ZFLAG_INHERIT));
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2019-11-28 00:19:09 +00:00
|
|
|
out:
|
|
|
|
if (__predict_true(booted == BOOT_RUNNING)) {
|
|
|
|
zone_alloc_counters(zone, NULL);
|
|
|
|
zone_alloc_sysctl(zone, NULL);
|
|
|
|
} else {
|
|
|
|
zone->uz_allocs = EARLY_COUNTER;
|
|
|
|
zone->uz_frees = EARLY_COUNTER;
|
|
|
|
zone->uz_fails = EARLY_COUNTER;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
}
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2018-04-24 20:05:45 +00:00
|
|
|
KASSERT((arg->flags & (UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET)) !=
|
|
|
|
(UMA_ZONE_MAXBUCKET | UMA_ZONE_NOBUCKET),
|
|
|
|
("Invalid zone flag combination"));
|
2019-11-28 00:19:09 +00:00
|
|
|
if (arg->flags & UMA_ZFLAG_INTERNAL)
|
|
|
|
zone->uz_bucket_size_max = zone->uz_bucket_size = 0;
|
|
|
|
if ((arg->flags & UMA_ZONE_MAXBUCKET) != 0)
|
|
|
|
zone->uz_bucket_size = BUCKET_MAX;
|
|
|
|
else if ((arg->flags & UMA_ZONE_MINBUCKET) != 0)
|
|
|
|
zone->uz_bucket_size_max = zone->uz_bucket_size = BUCKET_MIN;
|
|
|
|
else if ((arg->flags & UMA_ZONE_NOBUCKET) != 0)
|
|
|
|
zone->uz_bucket_size = 0;
|
2018-04-24 20:05:45 +00:00
|
|
|
else
|
2019-11-28 00:19:09 +00:00
|
|
|
zone->uz_bucket_size = bucket_select(zone->uz_size);
|
|
|
|
zone->uz_bucket_size_min = zone->uz_bucket_size;
|
2013-06-18 04:50:20 +00:00
|
|
|
|
2004-08-02 00:18:36 +00:00
|
|
|
return (0);
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
2004-01-30 16:26:29 +00:00
|
|
|
/*
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
* Keg header dtor. This frees all data, destroys locks, frees the hash
|
|
|
|
* table and removes the keg from the global list.
|
2002-04-08 04:48:58 +00:00
|
|
|
*
|
|
|
|
* Arguments/Returns follow uma_dtor specifications
|
|
|
|
* udata unused
|
|
|
|
*/
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
static void
|
|
|
|
keg_dtor(void *arg, int size, void *udata)
|
|
|
|
{
|
|
|
|
uma_keg_t keg;
|
2002-04-08 04:48:58 +00:00
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
keg = (uma_keg_t)arg;
|
2009-01-25 09:11:24 +00:00
|
|
|
KEG_LOCK(keg);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
if (keg->uk_free != 0) {
|
2013-11-29 08:04:45 +00:00
|
|
|
printf("Freed UMA keg (%s) was not empty (%d items). "
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
" Lost %d pages of memory.\n",
|
2013-11-29 08:04:45 +00:00
|
|
|
keg->uk_name ? keg->uk_name : "",
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
keg->uk_free, keg->uk_pages);
|
|
|
|
}
|
2009-01-25 09:11:24 +00:00
|
|
|
KEG_UNLOCK(keg);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
2009-01-25 09:11:24 +00:00
|
|
|
hash_free(&keg->uk_hash);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
2009-01-25 09:11:24 +00:00
|
|
|
KEG_LOCK_FINI(keg);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Zone header dtor.
|
|
|
|
*
|
|
|
|
* Arguments/Returns follow uma_dtor specifications
|
|
|
|
* udata unused
|
|
|
|
*/
|
2002-04-08 04:48:58 +00:00
|
|
|
static void
|
|
|
|
zone_dtor(void *arg, int size, void *udata)
|
|
|
|
{
|
|
|
|
uma_zone_t zone;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
uma_keg_t keg;
|
2002-04-08 04:48:58 +00:00
|
|
|
|
|
|
|
zone = (uma_zone_t)arg;
|
2003-09-19 23:27:46 +00:00
|
|
|
|
2019-11-28 00:19:09 +00:00
|
|
|
sysctl_remove_oid(zone->uz_oid, 1, 1);
|
|
|
|
|
2009-01-25 09:11:24 +00:00
|
|
|
if (!(zone->uz_flags & UMA_ZFLAG_INTERNAL))
|
2003-09-19 23:27:46 +00:00
|
|
|
cache_drain(zone);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
2014-10-05 21:34:56 +00:00
|
|
|
rw_wlock(&uma_rwlock);
|
2009-01-25 09:11:24 +00:00
|
|
|
LIST_REMOVE(zone, uz_link);
|
2014-10-05 21:34:56 +00:00
|
|
|
rw_wunlock(&uma_rwlock);
|
2009-01-25 09:11:24 +00:00
|
|
|
/*
|
|
|
|
* XXX there are some races here where
|
|
|
|
* the zone can be drained but zone lock
|
|
|
|
* released and then refilled before we
|
|
|
|
* remove it... we dont care for now
|
|
|
|
*/
|
2019-09-01 22:22:43 +00:00
|
|
|
zone_reclaim(zone, M_WAITOK, true);
|
2009-01-25 09:11:24 +00:00
|
|
|
/*
|
2019-04-12 12:46:25 +00:00
|
|
|
* We only destroy kegs from non secondary/non cache zones.
|
2009-01-25 09:11:24 +00:00
|
|
|
*/
|
2019-04-12 12:46:25 +00:00
|
|
|
if ((zone->uz_flags & (UMA_ZONE_SECONDARY | UMA_ZFLAG_CACHE)) == 0) {
|
|
|
|
keg = zone->uz_keg;
|
2014-10-05 21:34:56 +00:00
|
|
|
rw_wlock(&uma_rwlock);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
LIST_REMOVE(keg, uk_link);
|
2014-10-05 21:34:56 +00:00
|
|
|
rw_wunlock(&uma_rwlock);
|
2013-06-17 03:43:47 +00:00
|
|
|
zone_free_item(kegs, keg, NULL, SKIP_NONE);
|
2003-11-30 08:04:01 +00:00
|
|
|
}
|
2019-01-15 18:24:34 +00:00
|
|
|
counter_u64_free(zone->uz_allocs);
|
|
|
|
counter_u64_free(zone->uz_frees);
|
|
|
|
counter_u64_free(zone->uz_fails);
|
2019-11-28 00:19:09 +00:00
|
|
|
free(zone->uz_ctlname, M_UMA);
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
if (zone->uz_lockptr == &zone->uz_lock)
|
|
|
|
ZONE_LOCK_FINI(zone);
|
2002-04-08 04:48:58 +00:00
|
|
|
}
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
|
|
|
|
* Traverses every zone in the system and calls a callback
|
|
|
|
*
|
|
|
|
* Arguments:
|
|
|
|
* zfunc A pointer to a function which accepts a zone
|
|
|
|
* as an argument.
|
2004-01-30 16:26:29 +00:00
|
|
|
*
|
2002-03-19 09:11:49 +00:00
|
|
|
* Returns:
|
|
|
|
* Nothing
|
|
|
|
*/
|
2004-01-30 16:26:29 +00:00
|
|
|
static void
|
2019-11-28 00:19:09 +00:00
|
|
|
zone_foreach(void (*zfunc)(uma_zone_t, void *arg), void *arg)
|
2002-03-19 09:11:49 +00:00
|
|
|
{
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
uma_keg_t keg;
|
2002-03-19 09:11:49 +00:00
|
|
|
uma_zone_t zone;
|
|
|
|
|
2019-01-15 18:24:34 +00:00
|
|
|
/*
|
|
|
|
* Before BOOT_RUNNING we are guaranteed to be single
|
|
|
|
* threaded, so locking isn't needed. Startup functions
|
|
|
|
* are allowed to use M_WAITOK.
|
|
|
|
*/
|
|
|
|
if (__predict_true(booted == BOOT_RUNNING))
|
|
|
|
rw_rlock(&uma_rwlock);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
LIST_FOREACH(keg, &uma_kegs, uk_link) {
|
|
|
|
LIST_FOREACH(zone, &keg->uk_zones, uz_link)
|
2019-11-28 00:19:09 +00:00
|
|
|
zfunc(zone, arg);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
}
|
2019-11-10 09:25:19 +00:00
|
|
|
LIST_FOREACH(zone, &uma_cachezones, uz_link)
|
2019-11-28 00:19:09 +00:00
|
|
|
zfunc(zone, arg);
|
2019-01-15 18:24:34 +00:00
|
|
|
if (__predict_true(booted == BOOT_RUNNING))
|
|
|
|
rw_runlock(&uma_rwlock);
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
Followup on r302393 by cperciva, improving calculation of boot pages required
for UMA startup.
o Introduce another stage of UMA startup, which is entered after
vm_page_startup() finishes. After this stage we don't yet enable buckets,
but we can ask VM for pages. Rename stages to meaningful names while here.
New list of stages: BOOT_COLD, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
BOOT_RUNNING.
Enabling page alloc earlier allows us to dramatically reduce number of
boot pages required. What is more important number of zones becomes
consistent across different machines, as no MD allocations are done before
the BOOT_PAGEALLOC stage. Now only UMA internal zones actually need to use
startup_alloc(), however that may change, so vm_page_startup() provides
its need for early zones as argument.
o Introduce uma_startup_count() function, to avoid code duplication. The
functions calculates sizes of zones zone and kegs zone, and calculates how
many pages UMA will need to bootstrap.
It counts not only of zone structures, but also of kegs, slabs and hashes.
o Hide uma_startup_foo() declarations from public file.
o Provide several DIAGNOSTIC printfs on boot_pages usage.
o Bugfix: when calculating zone of zones size use (mp_maxid + 1) instead of
mp_ncpus. Use resulting number not only in the size argument to zone_ctor()
but also as args.size.
Reviewed by: imp, gallatin (earlier version)
Differential Revision: https://reviews.freebsd.org/D14054
2018-02-06 04:16:00 +00:00
|
|
|
/*
 * Count how many pages do we need to bootstrap. VM supplies
 * its need in early zones in the argument, we add up our zones,
 * which consist of: UMA Slabs, UMA Hash and 9 Bucket zones. The
 * zone of zones and zone of kegs are accounted separately.
 */
#define	UMA_BOOT_ZONES	11

/* Zone of zones and zone of kegs have arbitrary alignment. */
#define	UMA_BOOT_ALIGN	32

/*
 * Sizes of the bootstrap "zone of kegs" keg and "zone of zones" zone
 * structures; computed in uma_startup_count() and reused by
 * uma_startup() when carving them out of the boot pages.
 */
static int zsize, ksize;
|
|
|
|
int
uma_startup_count(int vm_zones)
{
	int zones, pages;
	size_t space, size;

	/*
	 * Per-CPU caches are sized by mp_maxid + 1 (not mp_ncpus), and
	 * per-domain state by vm_ndomains; these sizes are stashed in the
	 * file-scope ksize/zsize for reuse by uma_startup().
	 */
	ksize = sizeof(struct uma_keg) +
	    (sizeof(struct uma_domain) * vm_ndomains);
	zsize = sizeof(struct uma_zone) +
	    (sizeof(struct uma_cache) * (mp_maxid + 1)) +
	    (sizeof(struct uma_zone_domain) * vm_ndomains);

	/*
	 * Memory for the zone of kegs and its keg,
	 * and for zone of zones.  The zone of zones is counted twice
	 * (zsize * 2): once for "UMA Kegs" and once for "UMA Zones",
	 * both cache-line aligned.
	 */
	pages = howmany(roundup(zsize, CACHE_LINE_SIZE) * 2 +
	    roundup(ksize, CACHE_LINE_SIZE), PAGE_SIZE);

#ifdef UMA_MD_SMALL_ALLOC
	/*
	 * With an MD small-alloc backend the VM's early zones need no
	 * boot pages of their own, so vm_zones is left as passed but
	 * not added to the zone count here.
	 */
	zones = UMA_BOOT_ZONES;
#else
	zones = UMA_BOOT_ZONES + vm_zones;
	vm_zones = 0;
#endif
	/* Worst-case slab header size and usable item space per slab. */
	size = slab_sizeof(SLAB_MAX_SETSIZE);
	space = slab_space(SLAB_MAX_SETSIZE);

	/* Memory for the rest of startup zones, UMA and VM, ... */
	if (zsize > space) {
		/* See keg_large_init(). */
		u_int ppera;

		ppera = howmany(roundup2(zsize, UMA_BOOT_ALIGN), PAGE_SIZE);
		/*
		 * If the slab header does not fit in the tail of the
		 * pages holding the item, an extra page per allocation
		 * is needed (mirrors keg_large_init() — keep in sync).
		 */
		if (PAGE_SIZE * ppera - roundup2(zsize, UMA_BOOT_ALIGN) < size)
			ppera++;
		pages += (zones + vm_zones) * ppera;
	} else if (roundup2(zsize, UMA_BOOT_ALIGN) > space)
		/* See keg_small_init() special case for uk_ppera = 1. */
		pages += zones;
	else
		/* Several aligned zone structures fit per slab. */
		pages += howmany(zones,
		    space / roundup2(zsize, UMA_BOOT_ALIGN));

	/* ... and their kegs. Note that zone of zones allocates a keg! */
	pages += howmany(zones + 1,
	    space / roundup2(ksize, UMA_BOOT_ALIGN));

	return (pages);
}
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
void
|
2017-06-01 18:26:57 +00:00
|
|
|
uma_startup(void *mem, int npages)
|
2002-03-19 09:11:49 +00:00
|
|
|
{
|
|
|
|
struct uma_zctor_args args;
|
2018-01-12 23:25:05 +00:00
|
|
|
uma_keg_t masterkeg;
|
|
|
|
uintptr_t m;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
Followup on r302393 by cperciva, improving calculation of boot pages required
for UMA startup.
o Introduce another stage of UMA startup, which is entered after
vm_page_startup() finishes. After this stage we don't yet enable buckets,
but we can ask VM for pages. Rename stages to meaningful names while here.
New list of stages: BOOT_COLD, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
BOOT_RUNNING.
Enabling page alloc earlier allows us to dramatically reduce number of
boot pages required. What is more important number of zones becomes
consistent across different machines, as no MD allocations are done before
the BOOT_PAGEALLOC stage. Now only UMA internal zones actually need to use
startup_alloc(), however that may change, so vm_page_startup() provides
its need for early zones as argument.
o Introduce uma_startup_count() function, to avoid code duplication. The
functions calculates sizes of zones zone and kegs zone, and calculates how
many pages UMA will need to bootstrap.
It counts not only of zone structures, but also of kegs, slabs and hashes.
o Hide uma_startup_foo() declarations from public file.
o Provide several DIAGNOSTIC printfs on boot_pages usage.
o Bugfix: when calculating zone of zones size use (mp_maxid + 1) instead of
mp_ncpus. Use resulting number not only in the size argument to zone_ctor()
but also as args.size.
Reviewed by: imp, gallatin (earlier version)
Differential Revision: https://reviews.freebsd.org/D14054
2018-02-06 04:16:00 +00:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
printf("Entering %s with %d boot pages configured\n", __func__, npages);
|
|
|
|
#endif
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
Followup on r302393 by cperciva, improving calculation of boot pages required
for UMA startup.
o Introduce another stage of UMA startup, which is entered after
vm_page_startup() finishes. After this stage we don't yet enable buckets,
but we can ask VM for pages. Rename stages to meaningful names while here.
New list of stages: BOOT_COLD, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
BOOT_RUNNING.
Enabling page alloc earlier allows us to dramatically reduce number of
boot pages required. What is more important number of zones becomes
consistent across different machines, as no MD allocations are done before
the BOOT_PAGEALLOC stage. Now only UMA internal zones actually need to use
startup_alloc(), however that may change, so vm_page_startup() provides
its need for early zones as argument.
o Introduce uma_startup_count() function, to avoid code duplication. The
functions calculates sizes of zones zone and kegs zone, and calculates how
many pages UMA will need to bootstrap.
It counts not only of zone structures, but also of kegs, slabs and hashes.
o Hide uma_startup_foo() declarations from public file.
o Provide several DIAGNOSTIC printfs on boot_pages usage.
o Bugfix: when calculating zone of zones size use (mp_maxid + 1) instead of
mp_ncpus. Use resulting number not only in the size argument to zone_ctor()
but also as args.size.
Reviewed by: imp, gallatin (earlier version)
Differential Revision: https://reviews.freebsd.org/D14054
2018-02-06 04:16:00 +00:00
|
|
|
rw_init(&uma_rwlock, "UMA lock");
|
2018-01-12 23:25:05 +00:00
|
|
|
|
|
|
|
/* Use bootpages memory for the zone of zones and zone of kegs. */
|
|
|
|
m = (uintptr_t)mem;
|
|
|
|
zones = (uma_zone_t)m;
|
|
|
|
m += roundup(zsize, CACHE_LINE_SIZE);
|
|
|
|
kegs = (uma_zone_t)m;
|
|
|
|
m += roundup(zsize, CACHE_LINE_SIZE);
|
|
|
|
masterkeg = (uma_keg_t)m;
|
|
|
|
m += roundup(ksize, CACHE_LINE_SIZE);
|
|
|
|
m = roundup(m, PAGE_SIZE);
|
|
|
|
npages -= (m - (uintptr_t)mem) / PAGE_SIZE;
|
|
|
|
mem = (void *)m;
|
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
/* "manually" create the initial zone */
|
2013-06-17 03:43:47 +00:00
|
|
|
memset(&args, 0, sizeof(args));
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
args.name = "UMA Kegs";
|
2018-01-12 23:25:05 +00:00
|
|
|
args.size = ksize;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
args.ctor = keg_ctor;
|
|
|
|
args.dtor = keg_dtor;
|
2002-03-19 09:11:49 +00:00
|
|
|
args.uminit = zero_init;
|
|
|
|
args.fini = NULL;
|
2018-01-12 23:25:05 +00:00
|
|
|
args.keg = masterkeg;
|
2018-02-07 18:32:51 +00:00
|
|
|
args.align = UMA_BOOT_ALIGN - 1;
|
2003-09-19 08:37:44 +00:00
|
|
|
args.flags = UMA_ZFLAG_INTERNAL;
|
2018-01-12 23:25:05 +00:00
|
|
|
zone_ctor(kegs, zsize, &args, M_WAITOK);
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2017-06-01 18:26:57 +00:00
|
|
|
bootmem = mem;
|
|
|
|
boot_pages = npages;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
args.name = "UMA Zones";
|
Followup on r302393 by cperciva, improving calculation of boot pages required
for UMA startup.
o Introduce another stage of UMA startup, which is entered after
vm_page_startup() finishes. After this stage we don't yet enable buckets,
but we can ask VM for pages. Rename stages to meaningful names while here.
New list of stages: BOOT_COLD, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
BOOT_RUNNING.
Enabling page alloc earlier allows us to dramatically reduce number of
boot pages required. What is more important number of zones becomes
consistent across different machines, as no MD allocations are done before
the BOOT_PAGEALLOC stage. Now only UMA internal zones actually need to use
startup_alloc(), however that may change, so vm_page_startup() provides
its need for early zones as argument.
o Introduce uma_startup_count() function, to avoid code duplication. The
functions calculates sizes of zones zone and kegs zone, and calculates how
many pages UMA will need to bootstrap.
It counts not only of zone structures, but also of kegs, slabs and hashes.
o Hide uma_startup_foo() declarations from public file.
o Provide several DIAGNOSTIC printfs on boot_pages usage.
o Bugfix: when calculating zone of zones size use (mp_maxid + 1) instead of
mp_ncpus. Use resulting number not only in the size argument to zone_ctor()
but also as args.size.
Reviewed by: imp, gallatin (earlier version)
Differential Revision: https://reviews.freebsd.org/D14054
2018-02-06 04:16:00 +00:00
|
|
|
args.size = zsize;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
args.ctor = zone_ctor;
|
|
|
|
args.dtor = zone_dtor;
|
|
|
|
args.uminit = zero_init;
|
|
|
|
args.fini = NULL;
|
|
|
|
args.keg = NULL;
|
2018-02-07 18:32:51 +00:00
|
|
|
args.align = UMA_BOOT_ALIGN - 1;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
args.flags = UMA_ZFLAG_INTERNAL;
|
2018-01-12 23:25:05 +00:00
|
|
|
zone_ctor(zones, zsize, &args, M_WAITOK);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/* Now make a zone for slab headers */
|
2019-12-08 01:15:06 +00:00
|
|
|
slabzone = uma_zcreate("UMA Slabs", sizeof(struct uma_hash_slab),
|
|
|
|
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
|
2002-03-19 09:11:49 +00:00
|
|
|
|
|
|
|
hashzone = uma_zcreate("UMA Hash",
|
|
|
|
sizeof(struct slabhead *) * UMA_HASH_SIZE_INIT,
|
2019-12-08 01:15:06 +00:00
|
|
|
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZFLAG_INTERNAL);
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2003-09-19 06:26:45 +00:00
|
|
|
bucket_init();
|
2002-03-19 09:11:49 +00:00
|
|
|
|
Followup on r302393 by cperciva, improving calculation of boot pages required
for UMA startup.
o Introduce another stage of UMA startup, which is entered after
vm_page_startup() finishes. After this stage we don't yet enable buckets,
but we can ask VM for pages. Rename stages to meaningful names while here.
New list of stages: BOOT_COLD, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
BOOT_RUNNING.
Enabling page alloc earlier allows us to dramatically reduce number of
boot pages required. What is more important number of zones becomes
consistent across different machines, as no MD allocations are done before
the BOOT_PAGEALLOC stage. Now only UMA internal zones actually need to use
startup_alloc(), however that may change, so vm_page_startup() provides
its need for early zones as argument.
o Introduce uma_startup_count() function, to avoid code duplication. The
functions calculates sizes of zones zone and kegs zone, and calculates how
many pages UMA will need to bootstrap.
It counts not only of zone structures, but also of kegs, slabs and hashes.
o Hide uma_startup_foo() declarations from public file.
o Provide several DIAGNOSTIC printfs on boot_pages usage.
o Bugfix: when calculating zone of zones size use (mp_maxid + 1) instead of
mp_ncpus. Use resulting number not only in the size argument to zone_ctor()
but also as args.size.
Reviewed by: imp, gallatin (earlier version)
Differential Revision: https://reviews.freebsd.org/D14054
2018-02-06 04:16:00 +00:00
|
|
|
booted = BOOT_STRAPPED;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
uma_startup1(void)
|
|
|
|
{
|
|
|
|
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
|
|
|
|
#endif
|
|
|
|
booted = BOOT_PAGEALLOC;
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2002-09-18 08:26:30 +00:00
|
|
|
uma_startup2(void)
|
2002-03-19 09:11:49 +00:00
|
|
|
{
|
Followup on r302393 by cperciva, improving calculation of boot pages required
for UMA startup.
o Introduce another stage of UMA startup, which is entered after
vm_page_startup() finishes. After this stage we don't yet enable buckets,
but we can ask VM for pages. Rename stages to meaningful names while here.
New list of stages: BOOT_COLD, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
BOOT_RUNNING.
Enabling page alloc earlier allows us to dramatically reduce number of
boot pages required. What is more important number of zones becomes
consistent across different machines, as no MD allocations are done before
the BOOT_PAGEALLOC stage. Now only UMA internal zones actually need to use
startup_alloc(), however that may change, so vm_page_startup() provides
its need for early zones as argument.
o Introduce uma_startup_count() function, to avoid code duplication. The
functions calculates sizes of zones zone and kegs zone, and calculates how
many pages UMA will need to bootstrap.
It counts not only of zone structures, but also of kegs, slabs and hashes.
o Hide uma_startup_foo() declarations from public file.
o Provide several DIAGNOSTIC printfs on boot_pages usage.
o Bugfix: when calculating zone of zones size use (mp_maxid + 1) instead of
mp_ncpus. Use resulting number not only in the size argument to zone_ctor()
but also as args.size.
Reviewed by: imp, gallatin (earlier version)
Differential Revision: https://reviews.freebsd.org/D14054
2018-02-06 04:16:00 +00:00
|
|
|
|
2018-02-09 04:45:39 +00:00
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
printf("Entering %s with %d boot pages left\n", __func__, boot_pages);
|
|
|
|
#endif
|
Followup on r302393 by cperciva, improving calculation of boot pages required
for UMA startup.
o Introduce another stage of UMA startup, which is entered after
vm_page_startup() finishes. After this stage we don't yet enable buckets,
but we can ask VM for pages. Rename stages to meaningful names while here.
New list of stages: BOOT_COLD, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
BOOT_RUNNING.
Enabling page alloc earlier allows us to dramatically reduce number of
boot pages required. What is more important number of zones becomes
consistent across different machines, as no MD allocations are done before
the BOOT_PAGEALLOC stage. Now only UMA internal zones actually need to use
startup_alloc(), however that may change, so vm_page_startup() provides
its need for early zones as argument.
o Introduce uma_startup_count() function, to avoid code duplication. The
functions calculates sizes of zones zone and kegs zone, and calculates how
many pages UMA will need to bootstrap.
It counts not only of zone structures, but also of kegs, slabs and hashes.
o Hide uma_startup_foo() declarations from public file.
o Provide several DIAGNOSTIC printfs on boot_pages usage.
o Bugfix: when calculating zone of zones size use (mp_maxid + 1) instead of
mp_ncpus. Use resulting number not only in the size argument to zone_ctor()
but also as args.size.
Reviewed by: imp, gallatin (earlier version)
Differential Revision: https://reviews.freebsd.org/D14054
2018-02-06 04:16:00 +00:00
|
|
|
booted = BOOT_BUCKETS;
|
2019-09-01 22:22:43 +00:00
|
|
|
sx_init(&uma_reclaim_lock, "umareclaim");
|
Followup on r302393 by cperciva, improving calculation of boot pages required
for UMA startup.
o Introduce another stage of UMA startup, which is entered after
vm_page_startup() finishes. After this stage we don't yet enable buckets,
but we can ask VM for pages. Rename stages to meaningful names while here.
New list of stages: BOOT_COLD, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
BOOT_RUNNING.
Enabling page alloc earlier allows us to dramatically reduce number of
boot pages required. What is more important number of zones becomes
consistent across different machines, as no MD allocations are done before
the BOOT_PAGEALLOC stage. Now only UMA internal zones actually need to use
startup_alloc(), however that may change, so vm_page_startup() provides
its need for early zones as argument.
o Introduce uma_startup_count() function, to avoid code duplication. The
functions calculates sizes of zones zone and kegs zone, and calculates how
many pages UMA will need to bootstrap.
It counts not only of zone structures, but also of kegs, slabs and hashes.
o Hide uma_startup_foo() declarations from public file.
o Provide several DIAGNOSTIC printfs on boot_pages usage.
o Bugfix: when calculating zone of zones size use (mp_maxid + 1) instead of
mp_ncpus. Use resulting number not only in the size argument to zone_ctor()
but also as args.size.
Reviewed by: imp, gallatin (earlier version)
Differential Revision: https://reviews.freebsd.org/D14054
2018-02-06 04:16:00 +00:00
|
|
|
bucket_enable();
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize our callout handle
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
uma_startup3(void)
|
|
|
|
{
|
2017-06-01 18:36:52 +00:00
|
|
|
|
2018-06-08 00:15:08 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
|
|
|
|
uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
|
|
|
|
uma_skip_cnt = counter_u64_alloc(M_WAITOK);
|
|
|
|
#endif
|
2019-11-28 00:19:09 +00:00
|
|
|
zone_foreach(zone_alloc_counters, NULL);
|
|
|
|
zone_foreach(zone_alloc_sysctl, NULL);
|
2015-05-22 17:05:21 +00:00
|
|
|
callout_init(&uma_callout, 1);
|
2003-09-19 23:27:46 +00:00
|
|
|
callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
|
2018-06-08 00:15:08 +00:00
|
|
|
booted = BOOT_RUNNING;
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
2009-01-25 09:11:24 +00:00
|
|
|
static uma_keg_t
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit, uma_fini fini,
|
2013-04-09 17:43:48 +00:00
|
|
|
int align, uint32_t flags)
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
{
|
|
|
|
struct uma_kctor_args args;
|
|
|
|
|
|
|
|
args.size = size;
|
|
|
|
args.uminit = uminit;
|
|
|
|
args.fini = fini;
|
2007-02-11 20:13:52 +00:00
|
|
|
args.align = (align == UMA_ALIGN_CACHE) ? uma_align_cache : align;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
args.flags = flags;
|
|
|
|
args.zone = zone;
|
2018-01-12 23:25:05 +00:00
|
|
|
return (zone_alloc_item(kegs, &args, UMA_ANYDOMAIN, M_WAITOK));
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
}
|
|
|
|
|
Followup on r302393 by cperciva, improving calculation of boot pages required
for UMA startup.
o Introduce another stage of UMA startup, which is entered after
vm_page_startup() finishes. After this stage we don't yet enable buckets,
but we can ask VM for pages. Rename stages to meaningful names while here.
New list of stages: BOOT_COLD, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
BOOT_RUNNING.
Enabling page alloc earlier allows us to dramatically reduce number of
boot pages required. What is more important number of zones becomes
consistent across different machines, as no MD allocations are done before
the BOOT_PAGEALLOC stage. Now only UMA internal zones actually need to use
startup_alloc(), however that may change, so vm_page_startup() provides
its need for early zones as argument.
o Introduce uma_startup_count() function, to avoid code duplication. The
functions calculates sizes of zones zone and kegs zone, and calculates how
many pages UMA will need to bootstrap.
It counts not only of zone structures, but also of kegs, slabs and hashes.
o Hide uma_startup_foo() declarations from public file.
o Provide several DIAGNOSTIC printfs on boot_pages usage.
o Bugfix: when calculating zone of zones size use (mp_maxid + 1) instead of
mp_ncpus. Use resulting number not only in the size argument to zone_ctor()
but also as args.size.
Reviewed by: imp, gallatin (earlier version)
Differential Revision: https://reviews.freebsd.org/D14054
2018-02-06 04:16:00 +00:00
|
|
|
/* Public functions */
|
2007-02-11 20:13:52 +00:00
|
|
|
/* See uma.h */
|
|
|
|
void
|
|
|
|
uma_set_align(int align)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (align != UMA_ALIGN_CACHE)
|
|
|
|
uma_align_cache = align;
|
|
|
|
}
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/* See uma.h */
|
2004-01-30 16:26:29 +00:00
|
|
|
uma_zone_t
|
2012-10-26 17:51:05 +00:00
|
|
|
uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
|
2013-04-09 17:43:48 +00:00
|
|
|
uma_init uminit, uma_fini fini, int align, uint32_t flags)
|
2004-01-30 16:26:29 +00:00
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
{
|
|
|
|
struct uma_zctor_args args;
|
2014-11-30 20:20:55 +00:00
|
|
|
uma_zone_t res;
|
|
|
|
bool locked;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2017-04-04 16:26:46 +00:00
|
|
|
KASSERT(powerof2(align + 1), ("invalid zone alignment %d for \"%s\"",
|
|
|
|
align, name));
|
|
|
|
|
2019-08-06 21:50:34 +00:00
|
|
|
/* Sets all zones to a first-touch domain policy. */
|
|
|
|
#ifdef UMA_FIRSTTOUCH
|
|
|
|
flags |= UMA_ZONE_NUMA;
|
|
|
|
#endif
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/* This stuff is essential for the zone ctor */
|
2013-06-17 03:43:47 +00:00
|
|
|
memset(&args, 0, sizeof(args));
|
2002-03-19 09:11:49 +00:00
|
|
|
args.name = name;
|
|
|
|
args.size = size;
|
|
|
|
args.ctor = ctor;
|
|
|
|
args.dtor = dtor;
|
|
|
|
args.uminit = uminit;
|
|
|
|
args.fini = fini;
|
2015-06-25 20:44:46 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
/*
|
2019-11-27 19:49:55 +00:00
|
|
|
* Inject procedures which check for memory use after free if we are
|
|
|
|
* allowed to scramble the memory while it is not allocated. This
|
|
|
|
* requires that: UMA is actually able to access the memory, no init
|
|
|
|
* or fini procedures, no dependency on the initial value of the
|
|
|
|
* memory, and no (legitimate) use of the memory after free. Note,
|
|
|
|
* the ctor and dtor do not need to be empty.
|
|
|
|
*
|
|
|
|
* XXX UMA_ZONE_OFFPAGE.
|
2015-06-25 20:44:46 +00:00
|
|
|
*/
|
2015-09-02 23:09:01 +00:00
|
|
|
if ((!(flags & (UMA_ZONE_ZINIT | UMA_ZONE_NOFREE))) &&
|
2019-11-27 19:49:55 +00:00
|
|
|
uminit == NULL && fini == NULL) {
|
2015-06-25 20:44:46 +00:00
|
|
|
args.uminit = trash_init;
|
|
|
|
args.fini = trash_fini;
|
|
|
|
}
|
|
|
|
#endif
|
2002-03-19 09:11:49 +00:00
|
|
|
args.align = align;
|
|
|
|
args.flags = flags;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
args.keg = NULL;
|
|
|
|
|
Followup on r302393 by cperciva, improving calculation of boot pages required
for UMA startup.
o Introduce another stage of UMA startup, which is entered after
vm_page_startup() finishes. After this stage we don't yet enable buckets,
but we can ask VM for pages. Rename stages to meaningful names while here.
New list of stages: BOOT_COLD, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
BOOT_RUNNING.
Enabling page alloc earlier allows us to dramatically reduce number of
boot pages required. What is more important number of zones becomes
consistent across different machines, as no MD allocations are done before
the BOOT_PAGEALLOC stage. Now only UMA internal zones actually need to use
startup_alloc(), however that may change, so vm_page_startup() provides
its need for early zones as argument.
o Introduce uma_startup_count() function, to avoid code duplication. The
functions calculates sizes of zones zone and kegs zone, and calculates how
many pages UMA will need to bootstrap.
It counts not only of zone structures, but also of kegs, slabs and hashes.
o Hide uma_startup_foo() declarations from public file.
o Provide several DIAGNOSTIC printfs on boot_pages usage.
o Bugfix: when calculating zone of zones size use (mp_maxid + 1) instead of
mp_ncpus. Use resulting number not only in the size argument to zone_ctor()
but also as args.size.
Reviewed by: imp, gallatin (earlier version)
Differential Revision: https://reviews.freebsd.org/D14054
2018-02-06 04:16:00 +00:00
|
|
|
if (booted < BOOT_BUCKETS) {
|
2014-11-30 20:20:55 +00:00
|
|
|
locked = false;
|
|
|
|
} else {
|
2019-09-01 22:22:43 +00:00
|
|
|
sx_slock(&uma_reclaim_lock);
|
2014-11-30 20:20:55 +00:00
|
|
|
locked = true;
|
|
|
|
}
|
2018-01-12 23:25:05 +00:00
|
|
|
res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
|
2014-11-30 20:20:55 +00:00
|
|
|
if (locked)
|
2019-09-01 22:22:43 +00:00
|
|
|
sx_sunlock(&uma_reclaim_lock);
|
2014-11-30 20:20:55 +00:00
|
|
|
return (res);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* See uma.h */
|
|
|
|
uma_zone_t
|
|
|
|
uma_zsecond_create(char *name, uma_ctor ctor, uma_dtor dtor,
|
|
|
|
uma_init zinit, uma_fini zfini, uma_zone_t master)
|
|
|
|
{
|
|
|
|
struct uma_zctor_args args;
|
2009-01-25 09:11:24 +00:00
|
|
|
uma_keg_t keg;
|
2014-11-30 20:20:55 +00:00
|
|
|
uma_zone_t res;
|
|
|
|
bool locked;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
keg = master->uz_keg;
|
2013-06-17 03:43:47 +00:00
|
|
|
memset(&args, 0, sizeof(args));
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
args.name = name;
|
2009-01-25 09:11:24 +00:00
|
|
|
args.size = keg->uk_size;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
args.ctor = ctor;
|
|
|
|
args.dtor = dtor;
|
|
|
|
args.uminit = zinit;
|
|
|
|
args.fini = zfini;
|
2009-01-25 09:11:24 +00:00
|
|
|
args.align = keg->uk_align;
|
|
|
|
args.flags = keg->uk_flags | UMA_ZONE_SECONDARY;
|
|
|
|
args.keg = keg;
|
|
|
|
|
Followup on r302393 by cperciva, improving calculation of boot pages required
for UMA startup.
o Introduce another stage of UMA startup, which is entered after
vm_page_startup() finishes. After this stage we don't yet enable buckets,
but we can ask VM for pages. Rename stages to meaningful names while here.
New list of stages: BOOT_COLD, BOOT_STRAPPED, BOOT_PAGEALLOC, BOOT_BUCKETS,
BOOT_RUNNING.
Enabling page alloc earlier allows us to dramatically reduce number of
boot pages required. What is more important number of zones becomes
consistent across different machines, as no MD allocations are done before
the BOOT_PAGEALLOC stage. Now only UMA internal zones actually need to use
startup_alloc(), however that may change, so vm_page_startup() provides
its need for early zones as argument.
o Introduce uma_startup_count() function, to avoid code duplication. The
functions calculates sizes of zones zone and kegs zone, and calculates how
many pages UMA will need to bootstrap.
It counts not only of zone structures, but also of kegs, slabs and hashes.
o Hide uma_startup_foo() declarations from public file.
o Provide several DIAGNOSTIC printfs on boot_pages usage.
o Bugfix: when calculating zone of zones size use (mp_maxid + 1) instead of
mp_ncpus. Use resulting number not only in the size argument to zone_ctor()
but also as args.size.
Reviewed by: imp, gallatin (earlier version)
Differential Revision: https://reviews.freebsd.org/D14054
2018-02-06 04:16:00 +00:00
|
|
|
if (booted < BOOT_BUCKETS) {
|
2014-11-30 20:20:55 +00:00
|
|
|
locked = false;
|
|
|
|
} else {
|
2019-09-01 22:22:43 +00:00
|
|
|
sx_slock(&uma_reclaim_lock);
|
2014-11-30 20:20:55 +00:00
|
|
|
locked = true;
|
|
|
|
}
|
2009-01-25 09:11:24 +00:00
|
|
|
/* XXX Attaches only one keg of potentially many. */
|
2018-01-12 23:25:05 +00:00
|
|
|
res = zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK);
|
2014-11-30 20:20:55 +00:00
|
|
|
if (locked)
|
2019-09-01 22:22:43 +00:00
|
|
|
sx_sunlock(&uma_reclaim_lock);
|
2014-11-30 20:20:55 +00:00
|
|
|
return (res);
|
2009-01-25 09:11:24 +00:00
|
|
|
}
|
|
|
|
|
2013-06-17 03:43:47 +00:00
|
|
|
/* See uma.h */
|
|
|
|
uma_zone_t
|
2013-06-20 19:08:12 +00:00
|
|
|
uma_zcache_create(char *name, int size, uma_ctor ctor, uma_dtor dtor,
|
|
|
|
uma_init zinit, uma_fini zfini, uma_import zimport,
|
|
|
|
uma_release zrelease, void *arg, int flags)
|
2013-06-17 03:43:47 +00:00
|
|
|
{
|
|
|
|
struct uma_zctor_args args;
|
|
|
|
|
|
|
|
memset(&args, 0, sizeof(args));
|
|
|
|
args.name = name;
|
2013-06-20 19:08:12 +00:00
|
|
|
args.size = size;
|
2013-06-17 03:43:47 +00:00
|
|
|
args.ctor = ctor;
|
|
|
|
args.dtor = dtor;
|
|
|
|
args.uminit = zinit;
|
|
|
|
args.fini = zfini;
|
|
|
|
args.import = zimport;
|
|
|
|
args.release = zrelease;
|
|
|
|
args.arg = arg;
|
|
|
|
args.align = 0;
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
args.flags = flags | UMA_ZFLAG_CACHE;
|
2013-06-17 03:43:47 +00:00
|
|
|
|
2018-01-12 23:25:05 +00:00
|
|
|
return (zone_alloc_item(zones, &args, UMA_ANYDOMAIN, M_WAITOK));
|
2013-06-17 03:43:47 +00:00
|
|
|
}
|
|
|
|
|
2002-04-08 04:48:58 +00:00
|
|
|
/* See uma.h */
|
|
|
|
void
|
|
|
|
uma_zdestroy(uma_zone_t zone)
|
|
|
|
{
|
2005-07-20 18:47:42 +00:00
|
|
|
|
2019-09-01 22:22:43 +00:00
|
|
|
sx_slock(&uma_reclaim_lock);
|
2013-06-17 03:43:47 +00:00
|
|
|
zone_free_item(zones, zone, NULL, SKIP_NONE);
|
2019-09-01 22:22:43 +00:00
|
|
|
sx_sunlock(&uma_reclaim_lock);
|
2002-04-08 04:48:58 +00:00
|
|
|
}
|
|
|
|
|
2017-11-08 02:39:37 +00:00
|
|
|
void
|
|
|
|
uma_zwait(uma_zone_t zone)
|
|
|
|
{
|
|
|
|
void *item;
|
|
|
|
|
|
|
|
item = uma_zalloc_arg(zone, NULL, M_WAITOK);
|
|
|
|
uma_zfree(zone, item);
|
|
|
|
}
|
|
|
|
|
2018-06-08 21:40:03 +00:00
|
|
|
void *
|
|
|
|
uma_zalloc_pcpu_arg(uma_zone_t zone, void *udata, int flags)
|
|
|
|
{
|
|
|
|
void *item;
|
2018-06-21 11:43:54 +00:00
|
|
|
#ifdef SMP
|
2018-06-08 21:40:03 +00:00
|
|
|
int i;
|
|
|
|
|
|
|
|
MPASS(zone->uz_flags & UMA_ZONE_PCPU);
|
2018-06-21 11:43:54 +00:00
|
|
|
#endif
|
Fix pre-SI_SUB_CPU initialization of per-CPU counters.
r336020 introduced pcpu_page_alloc(), replacing page_alloc() as the
backend allocator for PCPU UMA zones. Unlike page_alloc(), it does
not honour malloc(9) flags such as M_ZERO or M_NODUMP, so fix that.
r336020 also changed counter(9) to initialize each counter using a
CPU_FOREACH() loop instead of an SMP rendezvous. Before SI_SUB_CPU,
smp_rendezvous() will only execute the callback on the current CPU
(i.e., CPU 0), so only one counter gets zeroed. The rest are zeroed
by virtue of the fact that UMA gratuitously zeroes slabs when importing
them into a zone.
Prior to SI_SUB_CPU, all_cpus is clear, so with r336020 we weren't
zeroing vm_cnt counters during boot: the CPU_FOREACH() loop had no
effect, and pcpu_page_alloc() didn't honour M_ZERO. Fix this by
iterating over the full range of CPU IDs when zeroing counters,
ignoring whether the corresponding bits in all_cpus are set.
Reported and tested by: pho (previous version)
Reviewed by: kib (previous version)
Differential Revision: https://reviews.freebsd.org/D16190
2018-07-10 00:18:12 +00:00
|
|
|
item = uma_zalloc_arg(zone, udata, flags & ~M_ZERO);
|
2018-06-08 21:40:03 +00:00
|
|
|
if (item != NULL && (flags & M_ZERO)) {
|
2018-06-21 11:43:54 +00:00
|
|
|
#ifdef SMP
|
Fix pre-SI_SUB_CPU initialization of per-CPU counters.
r336020 introduced pcpu_page_alloc(), replacing page_alloc() as the
backend allocator for PCPU UMA zones. Unlike page_alloc(), it does
not honour malloc(9) flags such as M_ZERO or M_NODUMP, so fix that.
r336020 also changed counter(9) to initialize each counter using a
CPU_FOREACH() loop instead of an SMP rendezvous. Before SI_SUB_CPU,
smp_rendezvous() will only execute the callback on the current CPU
(i.e., CPU 0), so only one counter gets zeroed. The rest are zeroed
by virtue of the fact that UMA gratuitously zeroes slabs when importing
them into a zone.
Prior to SI_SUB_CPU, all_cpus is clear, so with r336020 we weren't
zeroing vm_cnt counters during boot: the CPU_FOREACH() loop had no
effect, and pcpu_page_alloc() didn't honour M_ZERO. Fix this by
iterating over the full range of CPU IDs when zeroing counters,
ignoring whether the corresponding bits in all_cpus are set.
Reported and tested by: pho (previous version)
Reviewed by: kib (previous version)
Differential Revision: https://reviews.freebsd.org/D16190
2018-07-10 00:18:12 +00:00
|
|
|
for (i = 0; i <= mp_maxid; i++)
|
2018-06-08 21:40:03 +00:00
|
|
|
bzero(zpcpu_get_cpu(item, i), zone->uz_size);
|
2018-06-21 11:43:54 +00:00
|
|
|
#else
|
|
|
|
bzero(item, zone->uz_size);
|
|
|
|
#endif
|
2018-06-08 21:40:03 +00:00
|
|
|
}
|
|
|
|
return (item);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A stub while both regular and pcpu cases are identical.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
uma_zfree_pcpu_arg(uma_zone_t zone, void *item, void *udata)
|
|
|
|
{
|
|
|
|
|
2018-06-22 20:22:26 +00:00
|
|
|
#ifdef SMP
|
2018-06-08 21:40:03 +00:00
|
|
|
MPASS(zone->uz_flags & UMA_ZONE_PCPU);
|
2018-06-22 20:22:26 +00:00
|
|
|
#endif
|
2018-06-08 21:40:03 +00:00
|
|
|
uma_zfree_arg(zone, item, udata);
|
|
|
|
}
|
|
|
|
|
2019-11-26 22:17:02 +00:00
|
|
|
static inline void *
|
|
|
|
bucket_pop(uma_zone_t zone, uma_cache_t cache, uma_bucket_t bucket)
|
|
|
|
{
|
|
|
|
void *item;
|
|
|
|
|
|
|
|
bucket->ub_cnt--;
|
|
|
|
item = bucket->ub_bucket[bucket->ub_cnt];
|
|
|
|
#ifdef INVARIANTS
|
|
|
|
bucket->ub_bucket[bucket->ub_cnt] = NULL;
|
|
|
|
KASSERT(item != NULL, ("uma_zalloc: Bucket pointer mangled."));
|
|
|
|
#endif
|
|
|
|
cache->uc_allocs++;
|
|
|
|
|
|
|
|
return (item);
|
|
|
|
}
|
|
|
|
|
2019-11-27 23:19:06 +00:00
|
|
|
static inline void
|
|
|
|
bucket_push(uma_zone_t zone, uma_cache_t cache, uma_bucket_t bucket,
|
|
|
|
void *item)
|
|
|
|
{
|
|
|
|
KASSERT(bucket->ub_bucket[bucket->ub_cnt] == NULL,
|
|
|
|
("uma_zfree: Freeing to non free bucket index."));
|
|
|
|
bucket->ub_bucket[bucket->ub_cnt] = item;
|
|
|
|
bucket->ub_cnt++;
|
|
|
|
cache->uc_frees++;
|
|
|
|
}
|
|
|
|
|
2019-11-26 22:17:02 +00:00
|
|
|
static void *
|
|
|
|
item_ctor(uma_zone_t zone, void *udata, int flags, void *item)
|
|
|
|
{
|
|
|
|
#ifdef INVARIANTS
|
2019-11-27 19:49:55 +00:00
|
|
|
bool skipdbg;
|
2019-11-26 22:17:02 +00:00
|
|
|
|
|
|
|
skipdbg = uma_dbg_zskip(zone, item);
|
2019-11-27 19:49:55 +00:00
|
|
|
if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
|
|
|
|
zone->uz_ctor != trash_ctor)
|
|
|
|
trash_ctor(item, zone->uz_size, udata, flags);
|
2019-11-26 22:17:02 +00:00
|
|
|
#endif
|
2019-11-27 19:49:55 +00:00
|
|
|
if (__predict_false(zone->uz_ctor != NULL) &&
|
2019-11-26 22:17:02 +00:00
|
|
|
zone->uz_ctor(item, zone->uz_size, udata, flags) != 0) {
|
|
|
|
counter_u64_add(zone->uz_fails, 1);
|
|
|
|
zone_free_item(zone, item, udata, SKIP_DTOR | SKIP_CNT);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
#ifdef INVARIANTS
|
|
|
|
if (!skipdbg)
|
|
|
|
uma_dbg_alloc(zone, NULL, item);
|
|
|
|
#endif
|
|
|
|
if (flags & M_ZERO)
|
|
|
|
uma_zero_item(item, zone);
|
|
|
|
|
|
|
|
return (item);
|
|
|
|
}
|
|
|
|
|
2019-11-27 19:49:55 +00:00
|
|
|
static inline void
|
|
|
|
item_dtor(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
|
|
|
|
{
|
|
|
|
#ifdef INVARIANTS
|
|
|
|
bool skipdbg;
|
|
|
|
|
|
|
|
skipdbg = uma_dbg_zskip(zone, item);
|
|
|
|
if (skip == SKIP_NONE && !skipdbg) {
|
|
|
|
if ((zone->uz_flags & UMA_ZONE_MALLOC) != 0)
|
|
|
|
uma_dbg_free(zone, udata, item);
|
|
|
|
else
|
|
|
|
uma_dbg_free(zone, NULL, item);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
if (skip < SKIP_DTOR) {
|
|
|
|
if (zone->uz_dtor != NULL)
|
|
|
|
zone->uz_dtor(item, zone->uz_size, udata);
|
|
|
|
#ifdef INVARIANTS
|
|
|
|
if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
|
|
|
|
zone->uz_dtor != trash_dtor)
|
|
|
|
trash_dtor(item, zone->uz_size, udata);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/* See uma.h */
|
|
|
|
void *
|
2002-04-30 04:26:34 +00:00
|
|
|
uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
|
2002-03-19 09:11:49 +00:00
|
|
|
{
|
|
|
|
uma_bucket_t bucket;
|
2018-01-12 23:25:05 +00:00
|
|
|
uma_cache_t cache;
|
|
|
|
void *item;
|
2019-11-26 22:17:02 +00:00
|
|
|
int cpu, domain;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2015-08-22 12:59:05 +00:00
|
|
|
/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
|
2018-08-26 12:51:46 +00:00
|
|
|
random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
|
This is the much-discussed major upgrade to the random(4) device, known to you all as /dev/random.
This code has had an extensive rewrite and a good series of reviews, both by the author and other parties. This means a lot of code has been simplified. Pluggable structures for high-rate entropy generators are available, and it is most definitely not the case that /dev/random can be driven by only a hardware souce any more. This has been designed out of the device. Hardware sources are stirred into the CSPRNG (Yarrow, Fortuna) like any other entropy source. Pluggable modules may be written by third parties for additional sources.
The harvesting structures and consequently the locking have been simplified. Entropy harvesting is done in a more general way (the documentation for this will follow). There is some GREAT entropy to be had in the UMA allocator, but it is disabled for now as messing with that is likely to annoy many people.
The venerable (but effective) Yarrow algorithm, which is no longer supported by its authors now has an alternative, Fortuna. For now, Yarrow is retained as the default algorithm, but this may be changed using a kernel option. It is intended to make Fortuna the default algorithm for 11.0. Interested parties are encouraged to read ISBN 978-0-470-47424-2 "Cryptography Engineering" By Ferguson, Schneier and Kohno for Fortuna's gory details. Heck, read it anyway.
Many thanks to Arthur Mesh who did early grunt work, and who got caught in the crossfire rather more than he deserved to.
My thanks also to folks who helped me thresh this out on whiteboards and in the odd "Hallway track", or otherwise.
My Nomex pants are on. Let the feedback commence!
Reviewed by: trasz,des(partial),imp(partial?),rwatson(partial?)
Approved by: so(des)
2014-10-30 21:21:53 +00:00
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/* This is the fast path allocation */
|
2017-06-01 18:36:52 +00:00
|
|
|
CTR4(KTR_UMA, "uma_zalloc_arg thread %x zone %s(%p) flags %d",
|
|
|
|
curthread, zone->uz_name, zone, flags);
|
2002-04-08 02:42:55 +00:00
|
|
|
|
2007-01-10 21:04:43 +00:00
|
|
|
if (flags & M_WAITOK) {
|
|
|
|
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
|
|
|
|
"uma_zalloc_arg: zone \"%s\"", zone->uz_name);
|
2002-05-20 17:54:48 +00:00
|
|
|
}
|
Make UMA and malloc(9) return non-executable memory in most cases.
Most kernel memory that is allocated after boot does not need to be
executable. There are a few exceptions. For example, kernel modules
do need executable memory, but they don't use UMA or malloc(9). The
BPF JIT compiler also needs executable memory and did use malloc(9)
until r317072.
(Note that a side effect of r316767 was that the "small allocation"
path in UMA on amd64 already returned non-executable memory. This
meant that some calls to malloc(9) or the UMA zone(9) allocator could
return executable memory, while others could return non-executable
memory. This change makes the behavior consistent.)
This change makes malloc(9) return non-executable memory unless the new
M_EXEC flag is specified. After this change, the UMA zone(9) allocator
will always return non-executable memory, and a KASSERT will catch
attempts to use the M_EXEC flag to allocate executable memory using
uma_zalloc() or its variants.
Allocations that do need executable memory have various choices. They
may use the M_EXEC flag to malloc(9), or they may use a different VM
interfact to obtain executable pages.
Now that malloc(9) again allows executable allocations, this change also
reverts most of r317072.
PR: 228927
Reviewed by: alc, kib, markj, jhb (previous version)
Sponsored by: Netflix
Differential Revision: https://reviews.freebsd.org/D15691
2018-06-13 17:04:41 +00:00
|
|
|
KASSERT((flags & M_EXEC) == 0, ("uma_zalloc_arg: called with M_EXEC"));
|
2015-12-11 20:05:07 +00:00
|
|
|
KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
|
2015-11-19 14:04:53 +00:00
|
|
|
("uma_zalloc_arg: called with spinlock or critical section held"));
|
2018-06-08 03:16:16 +00:00
|
|
|
if (zone->uz_flags & UMA_ZONE_PCPU)
|
2018-06-08 05:40:36 +00:00
|
|
|
KASSERT((flags & M_ZERO) == 0, ("allocating from a pcpu zone "
|
|
|
|
"with M_ZERO passed"));
|
2015-11-19 14:04:53 +00:00
|
|
|
|
2011-10-12 18:08:28 +00:00
|
|
|
#ifdef DEBUG_MEMGUARD
|
|
|
|
if (memguard_cmp_zone(zone)) {
|
|
|
|
item = memguard_alloc(zone->uz_size, flags);
|
|
|
|
if (item != NULL) {
|
|
|
|
if (zone->uz_init != NULL &&
|
|
|
|
zone->uz_init(item, zone->uz_size, flags) != 0)
|
|
|
|
return (NULL);
|
|
|
|
if (zone->uz_ctor != NULL &&
|
2013-06-18 04:50:20 +00:00
|
|
|
zone->uz_ctor(item, zone->uz_size, udata,
|
|
|
|
flags) != 0) {
|
2019-11-27 19:49:55 +00:00
|
|
|
counter_u64_add(zone->uz_fails, 1);
|
2011-10-12 18:08:28 +00:00
|
|
|
zone->uz_fini(item, zone->uz_size);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
return (item);
|
|
|
|
}
|
|
|
|
/* This is unfortunate but should not be fatal. */
|
|
|
|
}
|
|
|
|
#endif
|
Modify UMA to use critical sections to protect per-CPU caches, rather than
mutexes, which offers lower overhead on both UP and SMP. When allocating
from or freeing to the per-cpu cache, without INVARIANTS enabled, we now
no longer perform any mutex operations, which offers a 1%-3% performance
improvement in a variety of micro-benchmarks. We rely on critical
sections to prevent (a) preemption resulting in reentrant access to UMA on
a single CPU, and (b) migration of the thread during access. In the event
we need to go back to the zone for a new bucket, we release the critical
section to acquire the global zone mutex, and must re-acquire the critical
section and re-evaluate which cache we are accessing in case migration has
occured, or circumstances have changed in the current cache.
Per-CPU cache statistics are now gathered lock-free by the sysctl, which
can result in small races in statistics reporting for caches.
Reviewed by: bmilekic, jeff (somewhat)
Tested by: rwatson, kris, gnn, scottl, mike at sentex dot net, others
2005-04-29 18:56:36 +00:00
|
|
|
/*
|
|
|
|
* If possible, allocate from the per-CPU cache. There are two
|
|
|
|
* requirements for safe access to the per-CPU cache: (1) the thread
|
|
|
|
* accessing the cache must not be preempted or yield during access,
|
|
|
|
* and (2) the thread must not migrate CPUs without switching which
|
|
|
|
* cache it accesses. We rely on a critical section to prevent
|
|
|
|
* preemption and migration. We release the critical section in
|
|
|
|
* order to acquire the zone mutex if we are unable to allocate from
|
|
|
|
* the current cache; when we re-acquire the critical section, we
|
|
|
|
* must detect and handle migration if it has occurred.
|
|
|
|
*/
|
|
|
|
critical_enter();
|
2019-11-26 22:17:02 +00:00
|
|
|
do {
|
|
|
|
cpu = curcpu;
|
|
|
|
cache = &zone->uz_cpu[cpu];
|
|
|
|
bucket = cache->uc_allocbucket;
|
|
|
|
if (__predict_true(bucket != NULL && bucket->ub_cnt != 0)) {
|
|
|
|
item = bucket_pop(zone, cache, bucket);
|
|
|
|
critical_exit();
|
|
|
|
return (item_ctor(zone, udata, flags, item));
|
2013-06-18 04:50:20 +00:00
|
|
|
}
|
2019-11-26 22:17:02 +00:00
|
|
|
} while (cache_alloc(zone, cache, udata, flags));
|
|
|
|
critical_exit();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We can not get a bucket so try to return a single item.
|
|
|
|
*/
|
|
|
|
if (zone->uz_flags & UMA_ZONE_NUMA)
|
|
|
|
domain = PCPU_GET(domain);
|
|
|
|
else
|
|
|
|
domain = UMA_ANYDOMAIN;
|
|
|
|
return (zone_alloc_item_locked(zone, udata, domain, flags));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Replenish an alloc bucket and possibly restore an old one. Called in
|
|
|
|
* a critical section. Returns in a critical section.
|
|
|
|
*
|
|
|
|
* A false return value indicates failure and returns with the zone lock
|
|
|
|
* held. A true return value indicates success and the caller should retry.
|
|
|
|
*/
|
|
|
|
static __noinline bool
|
|
|
|
cache_alloc(uma_zone_t zone, uma_cache_t cache, void *udata, int flags)
|
|
|
|
{
|
|
|
|
uma_zone_domain_t zdom;
|
|
|
|
uma_bucket_t bucket;
|
|
|
|
int cpu, domain;
|
|
|
|
bool lockfail;
|
|
|
|
|
|
|
|
CRITICAL_ASSERT(curthread);
|
2013-06-18 04:50:20 +00:00
|
|
|
|
|
|
|
/*
|
2019-11-26 22:17:02 +00:00
|
|
|
* If we have run out of items in our alloc bucket see
|
|
|
|
* if we can switch with the free bucket.
|
2013-06-18 04:50:20 +00:00
|
|
|
*/
|
|
|
|
bucket = cache->uc_freebucket;
|
2019-11-26 22:17:02 +00:00
|
|
|
if (bucket != NULL && bucket->ub_cnt != 0) {
|
2013-06-18 04:50:20 +00:00
|
|
|
cache->uc_freebucket = cache->uc_allocbucket;
|
|
|
|
cache->uc_allocbucket = bucket;
|
2019-11-26 22:17:02 +00:00
|
|
|
return (true);
|
2002-04-08 02:42:55 +00:00
|
|
|
}
|
2013-06-18 04:50:20 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Discard any empty allocation bucket while we hold no locks.
|
|
|
|
*/
|
|
|
|
bucket = cache->uc_allocbucket;
|
|
|
|
cache->uc_allocbucket = NULL;
|
|
|
|
critical_exit();
|
|
|
|
if (bucket != NULL)
|
2013-06-26 00:57:38 +00:00
|
|
|
bucket_free(zone, bucket, udata);
|
2013-06-18 04:50:20 +00:00
|
|
|
|
Modify UMA to use critical sections to protect per-CPU caches, rather than
mutexes, which offers lower overhead on both UP and SMP. When allocating
from or freeing to the per-cpu cache, without INVARIANTS enabled, we now
no longer perform any mutex operations, which offers a 1%-3% performance
improvement in a variety of micro-benchmarks. We rely on critical
sections to prevent (a) preemption resulting in reentrant access to UMA on
a single CPU, and (b) migration of the thread during access. In the event
we need to go back to the zone for a new bucket, we release the critical
section to acquire the global zone mutex, and must re-acquire the critical
section and re-evaluate which cache we are accessing in case migration has
occured, or circumstances have changed in the current cache.
Per-CPU cache statistics are now gathered lock-free by the sysctl, which
can result in small races in statistics reporting for caches.
Reviewed by: bmilekic, jeff (somewhat)
Tested by: rwatson, kris, gnn, scottl, mike at sentex dot net, others
2005-04-29 18:56:36 +00:00
|
|
|
/*
|
|
|
|
* Attempt to retrieve the item from the per-CPU cache has failed, so
|
|
|
|
* we must go back to the zone. This requires the zone lock, so we
|
|
|
|
* must drop the critical section, then re-acquire it when we go back
|
|
|
|
* to the cache. Since the critical section is released, we may be
|
|
|
|
* preempted or migrate. As such, make sure not to maintain any
|
|
|
|
* thread-local state specific to the cache from prior to releasing
|
|
|
|
* the critical section.
|
|
|
|
*/
|
2013-06-18 04:50:20 +00:00
|
|
|
lockfail = 0;
|
|
|
|
if (ZONE_TRYLOCK(zone) == 0) {
|
|
|
|
/* Record contention to size the buckets. */
|
|
|
|
ZONE_LOCK(zone);
|
|
|
|
lockfail = 1;
|
|
|
|
}
|
2019-11-26 22:17:02 +00:00
|
|
|
|
Modify UMA to use critical sections to protect per-CPU caches, rather than
mutexes, which offers lower overhead on both UP and SMP. When allocating
from or freeing to the per-cpu cache, without INVARIANTS enabled, we now
no longer perform any mutex operations, which offers a 1%-3% performance
improvement in a variety of micro-benchmarks. We rely on critical
sections to prevent (a) preemption resulting in reentrant access to UMA on
a single CPU, and (b) migration of the thread during access. In the event
we need to go back to the zone for a new bucket, we release the critical
section to acquire the global zone mutex, and must re-acquire the critical
section and re-evaluate which cache we are accessing in case migration has
occured, or circumstances have changed in the current cache.
Per-CPU cache statistics are now gathered lock-free by the sysctl, which
can result in small races in statistics reporting for caches.
Reviewed by: bmilekic, jeff (somewhat)
Tested by: rwatson, kris, gnn, scottl, mike at sentex dot net, others
2005-04-29 18:56:36 +00:00
|
|
|
critical_enter();
|
2019-11-26 22:17:02 +00:00
|
|
|
/* Short-circuit for zones without buckets and low memory. */
|
2019-11-28 00:19:09 +00:00
|
|
|
if (zone->uz_bucket_size == 0 || bucketdisable)
|
2019-11-26 22:17:02 +00:00
|
|
|
return (false);
|
|
|
|
|
Modify UMA to use critical sections to protect per-CPU caches, rather than
mutexes, which offers lower overhead on both UP and SMP. When allocating
from or freeing to the per-cpu cache, without INVARIANTS enabled, we now
no longer perform any mutex operations, which offers a 1%-3% performance
improvement in a variety of micro-benchmarks. We rely on critical
sections to prevent (a) preemption resulting in reentrant access to UMA on
a single CPU, and (b) migration of the thread during access. In the event
we need to go back to the zone for a new bucket, we release the critical
section to acquire the global zone mutex, and must re-acquire the critical
section and re-evaluate which cache we are accessing in case migration has
occured, or circumstances have changed in the current cache.
Per-CPU cache statistics are now gathered lock-free by the sysctl, which
can result in small races in statistics reporting for caches.
Reviewed by: bmilekic, jeff (somewhat)
Tested by: rwatson, kris, gnn, scottl, mike at sentex dot net, others
2005-04-29 18:56:36 +00:00
|
|
|
cpu = curcpu;
|
|
|
|
cache = &zone->uz_cpu[cpu];
|
|
|
|
|
2013-06-18 04:50:20 +00:00
|
|
|
/* See if we lost the race to fill the cache. */
|
|
|
|
if (cache->uc_allocbucket != NULL) {
|
|
|
|
ZONE_UNLOCK(zone);
|
2019-11-26 22:17:02 +00:00
|
|
|
return (true);
|
2002-04-08 02:42:55 +00:00
|
|
|
}
|
|
|
|
|
2013-06-18 04:50:20 +00:00
|
|
|
/*
|
|
|
|
* Check the zone's cache of buckets.
|
|
|
|
*/
|
2019-08-06 21:50:34 +00:00
|
|
|
if (zone->uz_flags & UMA_ZONE_NUMA) {
|
|
|
|
domain = PCPU_GET(domain);
|
2018-01-12 23:25:05 +00:00
|
|
|
zdom = &zone->uz_domain[domain];
|
2019-08-06 21:50:34 +00:00
|
|
|
} else {
|
|
|
|
domain = UMA_ANYDOMAIN;
|
|
|
|
zdom = &zone->uz_domain[0];
|
|
|
|
}
|
|
|
|
|
2019-09-01 22:22:43 +00:00
|
|
|
if ((bucket = zone_fetch_bucket(zone, zdom)) != NULL) {
|
2019-11-26 22:17:02 +00:00
|
|
|
ZONE_UNLOCK(zone);
|
2003-09-19 06:26:45 +00:00
|
|
|
KASSERT(bucket->ub_cnt != 0,
|
2002-04-08 02:42:55 +00:00
|
|
|
("uma_zalloc_arg: Returning an empty bucket."));
|
|
|
|
cache->uc_allocbucket = bucket;
|
2019-11-26 22:17:02 +00:00
|
|
|
return (true);
|
2004-01-30 16:26:29 +00:00
|
|
|
}
|
Modify UMA to use critical sections to protect per-CPU caches, rather than
mutexes, which offers lower overhead on both UP and SMP. When allocating
from or freeing to the per-cpu cache, without INVARIANTS enabled, we now
no longer perform any mutex operations, which offers a 1%-3% performance
improvement in a variety of micro-benchmarks. We rely on critical
sections to prevent (a) preemption resulting in reentrant access to UMA on
a single CPU, and (b) migration of the thread during access. In the event
we need to go back to the zone for a new bucket, we release the critical
section to acquire the global zone mutex, and must re-acquire the critical
section and re-evaluate which cache we are accessing in case migration has
occured, or circumstances have changed in the current cache.
Per-CPU cache statistics are now gathered lock-free by the sysctl, which
can result in small races in statistics reporting for caches.
Reviewed by: bmilekic, jeff (somewhat)
Tested by: rwatson, kris, gnn, scottl, mike at sentex dot net, others
2005-04-29 18:56:36 +00:00
|
|
|
/* We are no longer associated with this CPU. */
|
|
|
|
critical_exit();
|
2002-10-24 07:59:03 +00:00
|
|
|
|
2013-06-18 04:50:20 +00:00
|
|
|
/*
|
|
|
|
* We bump the uz count when the cache size is insufficient to
|
|
|
|
* handle the working set.
|
|
|
|
*/
|
2019-11-28 00:19:09 +00:00
|
|
|
if (lockfail && zone->uz_bucket_size < zone->uz_bucket_size_max)
|
|
|
|
zone->uz_bucket_size++;
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
|
2019-11-26 22:17:02 +00:00
|
|
|
* Fill a bucket and attempt to use it as the alloc bucket.
|
2002-03-19 09:11:49 +00:00
|
|
|
*/
|
2019-11-26 22:17:02 +00:00
|
|
|
bucket = zone_alloc_bucket(zone, udata, domain, flags);
|
2017-06-01 18:36:52 +00:00
|
|
|
CTR3(KTR_UMA, "uma_zalloc: zone %s(%p) bucket zone returned %p",
|
|
|
|
zone->uz_name, zone, bucket);
|
2019-11-26 22:17:02 +00:00
|
|
|
critical_enter();
|
|
|
|
if (bucket == NULL)
|
|
|
|
return (false);
|
2013-06-18 04:50:20 +00:00
|
|
|
|
2002-10-24 07:59:03 +00:00
|
|
|
/*
|
2019-11-26 22:17:02 +00:00
|
|
|
* See if we lost the race or were migrated. Cache the
|
|
|
|
* initialized bucket to make this less likely or claim
|
|
|
|
* the memory directly.
|
2002-10-24 07:59:03 +00:00
|
|
|
*/
|
2019-11-26 22:17:02 +00:00
|
|
|
cpu = curcpu;
|
|
|
|
cache = &zone->uz_cpu[cpu];
|
|
|
|
if (cache->uc_allocbucket == NULL &&
|
|
|
|
((zone->uz_flags & UMA_ZONE_NUMA) == 0 ||
|
|
|
|
domain == PCPU_GET(domain))) {
|
|
|
|
cache->uc_allocbucket = bucket;
|
|
|
|
zdom->uzd_imax += bucket->ub_cnt;
|
|
|
|
} else if (zone->uz_bkt_count >= zone->uz_bkt_max) {
|
|
|
|
critical_exit();
|
|
|
|
ZONE_UNLOCK(zone);
|
|
|
|
bucket_drain(zone, bucket);
|
|
|
|
bucket_free(zone, bucket, udata);
|
|
|
|
critical_enter();
|
|
|
|
return (true);
|
|
|
|
} else
|
|
|
|
zone_put_bucket(zone, zdom, bucket, false);
|
|
|
|
ZONE_UNLOCK(zone);
|
|
|
|
return (true);
|
2002-10-24 07:59:03 +00:00
|
|
|
}
|
|
|
|
|
2018-01-12 23:25:05 +00:00
|
|
|
void *
|
|
|
|
uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
|
|
|
|
{
|
|
|
|
|
|
|
|
/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
|
2018-08-26 12:51:46 +00:00
|
|
|
random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
|
2018-01-12 23:25:05 +00:00
|
|
|
|
|
|
|
/* This is the fast path allocation */
|
|
|
|
CTR5(KTR_UMA,
|
|
|
|
"uma_zalloc_domain thread %x zone %s(%p) domain %d flags %d",
|
|
|
|
curthread, zone->uz_name, zone, domain, flags);
|
|
|
|
|
|
|
|
if (flags & M_WAITOK) {
|
|
|
|
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
|
|
|
|
"uma_zalloc_domain: zone \"%s\"", zone->uz_name);
|
|
|
|
}
|
|
|
|
KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
|
|
|
|
("uma_zalloc_domain: called with spinlock or critical section held"));
|
|
|
|
|
|
|
|
return (zone_alloc_item(zone, udata, domain, flags));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find a slab with some space. Prefer slabs that are partially used over those
|
|
|
|
* that are totally full. This helps to reduce fragmentation.
|
|
|
|
*
|
|
|
|
* If 'rr' is 1, search all domains starting from 'domain'. Otherwise check
|
|
|
|
* only 'domain'.
|
|
|
|
*/
|
2002-10-24 07:59:03 +00:00
|
|
|
static uma_slab_t
|
2018-10-24 16:41:47 +00:00
|
|
|
keg_first_slab(uma_keg_t keg, int domain, bool rr)
|
2002-10-24 07:59:03 +00:00
|
|
|
{
|
2018-01-12 23:25:05 +00:00
|
|
|
uma_domain_t dom;
|
2002-10-24 07:59:03 +00:00
|
|
|
uma_slab_t slab;
|
2018-01-12 23:25:05 +00:00
|
|
|
int start;
|
|
|
|
|
|
|
|
KASSERT(domain >= 0 && domain < vm_ndomains,
|
|
|
|
("keg_first_slab: domain %d out of range", domain));
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
KEG_LOCK_ASSERT(keg);
|
2018-01-12 23:25:05 +00:00
|
|
|
|
|
|
|
slab = NULL;
|
|
|
|
start = domain;
|
|
|
|
do {
|
|
|
|
dom = &keg->uk_domain[domain];
|
|
|
|
if (!LIST_EMPTY(&dom->ud_part_slab))
|
|
|
|
return (LIST_FIRST(&dom->ud_part_slab));
|
|
|
|
if (!LIST_EMPTY(&dom->ud_free_slab)) {
|
|
|
|
slab = LIST_FIRST(&dom->ud_free_slab);
|
|
|
|
LIST_REMOVE(slab, us_link);
|
|
|
|
LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
|
|
|
|
return (slab);
|
|
|
|
}
|
|
|
|
if (rr)
|
|
|
|
domain = (domain + 1) % vm_ndomains;
|
|
|
|
} while (domain != start);
|
|
|
|
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uma_slab_t
|
2018-10-24 16:41:47 +00:00
|
|
|
keg_fetch_free_slab(uma_keg_t keg, int domain, bool rr, int flags)
|
|
|
|
{
|
|
|
|
uint32_t reserve;
|
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
KEG_LOCK_ASSERT(keg);
|
2018-10-24 16:41:47 +00:00
|
|
|
|
|
|
|
reserve = (flags & M_USE_RESERVE) != 0 ? 0 : keg->uk_reserve;
|
|
|
|
if (keg->uk_free <= reserve)
|
|
|
|
return (NULL);
|
|
|
|
return (keg_first_slab(keg, domain, rr));
|
|
|
|
}
|
|
|
|
|
|
|
|
static uma_slab_t
|
|
|
|
keg_fetch_slab(uma_keg_t keg, uma_zone_t zone, int rdomain, const int flags)
|
2018-01-12 23:25:05 +00:00
|
|
|
{
|
2018-10-24 16:41:47 +00:00
|
|
|
struct vm_domainset_iter di;
|
2018-01-12 23:25:05 +00:00
|
|
|
uma_domain_t dom;
|
|
|
|
uma_slab_t slab;
|
2018-10-24 16:41:47 +00:00
|
|
|
int aflags, domain;
|
|
|
|
bool rr;
|
2002-10-24 07:59:03 +00:00
|
|
|
|
2018-10-24 16:41:47 +00:00
|
|
|
restart:
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
KEG_LOCK_ASSERT(keg);
|
2002-10-24 07:59:03 +00:00
|
|
|
|
2018-01-12 23:25:05 +00:00
|
|
|
/*
|
2018-10-24 16:41:47 +00:00
|
|
|
* Use the keg's policy if upper layers haven't already specified a
|
|
|
|
* domain (as happens with first-touch zones).
|
|
|
|
*
|
|
|
|
* To avoid races we run the iterator with the keg lock held, but that
|
|
|
|
* means that we cannot allow the vm_domainset layer to sleep. Thus,
|
|
|
|
* clear M_WAITOK and handle low memory conditions locally.
|
2018-01-12 23:25:05 +00:00
|
|
|
*/
|
|
|
|
rr = rdomain == UMA_ANYDOMAIN;
|
|
|
|
if (rr) {
|
2018-10-24 16:41:47 +00:00
|
|
|
aflags = (flags & ~M_WAITOK) | M_NOWAIT;
|
|
|
|
vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
|
|
|
|
&aflags);
|
|
|
|
} else {
|
|
|
|
aflags = flags;
|
|
|
|
domain = rdomain;
|
|
|
|
}
|
2018-01-12 23:25:05 +00:00
|
|
|
|
2018-10-24 16:41:47 +00:00
|
|
|
for (;;) {
|
|
|
|
slab = keg_fetch_free_slab(keg, domain, rr, flags);
|
2019-11-28 07:49:25 +00:00
|
|
|
if (slab != NULL)
|
2002-10-24 07:59:03 +00:00
|
|
|
return (slab);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* M_NOVM means don't ask at all!
|
|
|
|
*/
|
|
|
|
if (flags & M_NOVM)
|
|
|
|
break;
|
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
KASSERT(zone->uz_max_items == 0 ||
|
|
|
|
zone->uz_items <= zone->uz_max_items,
|
|
|
|
("%s: zone %p overflow", __func__, zone));
|
|
|
|
|
2019-01-23 18:58:15 +00:00
|
|
|
slab = keg_alloc_slab(keg, zone, domain, flags, aflags);
|
2004-01-30 16:26:29 +00:00
|
|
|
/*
|
2002-10-24 07:59:03 +00:00
|
|
|
* If we got a slab here it's safe to mark it partially used
|
|
|
|
* and return. We assume that the caller is going to remove
|
|
|
|
* at least one item.
|
|
|
|
*/
|
|
|
|
if (slab) {
|
2018-01-12 23:25:05 +00:00
|
|
|
dom = &keg->uk_domain[slab->us_domain];
|
|
|
|
LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
|
2002-10-24 07:59:03 +00:00
|
|
|
return (slab);
|
|
|
|
}
|
2018-10-24 16:41:47 +00:00
|
|
|
KEG_LOCK(keg);
|
|
|
|
if (rr && vm_domainset_iter_policy(&di, &domain) != 0) {
|
|
|
|
if ((flags & M_WAITOK) != 0) {
|
|
|
|
KEG_UNLOCK(keg);
|
|
|
|
vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
|
|
|
|
KEG_LOCK(keg);
|
|
|
|
goto restart;
|
|
|
|
}
|
|
|
|
break;
|
2018-10-01 14:14:21 +00:00
|
|
|
}
|
2002-10-24 07:59:03 +00:00
|
|
|
}
|
2018-01-12 23:25:05 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We might not have been able to get a slab but another cpu
|
|
|
|
* could have while we were unlocked. Check again before we
|
|
|
|
* fail.
|
|
|
|
*/
|
2018-10-24 16:41:47 +00:00
|
|
|
if ((slab = keg_fetch_free_slab(keg, domain, rr, flags)) != NULL) {
|
2018-01-12 23:25:05 +00:00
|
|
|
return (slab);
|
|
|
|
}
|
|
|
|
return (NULL);
|
2002-10-24 07:59:03 +00:00
|
|
|
}
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2003-07-30 18:55:15 +00:00
|
|
|
static void *
|
2013-06-17 03:43:47 +00:00
|
|
|
slab_alloc_item(uma_keg_t keg, uma_slab_t slab)
|
2002-10-24 07:59:03 +00:00
|
|
|
{
|
2018-01-12 23:25:05 +00:00
|
|
|
uma_domain_t dom;
|
2002-10-24 07:59:03 +00:00
|
|
|
void *item;
|
2013-04-09 17:43:48 +00:00
|
|
|
uint8_t freei;
|
2004-01-30 16:26:29 +00:00
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
KEG_LOCK_ASSERT(keg);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
|
2019-12-02 22:44:34 +00:00
|
|
|
freei = BIT_FFS(keg->uk_ipers, &slab->us_free) - 1;
|
|
|
|
BIT_CLR(keg->uk_ipers, freei, &slab->us_free);
|
2019-12-08 01:15:06 +00:00
|
|
|
item = slab_item(slab, keg, freei);
|
2002-10-24 07:59:03 +00:00
|
|
|
slab->us_freecount--;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
keg->uk_free--;
|
2013-06-13 21:05:38 +00:00
|
|
|
|
2002-10-24 07:59:03 +00:00
|
|
|
/* Move this slab to the full list */
|
|
|
|
if (slab->us_freecount == 0) {
|
|
|
|
LIST_REMOVE(slab, us_link);
|
2018-01-12 23:25:05 +00:00
|
|
|
dom = &keg->uk_domain[slab->us_domain];
|
|
|
|
LIST_INSERT_HEAD(&dom->ud_full_slab, slab, us_link);
|
2002-10-24 07:59:03 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return (item);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2019-12-04 18:40:05 +00:00
|
|
|
zone_import(void *arg, void **bucket, int max, int domain, int flags)
|
2002-10-24 07:59:03 +00:00
|
|
|
{
|
2019-12-04 18:40:05 +00:00
|
|
|
uma_zone_t zone;
|
2002-10-24 07:59:03 +00:00
|
|
|
uma_slab_t slab;
|
2009-01-25 09:11:24 +00:00
|
|
|
uma_keg_t keg;
|
2018-07-07 13:37:44 +00:00
|
|
|
#ifdef NUMA
|
2018-01-12 23:25:05 +00:00
|
|
|
int stripe;
|
2018-07-07 13:37:44 +00:00
|
|
|
#endif
|
2013-06-17 03:43:47 +00:00
|
|
|
int i;
|
2002-06-17 22:02:41 +00:00
|
|
|
|
2019-12-04 18:40:05 +00:00
|
|
|
zone = arg;
|
2013-06-17 03:43:47 +00:00
|
|
|
slab = NULL;
|
2019-11-28 07:49:25 +00:00
|
|
|
keg = zone->uz_keg;
|
|
|
|
KEG_LOCK(keg);
|
2013-06-20 19:08:12 +00:00
|
|
|
/* Try to keep the buckets totally full */
|
2013-06-17 03:43:47 +00:00
|
|
|
for (i = 0; i < max; ) {
|
2019-11-28 07:49:25 +00:00
|
|
|
if ((slab = keg_fetch_slab(keg, zone, domain, flags)) == NULL)
|
2013-06-17 03:43:47 +00:00
|
|
|
break;
|
2018-07-07 13:37:44 +00:00
|
|
|
#ifdef NUMA
|
2018-01-12 23:25:05 +00:00
|
|
|
stripe = howmany(max, vm_ndomains);
|
2018-07-07 13:37:44 +00:00
|
|
|
#endif
|
2013-06-26 00:57:38 +00:00
|
|
|
while (slab->us_freecount && i < max) {
|
2013-06-17 03:43:47 +00:00
|
|
|
bucket[i++] = slab_alloc_item(keg, slab);
|
2013-06-26 00:57:38 +00:00
|
|
|
if (keg->uk_free <= keg->uk_reserve)
|
|
|
|
break;
|
2018-01-14 03:36:03 +00:00
|
|
|
#ifdef NUMA
|
2018-01-12 23:25:05 +00:00
|
|
|
/*
|
|
|
|
* If the zone is striped we pick a new slab for every
|
|
|
|
* N allocations. Eliminating this conditional will
|
|
|
|
* instead pick a new domain for each bucket rather
|
|
|
|
* than stripe within each bucket. The current option
|
|
|
|
* produces more fragmentation and requires more cpu
|
|
|
|
* time but yields better distribution.
|
|
|
|
*/
|
|
|
|
if ((zone->uz_flags & UMA_ZONE_NUMA) == 0 &&
|
|
|
|
vm_ndomains > 1 && --stripe == 0)
|
|
|
|
break;
|
|
|
|
#endif
|
2013-06-26 00:57:38 +00:00
|
|
|
}
|
2018-01-12 23:25:05 +00:00
|
|
|
/* Don't block if we allocated any successfully. */
|
2013-06-17 03:43:47 +00:00
|
|
|
flags &= ~M_WAITOK;
|
|
|
|
flags |= M_NOWAIT;
|
2002-10-24 07:59:03 +00:00
|
|
|
}
|
2019-11-28 07:49:25 +00:00
|
|
|
KEG_UNLOCK(keg);
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2013-06-17 03:43:47 +00:00
|
|
|
return i;
|
|
|
|
}
|
|
|
|
|
2013-06-18 04:50:20 +00:00
|
|
|
static uma_bucket_t
|
2019-11-26 22:17:02 +00:00
|
|
|
zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
|
2013-06-17 03:43:47 +00:00
|
|
|
{
|
|
|
|
uma_bucket_t bucket;
|
2019-11-26 22:17:02 +00:00
|
|
|
int maxbucket, cnt;
|
2002-10-24 07:59:03 +00:00
|
|
|
|
2018-10-01 14:14:21 +00:00
|
|
|
CTR1(KTR_UMA, "zone_alloc:_bucket domain %d)", domain);
|
|
|
|
|
2019-08-06 21:50:34 +00:00
|
|
|
/* Avoid allocs targeting empty domains. */
|
|
|
|
if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
|
|
|
|
domain = UMA_ANYDOMAIN;
|
|
|
|
|
2019-11-26 22:17:02 +00:00
|
|
|
if (zone->uz_max_items > 0) {
|
|
|
|
if (zone->uz_items >= zone->uz_max_items)
|
|
|
|
return (false);
|
2019-11-28 00:19:09 +00:00
|
|
|
maxbucket = MIN(zone->uz_bucket_size,
|
2019-11-26 22:17:02 +00:00
|
|
|
zone->uz_max_items - zone->uz_items);
|
|
|
|
zone->uz_items += maxbucket;
|
|
|
|
} else
|
2019-11-28 00:19:09 +00:00
|
|
|
maxbucket = zone->uz_bucket_size;
|
2019-11-26 22:17:02 +00:00
|
|
|
ZONE_UNLOCK(zone);
|
|
|
|
|
2013-06-26 00:57:38 +00:00
|
|
|
/* Don't wait for buckets, preserve caller's NOVM setting. */
|
|
|
|
bucket = bucket_alloc(zone, udata, M_NOWAIT | (flags & M_NOVM));
|
2019-11-26 22:17:02 +00:00
|
|
|
if (bucket == NULL) {
|
|
|
|
cnt = 0;
|
|
|
|
goto out;
|
|
|
|
}
|
2013-06-17 03:43:47 +00:00
|
|
|
|
|
|
|
bucket->ub_cnt = zone->uz_import(zone->uz_arg, bucket->ub_bucket,
|
2019-11-26 22:17:02 +00:00
|
|
|
MIN(maxbucket, bucket->ub_entries), domain, flags);
|
2002-04-08 02:42:55 +00:00
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
/*
|
2013-06-17 03:43:47 +00:00
|
|
|
* Initialize the memory if necessary.
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
*/
|
2013-06-17 03:43:47 +00:00
|
|
|
if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
int i;
|
|
|
|
|
2013-06-17 03:43:47 +00:00
|
|
|
for (i = 0; i < bucket->ub_cnt; i++)
|
2009-01-25 09:11:24 +00:00
|
|
|
if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
|
2013-06-17 03:43:47 +00:00
|
|
|
flags) != 0)
|
2004-08-02 00:18:36 +00:00
|
|
|
break;
|
|
|
|
/*
|
|
|
|
* If we couldn't initialize the whole bucket, put the
|
|
|
|
* rest back onto the freelist.
|
|
|
|
*/
|
|
|
|
if (i != bucket->ub_cnt) {
|
2013-06-20 19:08:12 +00:00
|
|
|
zone->uz_release(zone->uz_arg, &bucket->ub_bucket[i],
|
2013-06-17 03:43:47 +00:00
|
|
|
bucket->ub_cnt - i);
|
2004-10-27 21:19:35 +00:00
|
|
|
#ifdef INVARIANTS
|
2013-06-17 03:43:47 +00:00
|
|
|
bzero(&bucket->ub_bucket[i],
|
|
|
|
sizeof(void *) * (bucket->ub_cnt - i));
|
2004-10-27 21:19:35 +00:00
|
|
|
#endif
|
2004-08-02 00:18:36 +00:00
|
|
|
bucket->ub_cnt = i;
|
|
|
|
}
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
}
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2019-11-26 22:17:02 +00:00
|
|
|
cnt = bucket->ub_cnt;
|
2013-11-27 20:16:18 +00:00
|
|
|
if (bucket->ub_cnt == 0) {
|
|
|
|
bucket_free(zone, bucket, udata);
|
2019-01-15 18:24:34 +00:00
|
|
|
counter_u64_add(zone->uz_fails, 1);
|
2019-11-26 22:17:02 +00:00
|
|
|
bucket = NULL;
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
ZONE_LOCK(zone);
|
|
|
|
if (zone->uz_max_items > 0 && cnt < maxbucket) {
|
|
|
|
MPASS(zone->uz_items >= maxbucket - cnt);
|
|
|
|
zone->uz_items -= maxbucket - cnt;
|
|
|
|
if (zone->uz_sleepers > 0 &&
|
|
|
|
(cnt == 0 ? zone->uz_items + 1 : zone->uz_items) <
|
|
|
|
zone->uz_max_items)
|
|
|
|
wakeup_one(zone);
|
2002-10-24 07:59:03 +00:00
|
|
|
}
|
|
|
|
|
2013-06-18 04:50:20 +00:00
|
|
|
return (bucket);
|
2002-10-24 07:59:03 +00:00
|
|
|
}
|
2013-06-18 04:50:20 +00:00
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
|
2013-06-17 03:43:47 +00:00
|
|
|
* Allocates a single item from a zone.
|
2002-03-19 09:11:49 +00:00
|
|
|
*
|
|
|
|
* Arguments
|
|
|
|
* zone The zone to alloc for.
|
|
|
|
* udata The data to be passed to the constructor.
|
2018-01-12 23:25:05 +00:00
|
|
|
* domain The domain to allocate from or UMA_ANYDOMAIN.
|
2003-02-19 05:47:46 +00:00
|
|
|
* flags M_WAITOK, M_NOWAIT, M_ZERO.
|
2002-03-19 09:11:49 +00:00
|
|
|
*
|
|
|
|
* Returns
|
|
|
|
* NULL if there is no memory and M_NOWAIT is set
|
2002-10-24 07:59:03 +00:00
|
|
|
* An item if successful
|
2002-03-19 09:11:49 +00:00
|
|
|
*/
|
|
|
|
|
|
|
|
static void *
|
2018-01-12 23:25:05 +00:00
|
|
|
zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
ZONE_LOCK(zone);
|
|
|
|
return (zone_alloc_item_locked(zone, udata, domain, flags));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Returns with zone unlocked.
|
|
|
|
*/
|
|
|
|
static void *
|
|
|
|
zone_alloc_item_locked(uma_zone_t zone, void *udata, int domain, int flags)
|
2002-03-19 09:11:49 +00:00
|
|
|
{
|
|
|
|
void *item;
|
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
ZONE_LOCK_ASSERT(zone);
|
|
|
|
|
2019-01-15 18:32:26 +00:00
|
|
|
if (zone->uz_max_items > 0) {
|
|
|
|
if (zone->uz_items >= zone->uz_max_items) {
|
|
|
|
zone_log_warning(zone);
|
|
|
|
zone_maxaction(zone);
|
|
|
|
if (flags & M_NOWAIT) {
|
|
|
|
ZONE_UNLOCK(zone);
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
zone->uz_sleeps++;
|
|
|
|
zone->uz_sleepers++;
|
|
|
|
while (zone->uz_items >= zone->uz_max_items)
|
2019-01-15 18:50:11 +00:00
|
|
|
mtx_sleep(zone, zone->uz_lockptr, PVM,
|
|
|
|
"zonelimit", 0);
|
2019-01-15 18:32:26 +00:00
|
|
|
zone->uz_sleepers--;
|
|
|
|
if (zone->uz_sleepers > 0 &&
|
|
|
|
zone->uz_items + 1 < zone->uz_max_items)
|
|
|
|
wakeup_one(zone);
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
}
|
2019-01-15 18:32:26 +00:00
|
|
|
zone->uz_items++;
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
}
|
|
|
|
ZONE_UNLOCK(zone);
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2019-08-06 21:50:34 +00:00
|
|
|
/* Avoid allocs targeting empty domains. */
|
|
|
|
if (domain != UMA_ANYDOMAIN && VM_DOMAIN_EMPTY(domain))
|
|
|
|
domain = UMA_ANYDOMAIN;
|
|
|
|
|
2018-01-12 23:25:05 +00:00
|
|
|
if (zone->uz_import(zone->uz_arg, &item, 1, domain, flags) != 1)
|
2019-11-26 22:17:02 +00:00
|
|
|
goto fail_cnt;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
/*
|
|
|
|
* We have to call both the zone's init (not the keg's init)
|
|
|
|
* and the zone's ctor. This is because the item is going from
|
|
|
|
* a keg slab directly to the user, and the user is expecting it
|
|
|
|
* to be both zone-init'd as well as zone-ctor'd.
|
|
|
|
*/
|
2004-08-02 00:18:36 +00:00
|
|
|
if (zone->uz_init != NULL) {
|
2009-01-25 09:11:24 +00:00
|
|
|
if (zone->uz_init(item, zone->uz_size, flags) != 0) {
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
|
2019-11-26 22:17:02 +00:00
|
|
|
goto fail_cnt;
|
2004-08-02 00:18:36 +00:00
|
|
|
}
|
|
|
|
}
|
2019-11-26 22:17:02 +00:00
|
|
|
item = item_ctor(zone, udata, flags, item);
|
|
|
|
if (item == NULL)
|
2018-06-08 00:15:08 +00:00
|
|
|
goto fail;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2019-01-15 18:24:34 +00:00
|
|
|
counter_u64_add(zone->uz_allocs, 1);
|
2017-06-01 18:36:52 +00:00
|
|
|
CTR3(KTR_UMA, "zone_alloc_item item %p from %s(%p)", item,
|
|
|
|
zone->uz_name, zone);
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
return (item);
|
2013-06-17 03:43:47 +00:00
|
|
|
|
2019-11-26 22:17:02 +00:00
|
|
|
fail_cnt:
|
|
|
|
counter_u64_add(zone->uz_fails, 1);
|
2013-06-17 03:43:47 +00:00
|
|
|
fail:
|
2019-01-15 18:32:26 +00:00
|
|
|
if (zone->uz_max_items > 0) {
|
|
|
|
ZONE_LOCK(zone);
|
2019-11-26 22:17:02 +00:00
|
|
|
/* XXX Decrement without wakeup */
|
2019-01-15 18:32:26 +00:00
|
|
|
zone->uz_items--;
|
|
|
|
ZONE_UNLOCK(zone);
|
|
|
|
}
|
2017-06-01 18:36:52 +00:00
|
|
|
CTR2(KTR_UMA, "zone_alloc_item failed from %s(%p)",
|
|
|
|
zone->uz_name, zone);
|
2013-06-17 03:43:47 +00:00
|
|
|
return (NULL);
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* See uma.h */
|
|
|
|
void
|
|
|
|
uma_zfree_arg(uma_zone_t zone, void *item, void *udata)
|
|
|
|
{
|
|
|
|
uma_cache_t cache;
|
|
|
|
uma_bucket_t bucket;
|
2019-11-27 23:19:06 +00:00
|
|
|
int cpu, domain, itemdomain;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2015-08-22 12:59:05 +00:00
|
|
|
/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
|
2018-08-26 12:51:46 +00:00
|
|
|
random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);
|
This is the much-discussed major upgrade to the random(4) device, known to you all as /dev/random.
This code has had an extensive rewrite and a good series of reviews, both by the author and other parties. This means a lot of code has been simplified. Pluggable structures for high-rate entropy generators are available, and it is most definitely not the case that /dev/random can be driven by only a hardware souce any more. This has been designed out of the device. Hardware sources are stirred into the CSPRNG (Yarrow, Fortuna) like any other entropy source. Pluggable modules may be written by third parties for additional sources.
The harvesting structures and consequently the locking have been simplified. Entropy harvesting is done in a more general way (the documentation for this will follow). There is some GREAT entropy to be had in the UMA allocator, but it is disabled for now as messing with that is likely to annoy many people.
The venerable (but effective) Yarrow algorithm, which is no longer supported by its authors now has an alternative, Fortuna. For now, Yarrow is retained as the default algorithm, but this may be changed using a kernel option. It is intended to make Fortuna the default algorithm for 11.0. Interested parties are encouraged to read ISBN 978-0-470-47424-2 "Cryptography Engineering" By Ferguson, Schneier and Kohno for Fortuna's gory details. Heck, read it anyway.
Many thanks to Arthur Mesh who did early grunt work, and who got caught in the crossfire rather more than he deserved to.
My thanks also to folks who helped me thresh this out on whiteboards and in the odd "Hallway track", or otherwise.
My Nomex pants are on. Let the feedback commence!
Reviewed by: trasz,des(partial),imp(partial?),rwatson(partial?)
Approved by: so(des)
2014-10-30 21:21:53 +00:00
|
|
|
|
2004-08-06 21:52:38 +00:00
|
|
|
CTR2(KTR_UMA, "uma_zfree_arg thread %x zone %s", curthread,
|
|
|
|
zone->uz_name);
|
|
|
|
|
2015-12-11 20:05:07 +00:00
|
|
|
KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
|
2015-11-19 14:04:53 +00:00
|
|
|
("uma_zfree_arg: called with spinlock or critical section held"));
|
|
|
|
|
2010-10-19 16:06:00 +00:00
|
|
|
/* uma_zfree(..., NULL) does nothing, to match free(9). */
|
|
|
|
if (item == NULL)
|
|
|
|
return;
|
2011-10-12 18:08:28 +00:00
|
|
|
#ifdef DEBUG_MEMGUARD
|
|
|
|
if (is_memguard_addr(item)) {
|
2016-06-01 22:31:35 +00:00
|
|
|
if (zone->uz_dtor != NULL)
|
2011-10-12 18:08:28 +00:00
|
|
|
zone->uz_dtor(item, zone->uz_size, udata);
|
2016-06-01 22:31:35 +00:00
|
|
|
if (zone->uz_fini != NULL)
|
2011-10-12 18:08:28 +00:00
|
|
|
zone->uz_fini(item, zone->uz_size);
|
|
|
|
memguard_free(item);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
#endif
|
2019-11-27 19:49:55 +00:00
|
|
|
item_dtor(zone, item, udata, SKIP_NONE);
|
2013-06-13 21:05:38 +00:00
|
|
|
|
2002-04-14 01:56:25 +00:00
|
|
|
/*
|
|
|
|
* The race here is acceptable. If we miss it we'll just have to wait
|
|
|
|
* a little longer for the limits to be reset.
|
|
|
|
*/
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
if (zone->uz_sleepers > 0)
|
2013-06-18 04:50:20 +00:00
|
|
|
goto zfree_item;
|
2002-04-14 01:56:25 +00:00
|
|
|
|
Modify UMA to use critical sections to protect per-CPU caches, rather than
mutexes, which offers lower overhead on both UP and SMP. When allocating
from or freeing to the per-cpu cache, without INVARIANTS enabled, we now
no longer perform any mutex operations, which offers a 1%-3% performance
improvement in a variety of micro-benchmarks. We rely on critical
sections to prevent (a) preemption resulting in reentrant access to UMA on
a single CPU, and (b) migration of the thread during access. In the event
we need to go back to the zone for a new bucket, we release the critical
section to acquire the global zone mutex, and must re-acquire the critical
section and re-evaluate which cache we are accessing in case migration has
occured, or circumstances have changed in the current cache.
Per-CPU cache statistics are now gathered lock-free by the sysctl, which
can result in small races in statistics reporting for caches.
Reviewed by: bmilekic, jeff (somewhat)
Tested by: rwatson, kris, gnn, scottl, mike at sentex dot net, others
2005-04-29 18:56:36 +00:00
|
|
|
/*
|
|
|
|
* If possible, free to the per-CPU cache. There are two
|
|
|
|
* requirements for safe access to the per-CPU cache: (1) the thread
|
|
|
|
* accessing the cache must not be preempted or yield during access,
|
|
|
|
* and (2) the thread must not migrate CPUs without switching which
|
|
|
|
* cache it accesses. We rely on a critical section to prevent
|
|
|
|
* preemption and migration. We release the critical section in
|
|
|
|
* order to acquire the zone mutex if we are unable to free to the
|
|
|
|
* current cache; when we re-acquire the critical section, we must
|
|
|
|
* detect and handle migration if it has occurred.
|
|
|
|
*/
|
2019-11-27 23:19:06 +00:00
|
|
|
domain = itemdomain = 0;
|
Modify UMA to use critical sections to protect per-CPU caches, rather than
mutexes, which offers lower overhead on both UP and SMP. When allocating
from or freeing to the per-cpu cache, without INVARIANTS enabled, we now
no longer perform any mutex operations, which offers a 1%-3% performance
improvement in a variety of micro-benchmarks. We rely on critical
sections to prevent (a) preemption resulting in reentrant access to UMA on
a single CPU, and (b) migration of the thread during access. In the event
we need to go back to the zone for a new bucket, we release the critical
section to acquire the global zone mutex, and must re-acquire the critical
section and re-evaluate which cache we are accessing in case migration has
occured, or circumstances have changed in the current cache.
Per-CPU cache statistics are now gathered lock-free by the sysctl, which
can result in small races in statistics reporting for caches.
Reviewed by: bmilekic, jeff (somewhat)
Tested by: rwatson, kris, gnn, scottl, mike at sentex dot net, others
2005-04-29 18:56:36 +00:00
|
|
|
critical_enter();
|
2019-11-27 23:19:06 +00:00
|
|
|
do {
|
|
|
|
cpu = curcpu;
|
|
|
|
cache = &zone->uz_cpu[cpu];
|
|
|
|
bucket = cache->uc_allocbucket;
|
2019-08-06 21:50:34 +00:00
|
|
|
#ifdef UMA_XDOMAIN
|
2019-11-27 23:19:06 +00:00
|
|
|
if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
|
|
|
|
itemdomain = _vm_phys_domain(pmap_kextract((vm_offset_t)item));
|
|
|
|
domain = PCPU_GET(domain);
|
|
|
|
}
|
|
|
|
if ((zone->uz_flags & UMA_ZONE_NUMA) != 0 &&
|
|
|
|
domain != itemdomain) {
|
|
|
|
bucket = cache->uc_crossbucket;
|
|
|
|
} else
|
2019-08-06 21:50:34 +00:00
|
|
|
#endif
|
2019-11-27 23:19:06 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Try to free into the allocbucket first to give LIFO ordering
|
|
|
|
* for cache-hot datastructures. Spill over into the freebucket
|
|
|
|
* if necessary. Alloc will swap them if one runs dry.
|
|
|
|
*/
|
|
|
|
if (bucket == NULL || bucket->ub_cnt >= bucket->ub_entries)
|
|
|
|
bucket = cache->uc_freebucket;
|
|
|
|
if (__predict_true(bucket != NULL &&
|
|
|
|
bucket->ub_cnt < bucket->ub_entries)) {
|
|
|
|
bucket_push(zone, cache, bucket, item);
|
|
|
|
critical_exit();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
} while (cache_free(zone, cache, udata, item, itemdomain));
|
|
|
|
critical_exit();
|
|
|
|
|
2013-06-18 04:50:20 +00:00
|
|
|
/*
|
2019-11-27 23:19:06 +00:00
|
|
|
* If nothing else caught this, we'll just do an internal free.
|
2013-06-18 04:50:20 +00:00
|
|
|
*/
|
2019-11-27 23:19:06 +00:00
|
|
|
zfree_item:
|
|
|
|
zone_free_item(zone, item, udata, SKIP_DTOR);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Hand a full per-CPU bucket back to the zone layer.
 *
 * Arguments:
 *	zone		The zone the bucket's items belong to.
 *	bucket		A full bucket (ub_cnt == ub_entries) detached from a
 *			per-CPU cache.
 *	udata		Caller data forwarded to bucket_free().
 *	domain		The CPU's NUMA domain at detach time.
 *	itemdomain	The domain the bucket's items belong to; differs from
 *			'domain' only for cross-domain frees.
 *
 * Called without the zone lock held; acquires and releases it internally.
 * The bucket is either cached in the zone's per-domain bucket list or, if
 * the cache is full (or the bucket cannot be sorted), drained back to the
 * slab layer and freed.
 */
static void
zone_free_bucket(uma_zone_t zone, uma_bucket_t bucket, void *udata,
    int domain, int itemdomain)
{
	uma_zone_domain_t zdom;

#ifdef UMA_XDOMAIN
	/*
	 * Buckets coming from the wrong domain will be entirely for the
	 * only other domain on two domain systems.  In this case we can
	 * simply cache them.  Otherwise we need to sort them back to
	 * correct domains by freeing the contents to the slab layer.
	 */
	if (domain != itemdomain && vm_ndomains > 2) {
		CTR3(KTR_UMA,
		    "uma_zfree: zone %s(%p) draining cross bucket %p",
		    zone->uz_name, zone, bucket);
		bucket_drain(zone, bucket);
		bucket_free(zone, bucket, udata);
		return;
	}
#endif
	/*
	 * Attempt to save the bucket in the zone's domain bucket cache.
	 *
	 * We bump the uz count when the cache size is insufficient to
	 * handle the working set.
	 */
	if (ZONE_TRYLOCK(zone) == 0) {
		/* Record contention to size the buckets. */
		ZONE_LOCK(zone);
		/* Grow the per-CPU bucket size, bounded by the zone max. */
		if (zone->uz_bucket_size < zone->uz_bucket_size_max)
			zone->uz_bucket_size++;
	}

	CTR3(KTR_UMA,
	    "uma_zfree: zone %s(%p) putting bucket %p on free list",
	    zone->uz_name, zone, bucket);
	/* ub_cnt is pointing to the last free item */
	KASSERT(bucket->ub_cnt == bucket->ub_entries,
	    ("uma_zfree: Attempting to insert partial  bucket onto the full list.\n"));
	if (zone->uz_bkt_count >= zone->uz_bkt_max) {
		/* Zone bucket cache is full: release items to the slab layer. */
		ZONE_UNLOCK(zone);
		bucket_drain(zone, bucket);
		bucket_free(zone, bucket, udata);
	} else {
		/* Cache the full bucket on the item domain's bucket list. */
		zdom = &zone->uz_domain[itemdomain];
		zone_put_bucket(zone, zdom, bucket, true);
		ZONE_UNLOCK(zone);
	}
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Populate a free or cross bucket for the current cpu cache. Free any
|
|
|
|
* existing full bucket either to the zone cache or back to the slab layer.
|
|
|
|
*
|
|
|
|
* Enters and returns in a critical section. false return indicates that
|
|
|
|
* we can not satisfy this free in the cache layer. true indicates that
|
|
|
|
* the caller should retry.
|
|
|
|
*/
|
|
|
|
static __noinline bool
|
|
|
|
cache_free(uma_zone_t zone, uma_cache_t cache, void *udata, void *item,
|
|
|
|
int itemdomain)
|
|
|
|
{
|
|
|
|
uma_bucket_t bucket;
|
|
|
|
int cpu, domain;
|
|
|
|
|
|
|
|
CRITICAL_ASSERT(curthread);
|
|
|
|
|
2019-11-28 00:19:09 +00:00
|
|
|
if (zone->uz_bucket_size == 0 || bucketdisable)
|
2019-11-27 23:19:06 +00:00
|
|
|
return false;
|
|
|
|
|
Modify UMA to use critical sections to protect per-CPU caches, rather than
mutexes, which offers lower overhead on both UP and SMP. When allocating
from or freeing to the per-cpu cache, without INVARIANTS enabled, we now
no longer perform any mutex operations, which offers a 1%-3% performance
improvement in a variety of micro-benchmarks. We rely on critical
sections to prevent (a) preemption resulting in reentrant access to UMA on
a single CPU, and (b) migration of the thread during access. In the event
we need to go back to the zone for a new bucket, we release the critical
section to acquire the global zone mutex, and must re-acquire the critical
section and re-evaluate which cache we are accessing in case migration has
occured, or circumstances have changed in the current cache.
Per-CPU cache statistics are now gathered lock-free by the sysctl, which
can result in small races in statistics reporting for caches.
Reviewed by: bmilekic, jeff (somewhat)
Tested by: rwatson, kris, gnn, scottl, mike at sentex dot net, others
2005-04-29 18:56:36 +00:00
|
|
|
cpu = curcpu;
|
|
|
|
cache = &zone->uz_cpu[cpu];
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2019-11-27 23:19:06 +00:00
|
|
|
/*
|
|
|
|
* NUMA domains need to free to the correct zdom. When XDOMAIN
|
|
|
|
* is enabled this is the zdom of the item and the bucket may be
|
|
|
|
* the cross bucket if they do not match.
|
|
|
|
*/
|
|
|
|
if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
|
2019-08-06 21:50:34 +00:00
|
|
|
#ifdef UMA_XDOMAIN
|
2019-11-27 23:19:06 +00:00
|
|
|
domain = PCPU_GET(domain);
|
|
|
|
#else
|
|
|
|
itemdomain = domain = PCPU_GET(domain);
|
2019-08-06 21:50:34 +00:00
|
|
|
#endif
|
2019-11-27 23:19:06 +00:00
|
|
|
else
|
|
|
|
itemdomain = domain = 0;
|
2019-08-06 21:50:34 +00:00
|
|
|
#ifdef UMA_XDOMAIN
|
2019-11-27 23:19:06 +00:00
|
|
|
if (domain != itemdomain) {
|
|
|
|
bucket = cache->uc_crossbucket;
|
2019-08-06 21:50:34 +00:00
|
|
|
cache->uc_crossbucket = NULL;
|
2019-11-27 23:19:06 +00:00
|
|
|
if (bucket != NULL)
|
|
|
|
atomic_add_64(&zone->uz_xdomain, bucket->ub_cnt);
|
|
|
|
} else
|
2019-08-06 21:50:34 +00:00
|
|
|
#endif
|
2019-11-27 23:19:06 +00:00
|
|
|
{
|
|
|
|
bucket = cache->uc_freebucket;
|
2019-08-06 21:50:34 +00:00
|
|
|
cache->uc_freebucket = NULL;
|
2019-11-27 23:19:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2016-07-20 01:01:50 +00:00
|
|
|
/* We are no longer associated with this CPU. */
|
|
|
|
critical_exit();
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2019-11-27 23:19:06 +00:00
|
|
|
if (bucket != NULL)
|
|
|
|
zone_free_bucket(zone, bucket, udata, domain, itemdomain);
|
|
|
|
|
|
|
|
bucket = bucket_alloc(zone, udata, M_NOWAIT);
|
|
|
|
CTR3(KTR_UMA, "uma_zfree: zone %s(%p) allocated bucket %p",
|
|
|
|
zone->uz_name, zone, bucket);
|
|
|
|
critical_enter();
|
|
|
|
if (bucket == NULL)
|
|
|
|
return (false);
|
|
|
|
cpu = curcpu;
|
|
|
|
cache = &zone->uz_cpu[cpu];
|
2019-08-06 21:50:34 +00:00
|
|
|
#ifdef UMA_XDOMAIN
|
2019-11-27 23:19:06 +00:00
|
|
|
/*
|
|
|
|
* Check to see if we should be populating the cross bucket. If it
|
|
|
|
* is already populated we will fall through and attempt to populate
|
|
|
|
* the free bucket.
|
|
|
|
*/
|
|
|
|
if ((zone->uz_flags & UMA_ZONE_NUMA) != 0) {
|
|
|
|
domain = PCPU_GET(domain);
|
|
|
|
if (domain != itemdomain && cache->uc_crossbucket == NULL) {
|
2019-08-06 21:50:34 +00:00
|
|
|
cache->uc_crossbucket = bucket;
|
2019-11-27 23:19:06 +00:00
|
|
|
return (true);
|
2019-08-06 21:50:34 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2013-11-19 10:17:10 +00:00
|
|
|
/*
|
2019-11-27 23:19:06 +00:00
|
|
|
* We may have lost the race to fill the bucket or switched CPUs.
|
2013-11-19 10:17:10 +00:00
|
|
|
*/
|
2019-11-27 23:19:06 +00:00
|
|
|
if (cache->uc_freebucket != NULL) {
|
2013-06-18 04:50:20 +00:00
|
|
|
critical_exit();
|
2013-06-26 00:57:38 +00:00
|
|
|
bucket_free(zone, bucket, udata);
|
2019-11-27 23:19:06 +00:00
|
|
|
critical_enter();
|
|
|
|
} else
|
|
|
|
cache->uc_freebucket = bucket;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2019-11-27 23:19:06 +00:00
|
|
|
return (true);
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
2018-01-12 23:25:05 +00:00
|
|
|
/*
 * Free an item that was allocated with uma_zalloc_domain(), bypassing the
 * per-CPU caches and releasing directly to the zone (SKIP_NONE: run dtor
 * and fini as appropriate).  A NULL item is a no-op, matching free(9).
 */
void
uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
{

	/* Enable entropy collection for RANDOM_ENABLE_UMA kernel option */
	random_harvest_fast_uma(&zone, sizeof(zone), RANDOM_UMA);

	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
	    zone->uz_name);

	/* zone_free_item() may sleep; forbid spinlocks/critical sections. */
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("uma_zfree_domain: called with spinlock or critical section held"));

	/* uma_zfree(..., NULL) does nothing, to match free(9). */
	if (item == NULL)
		return;
	zone_free_item(zone, item, udata, SKIP_NONE);
}
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
 * Return a single item to its slab, updating the slab's free bitmap and
 * the keg's per-domain slab lists.
 *
 * Arguments:
 *	zone	The zone the item was allocated from (must share the keg lock).
 *	slab	The slab the item belongs to.
 *	item	The item being freed.
 *
 * Called with the keg lock held.
 */
static void
slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item)
{
	uma_keg_t keg;
	uma_domain_t dom;
	uint8_t freei;

	keg = zone->uz_keg;
	MPASS(zone->uz_lockptr == &keg->uk_lock);
	KEG_LOCK_ASSERT(keg);

	dom = &keg->uk_domain[slab->us_domain];

	/* Do we need to remove from any lists? */
	/* This free makes the slab entirely free: move to the free list. */
	if (slab->us_freecount+1 == keg->uk_ipers) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&dom->ud_free_slab, slab, us_link);
	/* Slab was full: it becomes partial again. */
	} else if (slab->us_freecount == 0) {
		LIST_REMOVE(slab, us_link);
		LIST_INSERT_HEAD(&dom->ud_part_slab, slab, us_link);
	}

	/* Slab management. */
	freei = slab_item_index(slab, keg, item);
	BIT_SET(keg->uk_ipers, freei, &slab->us_free);
	slab->us_freecount++;

	/* Keg statistics. */
	keg->uk_free++;
}
|
|
|
|
|
|
|
|
/*
 * Zone release function (uz_release): return 'cnt' items from 'bucket'
 * to their slabs.  For each item the owning slab is located either by
 * pointer arithmetic (page-embedded or offpage-hashed headers) or via
 * vtoslab(), then slab_free_item() updates the slab under the keg lock.
 *
 * Arguments:
 *	arg	The zone (passed as uz_arg).
 *	bucket	Array of items to release.
 *	cnt	Number of items in the array.
 */
static void
zone_release(void *arg, void **bucket, int cnt)
{
	uma_zone_t zone;
	void *item;
	uma_slab_t slab;
	uma_keg_t keg;
	uint8_t *mem;
	int i;

	zone = arg;
	keg = zone->uz_keg;
	KEG_LOCK(keg);
	for (i = 0; i < cnt; i++) {
		item = bucket[i];
		if (!(zone->uz_flags & UMA_ZONE_VTOSLAB)) {
			/* Round down to the start of the slab's pages. */
			mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
			if (zone->uz_flags & UMA_ZONE_HASH) {
				/* Offpage slab header: look it up by address. */
				slab = hash_sfind(&keg->uk_hash, mem);
			} else {
				/* Slab header embedded at uk_pgoff. */
				mem += keg->uk_pgoff;
				slab = (uma_slab_t)mem;
			}
		} else
			slab = vtoslab((vm_offset_t)item);
		slab_free_item(zone, slab, item);
	}
	KEG_UNLOCK(keg);
}
|
|
|
|
|
2013-06-17 03:43:47 +00:00
|
|
|
/*
 * Frees a single item to any zone.
 *
 * Arguments:
 *	zone   The zone to free to
 *	item   The item we're freeing
 *	udata  User supplied data for the dtor
 *	skip   Skip dtors and finis
 */
static void
zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
{

	/* Run the destructor unless 'skip' says otherwise. */
	item_dtor(zone, item, udata, skip);

	if (skip < SKIP_FINI && zone->uz_fini)
		zone->uz_fini(item, zone->uz_size);

	/* Hand the item back to the zone's release function (slab layer). */
	zone->uz_release(zone->uz_arg, &item, 1);

	/* SKIP_CNT callers account frees/items themselves. */
	if (skip & SKIP_CNT)
		return;

	counter_u64_add(zone->uz_frees, 1);

	/* With a limit set, drop the item count and wake one sleeper. */
	if (zone->uz_max_items > 0) {
		ZONE_LOCK(zone);
		zone->uz_items--;
		if (zone->uz_sleepers > 0 &&
		    zone->uz_items < zone->uz_max_items)
			wakeup_one(zone);
		ZONE_UNLOCK(zone);
	}
}
|
|
|
|
|
2002-03-20 05:28:34 +00:00
|
|
|
/* See uma.h */
|
2010-10-16 04:41:45 +00:00
|
|
|
int
|
2002-03-20 05:28:34 +00:00
|
|
|
uma_zone_set_max(uma_zone_t zone, int nitems)
|
|
|
|
{
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
struct uma_bucket_zone *ubz;
|
2019-11-22 16:30:47 +00:00
|
|
|
int count;
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
|
|
|
|
ZONE_LOCK(zone);
|
2019-11-22 16:30:47 +00:00
|
|
|
ubz = bucket_zone_max(zone, nitems);
|
|
|
|
count = ubz != NULL ? ubz->ubz_entries : 0;
|
2019-11-28 00:19:09 +00:00
|
|
|
zone->uz_bucket_size_max = zone->uz_bucket_size = count;
|
|
|
|
if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
|
|
|
|
zone->uz_bucket_size_min = zone->uz_bucket_size_max;
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
zone->uz_max_items = nitems;
|
|
|
|
ZONE_UNLOCK(zone);
|
|
|
|
|
|
|
|
return (nitems);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* See uma.h */
void
uma_zone_set_maxcache(uma_zone_t zone, int nitems)
{
	struct uma_bucket_zone *ubz;
	int bpcpu;

	ZONE_LOCK(zone);
	ubz = bucket_zone_max(zone, nitems);
	if (ubz != NULL) {
		/*
		 * Reserve room for the buckets each CPU can hold (two per
		 * CPU here, plus the cross-domain bucket when UMA_XDOMAIN
		 * and the zone is NUMA-aware); only the remainder of
		 * nitems is left for the zone-level bucket cache.
		 */
		bpcpu = 2;
#ifdef UMA_XDOMAIN
		if ((zone->uz_flags & UMA_ZONE_NUMA) != 0)
			/* Count the cross-domain bucket. */
			bpcpu++;
#endif
		nitems -= ubz->ubz_entries * bpcpu * mp_ncpus;
		zone->uz_bucket_size_max = ubz->ubz_entries;
	} else {
		/* No suitable bucket zone: disable per-CPU bucket caching. */
		zone->uz_bucket_size_max = zone->uz_bucket_size = 0;
	}
	/* Keep the minimum bucket size consistent with the new maximum. */
	if (zone->uz_bucket_size_min > zone->uz_bucket_size_max)
		zone->uz_bucket_size_min = zone->uz_bucket_size_max;
	/* Whatever remains caps the zone-level (full bucket) cache. */
	zone->uz_bkt_max = nitems;
	ZONE_UNLOCK(zone);
}
|
|
|
|
|
2010-08-16 14:24:00 +00:00
|
|
|
/* See uma.h */
|
|
|
|
int
|
|
|
|
uma_zone_get_max(uma_zone_t zone)
|
|
|
|
{
|
|
|
|
int nitems;
|
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
ZONE_LOCK(zone);
|
|
|
|
nitems = zone->uz_max_items;
|
|
|
|
ZONE_UNLOCK(zone);
|
2010-08-16 14:24:00 +00:00
|
|
|
|
|
|
|
return (nitems);
|
|
|
|
}
|
|
|
|
|
2012-12-07 22:27:13 +00:00
|
|
|
/* See uma.h */
void
uma_zone_set_warning(uma_zone_t zone, const char *warning)
{

	/*
	 * Install the zone's warning string.  Only the pointer is
	 * stored, so the caller must keep the string valid for the
	 * lifetime of the zone.
	 */
	ZONE_LOCK(zone);
	zone->uz_warning = warning;
	ZONE_UNLOCK(zone);
}
|
|
|
|
|
2015-12-20 02:05:33 +00:00
|
|
|
/* See uma.h */
void
uma_zone_set_maxaction(uma_zone_t zone, uma_maxaction_t maxaction)
{

	ZONE_LOCK(zone);
	/*
	 * (Re)initialize the zone's task with the caller's handler; the
	 * cast adapts uma_maxaction_t to the taskqueue callback type,
	 * and the zone itself is passed as the callback argument.
	 */
	TASK_INIT(&zone->uz_maxaction, 0, (task_fn_t *)maxaction, zone);
	ZONE_UNLOCK(zone);
}
|
|
|
|
|
2010-10-16 04:14:45 +00:00
|
|
|
/* See uma.h */
|
|
|
|
int
|
|
|
|
uma_zone_get_cur(uma_zone_t zone)
|
|
|
|
{
|
|
|
|
int64_t nitems;
|
|
|
|
u_int i;
|
|
|
|
|
|
|
|
ZONE_LOCK(zone);
|
2019-01-15 18:24:34 +00:00
|
|
|
nitems = counter_u64_fetch(zone->uz_allocs) -
|
|
|
|
counter_u64_fetch(zone->uz_frees);
|
2019-11-28 00:19:09 +00:00
|
|
|
if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) == 0) {
|
|
|
|
CPU_FOREACH(i) {
|
|
|
|
/*
|
|
|
|
* See the comment in uma_vm_zone_stats() regarding
|
|
|
|
* the safety of accessing the per-cpu caches. With
|
|
|
|
* the zone lock held, it is safe, but can potentially
|
|
|
|
* result in stale data.
|
|
|
|
*/
|
|
|
|
nitems += zone->uz_cpu[i].uc_allocs -
|
|
|
|
zone->uz_cpu[i].uc_frees;
|
|
|
|
}
|
2010-10-16 04:14:45 +00:00
|
|
|
}
|
|
|
|
ZONE_UNLOCK(zone);
|
|
|
|
|
|
|
|
return (nitems < 0 ? 0 : nitems);
|
|
|
|
}
|
|
|
|
|
2019-11-28 00:19:09 +00:00
|
|
|
static uint64_t
|
|
|
|
uma_zone_get_allocs(uma_zone_t zone)
|
|
|
|
{
|
|
|
|
uint64_t nitems;
|
|
|
|
u_int i;
|
|
|
|
|
|
|
|
ZONE_LOCK(zone);
|
|
|
|
nitems = counter_u64_fetch(zone->uz_allocs);
|
|
|
|
if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) == 0) {
|
|
|
|
CPU_FOREACH(i) {
|
|
|
|
/*
|
|
|
|
* See the comment in uma_vm_zone_stats() regarding
|
|
|
|
* the safety of accessing the per-cpu caches. With
|
|
|
|
* the zone lock held, it is safe, but can potentially
|
|
|
|
* result in stale data.
|
|
|
|
*/
|
|
|
|
nitems += zone->uz_cpu[i].uc_allocs;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ZONE_UNLOCK(zone);
|
|
|
|
|
|
|
|
return (nitems);
|
|
|
|
}
|
|
|
|
|
|
|
|
static uint64_t
|
|
|
|
uma_zone_get_frees(uma_zone_t zone)
|
|
|
|
{
|
|
|
|
uint64_t nitems;
|
|
|
|
u_int i;
|
|
|
|
|
|
|
|
ZONE_LOCK(zone);
|
|
|
|
nitems = counter_u64_fetch(zone->uz_frees);
|
|
|
|
if ((zone->uz_flags & UMA_ZFLAG_INTERNAL) == 0) {
|
|
|
|
CPU_FOREACH(i) {
|
|
|
|
/*
|
|
|
|
* See the comment in uma_vm_zone_stats() regarding
|
|
|
|
* the safety of accessing the per-cpu caches. With
|
|
|
|
* the zone lock held, it is safe, but can potentially
|
|
|
|
* result in stale data.
|
|
|
|
*/
|
|
|
|
nitems += zone->uz_cpu[i].uc_frees;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ZONE_UNLOCK(zone);
|
|
|
|
|
|
|
|
return (nitems);
|
|
|
|
}
|
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
/* See uma.h */
void
uma_zone_set_init(uma_zone_t zone, uma_init uminit)
{
	uma_keg_t keg;

	KEG_GET(zone, keg);
	KEG_LOCK(keg);
	/*
	 * The init routine may only be changed while the keg holds no
	 * pages, i.e. before any slabs have been allocated (enforced
	 * by the assertion below).
	 */
	KASSERT(keg->uk_pages == 0,
	    ("uma_zone_set_init on non-empty keg"));
	keg->uk_init = uminit;
	KEG_UNLOCK(keg);
}
|
|
|
|
|
|
|
|
/* See uma.h */
|
|
|
|
void
|
|
|
|
uma_zone_set_fini(uma_zone_t zone, uma_fini fini)
|
|
|
|
{
|
2009-01-25 09:11:24 +00:00
|
|
|
uma_keg_t keg;
|
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
KEG_GET(zone, keg);
|
2013-06-20 19:08:12 +00:00
|
|
|
KEG_LOCK(keg);
|
2009-01-25 09:11:24 +00:00
|
|
|
KASSERT(keg->uk_pages == 0,
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
("uma_zone_set_fini on non-empty keg"));
|
2009-01-25 09:11:24 +00:00
|
|
|
keg->uk_fini = fini;
|
2013-06-20 19:08:12 +00:00
|
|
|
KEG_UNLOCK(keg);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* See uma.h */
|
|
|
|
void
|
|
|
|
uma_zone_set_zinit(uma_zone_t zone, uma_init zinit)
|
|
|
|
{
|
2013-06-20 19:08:12 +00:00
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
ZONE_LOCK(zone);
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
KASSERT(zone->uz_keg->uk_pages == 0,
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
("uma_zone_set_zinit on non-empty keg"));
|
|
|
|
zone->uz_init = zinit;
|
|
|
|
ZONE_UNLOCK(zone);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* See uma.h */
|
|
|
|
void
|
|
|
|
uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini)
|
|
|
|
{
|
2013-06-20 19:08:12 +00:00
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
ZONE_LOCK(zone);
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
KASSERT(zone->uz_keg->uk_pages == 0,
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
("uma_zone_set_zfini on non-empty keg"));
|
|
|
|
zone->uz_fini = zfini;
|
|
|
|
ZONE_UNLOCK(zone);
|
|
|
|
}
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/* See uma.h */
|
2004-08-02 00:18:36 +00:00
|
|
|
/* XXX uk_freef is not actually used with the zone locked */
|
2002-03-19 09:11:49 +00:00
|
|
|
void
|
|
|
|
uma_zone_set_freef(uma_zone_t zone, uma_free freef)
|
|
|
|
{
|
2013-06-17 03:43:47 +00:00
|
|
|
uma_keg_t keg;
|
2009-01-25 09:11:24 +00:00
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
KEG_GET(zone, keg);
|
2015-04-05 18:25:23 +00:00
|
|
|
KASSERT(keg != NULL, ("uma_zone_set_freef: Invalid zone type"));
|
2013-06-20 19:08:12 +00:00
|
|
|
KEG_LOCK(keg);
|
2013-06-17 03:43:47 +00:00
|
|
|
keg->uk_freef = freef;
|
2013-06-20 19:08:12 +00:00
|
|
|
KEG_UNLOCK(keg);
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* See uma.h */
|
2004-08-02 00:18:36 +00:00
|
|
|
/* XXX uk_allocf is not actually used with the zone locked */
|
2002-03-19 09:11:49 +00:00
|
|
|
void
|
|
|
|
uma_zone_set_allocf(uma_zone_t zone, uma_alloc allocf)
|
|
|
|
{
|
2009-01-25 09:11:24 +00:00
|
|
|
uma_keg_t keg;
|
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
KEG_GET(zone, keg);
|
2013-06-20 19:08:12 +00:00
|
|
|
KEG_LOCK(keg);
|
2009-01-25 09:11:24 +00:00
|
|
|
keg->uk_allocf = allocf;
|
2013-06-20 19:08:12 +00:00
|
|
|
KEG_UNLOCK(keg);
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
2013-06-26 00:57:38 +00:00
|
|
|
/* See uma.h */
|
|
|
|
void
|
|
|
|
uma_zone_reserve(uma_zone_t zone, int items)
|
|
|
|
{
|
|
|
|
uma_keg_t keg;
|
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
KEG_GET(zone, keg);
|
2013-06-26 00:57:38 +00:00
|
|
|
KEG_LOCK(keg);
|
|
|
|
keg->uk_reserve = items;
|
|
|
|
KEG_UNLOCK(keg);
|
|
|
|
}
|
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/* See uma.h */
|
|
|
|
int
|
2013-02-26 23:35:27 +00:00
|
|
|
uma_zone_reserve_kva(uma_zone_t zone, int count)
|
2002-03-19 09:11:49 +00:00
|
|
|
{
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
uma_keg_t keg;
|
2002-03-19 09:11:49 +00:00
|
|
|
vm_offset_t kva;
|
2015-08-10 17:16:49 +00:00
|
|
|
u_int pages;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
KEG_GET(zone, keg);
|
2002-03-19 09:11:49 +00:00
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
pages = count / keg->uk_ipers;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
if (pages * keg->uk_ipers < count)
|
2002-03-19 09:11:49 +00:00
|
|
|
pages++;
|
2017-03-11 16:43:38 +00:00
|
|
|
pages *= keg->uk_ppera;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
2013-02-26 23:35:27 +00:00
|
|
|
#ifdef UMA_MD_SMALL_ALLOC
|
|
|
|
if (keg->uk_ppera > 1) {
|
|
|
|
#else
|
|
|
|
if (1) {
|
|
|
|
#endif
|
2017-03-11 16:43:38 +00:00
|
|
|
kva = kva_alloc((vm_size_t)pages * PAGE_SIZE);
|
2013-02-26 23:35:27 +00:00
|
|
|
if (kva == 0)
|
|
|
|
return (0);
|
|
|
|
} else
|
|
|
|
kva = 0;
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
|
|
|
|
ZONE_LOCK(zone);
|
|
|
|
MPASS(keg->uk_kva == 0);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
keg->uk_kva = kva;
|
2013-02-26 23:35:27 +00:00
|
|
|
keg->uk_offset = 0;
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
zone->uz_max_items = pages * keg->uk_ipers;
|
2013-02-26 23:35:27 +00:00
|
|
|
#ifdef UMA_MD_SMALL_ALLOC
|
|
|
|
keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
|
|
|
|
#else
|
|
|
|
keg->uk_allocf = noobj_alloc;
|
|
|
|
#endif
|
2013-06-26 00:57:38 +00:00
|
|
|
keg->uk_flags |= UMA_ZONE_NOFREE;
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
ZONE_UNLOCK(zone);
|
2013-06-20 19:08:12 +00:00
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* See uma.h */
|
|
|
|
void
|
|
|
|
uma_prealloc(uma_zone_t zone, int items)
|
|
|
|
{
|
2018-10-30 17:57:40 +00:00
|
|
|
struct vm_domainset_iter di;
|
2018-01-12 23:25:05 +00:00
|
|
|
uma_domain_t dom;
|
2002-03-19 09:11:49 +00:00
|
|
|
uma_slab_t slab;
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
uma_keg_t keg;
|
2019-01-23 18:58:15 +00:00
|
|
|
int aflags, domain, slabs;
|
2002-03-19 09:11:49 +00:00
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
KEG_GET(zone, keg);
|
2013-06-20 19:08:12 +00:00
|
|
|
KEG_LOCK(keg);
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
slabs = items / keg->uk_ipers;
|
|
|
|
if (slabs * keg->uk_ipers < items)
|
2002-03-19 09:11:49 +00:00
|
|
|
slabs++;
|
2018-10-24 16:41:47 +00:00
|
|
|
while (slabs-- > 0) {
|
2019-01-23 18:58:15 +00:00
|
|
|
aflags = M_NOWAIT;
|
|
|
|
vm_domainset_iter_policy_ref_init(&di, &keg->uk_dr, &domain,
|
|
|
|
&aflags);
|
|
|
|
for (;;) {
|
|
|
|
slab = keg_alloc_slab(keg, zone, domain, M_WAITOK,
|
|
|
|
aflags);
|
|
|
|
if (slab != NULL) {
|
|
|
|
dom = &keg->uk_domain[slab->us_domain];
|
|
|
|
LIST_INSERT_HEAD(&dom->ud_free_slab, slab,
|
|
|
|
us_link);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
KEG_LOCK(keg);
|
|
|
|
if (vm_domainset_iter_policy(&di, &domain) != 0) {
|
|
|
|
KEG_UNLOCK(keg);
|
|
|
|
vm_wait_doms(&keg->uk_dr.dr_policy->ds_mask);
|
|
|
|
KEG_LOCK(keg);
|
|
|
|
}
|
|
|
|
}
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
2013-06-20 19:08:12 +00:00
|
|
|
KEG_UNLOCK(keg);
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* See uma.h */
|
2019-09-01 22:22:43 +00:00
|
|
|
void
|
|
|
|
uma_reclaim(int req)
|
2002-03-19 09:11:49 +00:00
|
|
|
{
|
2015-05-09 20:08:36 +00:00
|
|
|
|
2017-06-01 18:36:52 +00:00
|
|
|
CTR0(KTR_UMA, "UMA: vm asked us to release pages!");
|
2019-09-01 22:22:43 +00:00
|
|
|
sx_xlock(&uma_reclaim_lock);
|
2002-04-08 06:20:34 +00:00
|
|
|
bucket_enable();
|
2019-09-01 22:22:43 +00:00
|
|
|
|
|
|
|
switch (req) {
|
|
|
|
case UMA_RECLAIM_TRIM:
|
2019-11-28 00:19:09 +00:00
|
|
|
zone_foreach(zone_trim, NULL);
|
2019-09-01 22:22:43 +00:00
|
|
|
break;
|
|
|
|
case UMA_RECLAIM_DRAIN:
|
|
|
|
case UMA_RECLAIM_DRAIN_CPU:
|
2019-11-28 00:19:09 +00:00
|
|
|
zone_foreach(zone_drain, NULL);
|
2019-09-01 22:22:43 +00:00
|
|
|
if (req == UMA_RECLAIM_DRAIN_CPU) {
|
|
|
|
pcpu_cache_drain_safe(NULL);
|
2019-11-28 00:19:09 +00:00
|
|
|
zone_foreach(zone_drain, NULL);
|
2019-09-01 22:22:43 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
panic("unhandled reclamation request %d", req);
|
2013-11-19 10:51:46 +00:00
|
|
|
}
|
2018-11-13 19:44:40 +00:00
|
|
|
|
2002-03-19 09:11:49 +00:00
|
|
|
/*
|
|
|
|
* Some slabs may have been freed but this zone will be visited early
|
|
|
|
* we visit again so that we can free pages that are empty once other
|
|
|
|
* zones are drained. We have to do the same for buckets.
|
|
|
|
*/
|
2019-11-28 00:19:09 +00:00
|
|
|
zone_drain(slabzone, NULL);
|
2003-09-19 06:26:45 +00:00
|
|
|
bucket_zone_drain();
|
2019-09-01 22:22:43 +00:00
|
|
|
sx_xunlock(&uma_reclaim_lock);
|
2002-03-19 09:11:49 +00:00
|
|
|
}
|
|
|
|
|
2017-11-28 23:40:54 +00:00
|
|
|
static volatile int uma_reclaim_needed;
|
2015-05-09 20:08:36 +00:00
|
|
|
|
|
|
|
void
|
|
|
|
uma_reclaim_wakeup(void)
|
|
|
|
{
|
|
|
|
|
2017-11-28 23:40:54 +00:00
|
|
|
if (atomic_fetchadd_int(&uma_reclaim_needed, 1) == 0)
|
|
|
|
wakeup(uma_reclaim);
|
2015-05-09 20:08:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
uma_reclaim_worker(void *arg __unused)
|
|
|
|
{
|
|
|
|
|
|
|
|
for (;;) {
|
2019-09-01 22:22:43 +00:00
|
|
|
sx_xlock(&uma_reclaim_lock);
|
2017-12-19 10:06:55 +00:00
|
|
|
while (atomic_load_int(&uma_reclaim_needed) == 0)
|
2019-09-01 22:22:43 +00:00
|
|
|
sx_sleep(uma_reclaim, &uma_reclaim_lock, PVM, "umarcl",
|
2017-11-28 23:40:54 +00:00
|
|
|
hz);
|
2019-09-01 22:22:43 +00:00
|
|
|
sx_xunlock(&uma_reclaim_lock);
|
2017-11-28 23:40:54 +00:00
|
|
|
EVENTHANDLER_INVOKE(vm_lowmem, VM_LOW_KMEM);
|
2019-09-01 22:22:43 +00:00
|
|
|
uma_reclaim(UMA_RECLAIM_DRAIN_CPU);
|
2017-12-19 10:06:55 +00:00
|
|
|
atomic_store_int(&uma_reclaim_needed, 0);
|
2017-11-28 23:40:54 +00:00
|
|
|
/* Don't fire more than once per-second. */
|
|
|
|
pause("umarclslp", hz);
|
2015-05-09 20:08:36 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-01 22:22:43 +00:00
|
|
|
/* See uma.h */
|
|
|
|
void
|
|
|
|
uma_zone_reclaim(uma_zone_t zone, int req)
|
|
|
|
{
|
|
|
|
|
|
|
|
switch (req) {
|
|
|
|
case UMA_RECLAIM_TRIM:
|
2019-11-28 00:19:09 +00:00
|
|
|
zone_trim(zone, NULL);
|
2019-09-01 22:22:43 +00:00
|
|
|
break;
|
|
|
|
case UMA_RECLAIM_DRAIN:
|
2019-11-28 00:19:09 +00:00
|
|
|
zone_drain(zone, NULL);
|
2019-09-01 22:22:43 +00:00
|
|
|
break;
|
|
|
|
case UMA_RECLAIM_DRAIN_CPU:
|
|
|
|
pcpu_cache_drain_safe(zone);
|
2019-11-28 00:19:09 +00:00
|
|
|
zone_drain(zone, NULL);
|
2019-09-01 22:22:43 +00:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
panic("unhandled reclamation request %d", req);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-01-05 19:09:01 +00:00
|
|
|
/* See uma.h */
|
|
|
|
int
|
|
|
|
uma_zone_exhausted(uma_zone_t zone)
|
|
|
|
{
|
|
|
|
int full;
|
|
|
|
|
|
|
|
ZONE_LOCK(zone);
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
full = zone->uz_sleepers > 0;
|
2007-01-05 19:09:01 +00:00
|
|
|
ZONE_UNLOCK(zone);
|
|
|
|
return (full);
|
|
|
|
}
|
|
|
|
|
2007-01-25 01:05:23 +00:00
|
|
|
int
|
|
|
|
uma_zone_exhausted_nolock(uma_zone_t zone)
|
|
|
|
{
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
return (zone->uz_sleepers > 0);
|
2007-01-25 01:05:23 +00:00
|
|
|
}
|
|
|
|
|
2014-02-10 19:48:26 +00:00
|
|
|
/*
 * Zero an item's storage.  The item is assumed to be uz_size bytes,
 * i.e. the zone's per-item allocation size.
 */
static void
uma_zero_item(void *item, uma_zone_t zone)
{

	bzero(item, zone->uz_size);
}
|
|
|
|
|
2017-11-28 23:40:54 +00:00
|
|
|
/*
 * Return the configured limit, in bytes, on the kernel memory UMA may
 * allocate (uma_kmem_limit).  Plain read; see uma_set_limit() for the
 * matching store.
 */
unsigned long
uma_limit(void)
{

	return (uma_kmem_limit);
}
|
|
|
|
|
|
|
|
/*
 * Set the limit, in bytes, on the kernel memory UMA may allocate.
 *
 * NOTE(review): this is a plain (non-atomic) store; concurrent readers
 * of uma_kmem_limit will observe either the old or the new value.
 */
void
uma_set_limit(unsigned long limit)
{

	uma_kmem_limit = limit;
}
|
|
|
|
|
|
|
|
/*
 * Return the total kernel memory, in bytes, currently allocated by UMA.
 * uma_kmem_total is updated concurrently by allocation/free paths, hence
 * the atomic load.
 */
unsigned long
uma_size(void)
{

	return (atomic_load_long(&uma_kmem_total));
}
|
|
|
|
|
|
|
|
/*
 * Return the number of bytes UMA may still allocate before reaching the
 * configured limit.
 *
 * NOTE(review): uma_kmem_limit is unsigned long while the return type is
 * long; if the current size exceeds the limit (or no limit is set, i.e.
 * uma_kmem_limit == 0) the result is negative/advisory — confirm callers
 * tolerate this.
 */
long
uma_avail(void)
{

	return (uma_kmem_limit - uma_size());
}
|
|
|
|
|
2006-07-18 01:13:18 +00:00
|
|
|
#ifdef DDB
|
Introduce a new sysctl, vm.zone_stats, which exports UMA(9) allocator
statistics via a binary structure stream:
- Add structure 'uma_stream_header', which defines a stream version,
definition of MAXCPUs used in the stream, and the number of zone
records in the stream.
- Add structure 'uma_type_header', which defines the name, alignment,
size, resource allocation limits, current pages allocated, preferred
bucket size, and central zone + keg statistics.
- Add structure 'uma_percpu_stat', which, for each per-CPU cache,
includes the number of allocations and frees, as well as the number
of free items in the cache.
- When the sysctl is queried, return a stream header, followed by a
series of type descriptions, each consisting of a type header
followed by a series of MAXCPUs uma_percpu_stat structures holding
per-CPU allocation information. Typical values of MAXCPU will be
1 (UP compiled kernel) and 16 (SMP compiled kernel).
This query mechanism allows user space monitoring tools to extract
memory allocation statistics in a machine-readable form, and to do so
at a per-CPU granularity, allowing monitoring of allocation patterns
across CPUs in order to better understand the distribution of work and
memory flow over multiple CPUs.
While here, also export the number of UMA zones as a sysctl
vm.uma_count, in order to assist in sizing user-space buffers to
receive the stream.
A follow-up commit of libmemstat(3), a library to monitor kernel memory
allocation, will occur in the next few days. This change directly
supports converting netstat(1)'s "-mb" mode to using UMA-sourced stats
rather than separately maintained mbuf allocator statistics.
MFC after: 1 week
2005-07-14 16:35:13 +00:00
|
|
|
/*
 * Generate statistics across both the zone and its per-cpu cache's. Return
 * desired statistics if the pointer is non-NULL for that statistic.
 *
 * Note: does not update the zone statistics, as it can't safely clear the
 * per-CPU cache statistic.
 *
 * XXXRW: Following the uc_allocbucket and uc_freebucket pointers here isn't
 * safe from off-CPU; we should modify the caches to track this information
 * directly so that we don't have to.
 */
static void
uma_zone_sumstat(uma_zone_t z, long *cachefreep, uint64_t *allocsp,
    uint64_t *freesp, uint64_t *sleepsp, uint64_t *xdomainp)
{
	uma_cache_t cache;
	uint64_t allocs, frees, sleeps, xdomain;
	int cachefree, cpu;

	allocs = frees = sleeps = xdomain = 0;
	cachefree = 0;
	CPU_FOREACH(cpu) {
		cache = &z->uz_cpu[cpu];
		/* Racy off-CPU reads of bucket pointers; see XXXRW above. */
		if (cache->uc_allocbucket != NULL)
			cachefree += cache->uc_allocbucket->ub_cnt;
		if (cache->uc_freebucket != NULL)
			cachefree += cache->uc_freebucket->ub_cnt;
		/*
		 * Cross-domain bucket items count both as cached-free and
		 * toward the cross-domain total.
		 */
		if (cache->uc_crossbucket != NULL) {
			xdomain += cache->uc_crossbucket->ub_cnt;
			cachefree += cache->uc_crossbucket->ub_cnt;
		}
		allocs += cache->uc_allocs;
		frees += cache->uc_frees;
	}
	/* Fold in the zone-level counters on top of the per-CPU sums. */
	allocs += counter_u64_fetch(z->uz_allocs);
	frees += counter_u64_fetch(z->uz_frees);
	sleeps += z->uz_sleeps;
	xdomain += z->uz_xdomain;
	/* Only report the statistics the caller asked for. */
	if (cachefreep != NULL)
		*cachefreep = cachefree;
	if (allocsp != NULL)
		*allocsp = allocs;
	if (freesp != NULL)
		*freesp = frees;
	if (sleepsp != NULL)
		*sleepsp = sleeps;
	if (xdomainp != NULL)
		*xdomainp = xdomain;
}
|
2006-07-18 01:13:18 +00:00
|
|
|
#endif /* DDB */
|
Introduce a new sysctl, vm.zone_stats, which exports UMA(9) allocator
statistics via a binary structure stream:
- Add structure 'uma_stream_header', which defines a stream version,
definition of MAXCPUs used in the stream, and the number of zone
records in the stream.
- Add structure 'uma_type_header', which defines the name, alignment,
size, resource allocation limits, current pages allocated, preferred
bucket size, and central zone + keg statistics.
- Add structure 'uma_percpu_stat', which, for each per-CPU cache,
includes the number of allocations and frees, as well as the number
of free items in the cache.
- When the sysctl is queried, return a stream header, followed by a
series of type descriptions, each consisting of a type header
followed by a series of MAXCPUs uma_percpu_stat structures holding
per-CPU allocation information. Typical values of MAXCPU will be
1 (UP compiled kernel) and 16 (SMP compiled kernel).
This query mechanism allows user space monitoring tools to extract
memory allocation statistics in a machine-readable form, and to do so
at a per-CPU granularity, allowing monitoring of allocation patterns
across CPUs in order to better understand the distribution of work and
memory flow over multiple CPUs.
While here, also export the number of UMA zones as a sysctl
vm.uma_count, in order to assist in sizing user swpace buffers to
receive the stream.
A follow-up commit of libmemstat(3), a library to monitor kernel memory
allocation, will occur in the next few days. This change directly
supports converting netstat(1)'s "-mb" mode to using UMA-sourced stats
rather than separately maintained mbuf allocator statistics.
MFC after: 1 week
2005-07-14 16:35:13 +00:00
|
|
|
|
|
|
|
static int
|
|
|
|
sysctl_vm_zone_count(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
uma_keg_t kz;
|
|
|
|
uma_zone_t z;
|
|
|
|
int count;
|
|
|
|
|
|
|
|
count = 0;
|
2014-10-05 21:34:56 +00:00
|
|
|
rw_rlock(&uma_rwlock);
|
Introduce a new sysctl, vm.zone_stats, which exports UMA(9) allocator
statistics via a binary structure stream:
- Add structure 'uma_stream_header', which defines a stream version,
definition of MAXCPUs used in the stream, and the number of zone
records in the stream.
- Add structure 'uma_type_header', which defines the name, alignment,
size, resource allocation limits, current pages allocated, preferred
bucket size, and central zone + keg statistics.
- Add structure 'uma_percpu_stat', which, for each per-CPU cache,
includes the number of allocations and frees, as well as the number
of free items in the cache.
- When the sysctl is queried, return a stream header, followed by a
series of type descriptions, each consisting of a type header
followed by a series of MAXCPUs uma_percpu_stat structures holding
per-CPU allocation information. Typical values of MAXCPU will be
1 (UP compiled kernel) and 16 (SMP compiled kernel).
This query mechanism allows user space monitoring tools to extract
memory allocation statistics in a machine-readable form, and to do so
at a per-CPU granularity, allowing monitoring of allocation patterns
across CPUs in order to better understand the distribution of work and
memory flow over multiple CPUs.
While here, also export the number of UMA zones as a sysctl
vm.uma_count, in order to assist in sizing user swpace buffers to
receive the stream.
A follow-up commit of libmemstat(3), a library to monitor kernel memory
allocation, will occur in the next few days. This change directly
supports converting netstat(1)'s "-mb" mode to using UMA-sourced stats
rather than separately maintained mbuf allocator statistics.
MFC after: 1 week
2005-07-14 16:35:13 +00:00
|
|
|
LIST_FOREACH(kz, &uma_kegs, uk_link) {
|
|
|
|
LIST_FOREACH(z, &kz->uk_zones, uz_link)
|
|
|
|
count++;
|
|
|
|
}
|
2019-02-07 03:32:45 +00:00
|
|
|
LIST_FOREACH(z, &uma_cachezones, uz_link)
|
|
|
|
count++;
|
|
|
|
|
2014-10-05 21:34:56 +00:00
|
|
|
rw_runlock(&uma_rwlock);
|
Introduce a new sysctl, vm.zone_stats, which exports UMA(9) allocator
statistics via a binary structure stream:
- Add structure 'uma_stream_header', which defines a stream version,
definition of MAXCPUs used in the stream, and the number of zone
records in the stream.
- Add structure 'uma_type_header', which defines the name, alignment,
size, resource allocation limits, current pages allocated, preferred
bucket size, and central zone + keg statistics.
- Add structure 'uma_percpu_stat', which, for each per-CPU cache,
includes the number of allocations and frees, as well as the number
of free items in the cache.
- When the sysctl is queried, return a stream header, followed by a
series of type descriptions, each consisting of a type header
followed by a series of MAXCPUs uma_percpu_stat structures holding
per-CPU allocation information. Typical values of MAXCPU will be
1 (UP compiled kernel) and 16 (SMP compiled kernel).
This query mechanism allows user space monitoring tools to extract
memory allocation statistics in a machine-readable form, and to do so
at a per-CPU granularity, allowing monitoring of allocation patterns
across CPUs in order to better understand the distribution of work and
memory flow over multiple CPUs.
While here, also export the number of UMA zones as a sysctl
vm.uma_count, in order to assist in sizing user swpace buffers to
receive the stream.
A follow-up commit of libmemstat(3), a library to monitor kernel memory
allocation, will occur in the next few days. This change directly
supports converting netstat(1)'s "-mb" mode to using UMA-sourced stats
rather than separately maintained mbuf allocator statistics.
MFC after: 1 week
2005-07-14 16:35:13 +00:00
|
|
|
return (sysctl_handle_int(oidp, &count, 0, req));
|
|
|
|
}
|
|
|
|
|
2019-02-07 03:32:45 +00:00
|
|
|
/*
 * Fill in zone-level and per-CPU statistics for a single zone, for the
 * vm.zone_stats sysctl.
 *
 * uth  - type header to fill with zone-wide totals (free items, allocs,
 *        frees, failures, sleeps, cross-domain frees).
 * z    - the zone being reported.
 * sbuf - output stream for the stats record.
 *        NOTE(review): sbuf appears unused in this function's visible
 *        body — confirm whether the caller drains ups[] itself.
 * ups  - array of per-CPU stat records, indexed by CPU id; entries for
 *        absent CPUs (and all entries for internal zones) are zeroed.
 * internal - true for internal zones, whose per-CPU caches are skipped.
 */
static void
uma_vm_zone_stats(struct uma_type_header *uth, uma_zone_t z, struct sbuf *sbuf,
    struct uma_percpu_stat *ups, bool internal)
{
	uma_zone_domain_t zdom;
	uma_bucket_t bucket;
	uma_cache_t cache;
	int i;

	/* Sum free items across all NUMA domains of the zone. */
	for (i = 0; i < vm_ndomains; i++) {
		zdom = &z->uz_domain[i];
		uth->uth_zone_free += zdom->uzd_nitems;
	}
	uth->uth_allocs = counter_u64_fetch(z->uz_allocs);
	uth->uth_frees = counter_u64_fetch(z->uz_frees);
	uth->uth_fails = counter_u64_fetch(z->uz_fails);
	uth->uth_sleeps = z->uz_sleeps;
	uth->uth_xdomain = z->uz_xdomain;

	/*
	 * While it is not normally safe to access the cache bucket pointers
	 * while not on the CPU that owns the cache, we only allow the pointers
	 * to be exchanged without the zone lock held, not invalidated, so
	 * accept the possible race associated with bucket exchange during
	 * monitoring.  Use atomic_load_ptr() to ensure that the bucket pointers
	 * are loaded only once.
	 */
	for (i = 0; i < mp_maxid + 1; i++) {
		/* Start from a clean record; absent CPUs stay all-zero. */
		bzero(&ups[i], sizeof(*ups));
		if (internal || CPU_ABSENT(i))
			continue;
		cache = &z->uz_cpu[i];
		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_allocbucket);
		if (bucket != NULL)
			ups[i].ups_cache_free += bucket->ub_cnt;
		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_freebucket);
		if (bucket != NULL)
			ups[i].ups_cache_free += bucket->ub_cnt;
		bucket = (uma_bucket_t)atomic_load_ptr(&cache->uc_crossbucket);
		if (bucket != NULL)
			ups[i].ups_cache_free += bucket->ub_cnt;
		ups[i].ups_allocs = cache->uc_allocs;
		ups[i].ups_frees = cache->uc_frees;
	}
}
|
|
|
|
|
Introduce a new sysctl, vm.zone_stats, which exports UMA(9) allocator
statistics via a binary structure stream:
- Add structure 'uma_stream_header', which defines a stream version,
definition of MAXCPUs used in the stream, and the number of zone
records in the stream.
- Add structure 'uma_type_header', which defines the name, alignment,
size, resource allocation limits, current pages allocated, preferred
bucket size, and central zone + keg statistics.
- Add structure 'uma_percpu_stat', which, for each per-CPU cache,
includes the number of allocations and frees, as well as the number
of free items in the cache.
- When the sysctl is queried, return a stream header, followed by a
series of type descriptions, each consisting of a type header
followed by a series of MAXCPUs uma_percpu_stat structures holding
per-CPU allocation information. Typical values of MAXCPU will be
1 (UP compiled kernel) and 16 (SMP compiled kernel).
This query mechanism allows user space monitoring tools to extract
memory allocation statistics in a machine-readable form, and to do so
at a per-CPU granularity, allowing monitoring of allocation patterns
across CPUs in order to better understand the distribution of work and
memory flow over multiple CPUs.
While here, also export the number of UMA zones as a sysctl
vm.uma_count, in order to assist in sizing user-space buffers to
receive the stream.
A follow-up commit of libmemstat(3), a library to monitor kernel memory
allocation, will occur in the next few days. This change directly
supports converting netstat(1)'s "-mb" mode to using UMA-sourced stats
rather than separately maintained mbuf allocator statistics.
MFC after: 1 week
2005-07-14 16:35:13 +00:00
|
|
|
static int
|
|
|
|
/*
 * sysctl handler for vm.zone_stats: export a binary stream describing
 * every UMA zone.  The stream is a uma_stream_header followed, for each
 * zone, by a uma_type_header and (mp_maxid + 1) uma_percpu_stat records.
 */
sysctl_vm_zone_stats(SYSCTL_HANDLER_ARGS)
{
	struct uma_stream_header ush;
	struct uma_type_header uth;
	struct uma_percpu_stat *ups;
	struct sbuf sbuf;
	uma_keg_t kz;
	uma_zone_t z;
	int count, error, i;

	/*
	 * Wire the old (user) buffer so copyout cannot fault while we
	 * hold uma_rwlock and zone locks below.
	 */
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	/* Binary stream: do not emit a trailing NUL byte. */
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
	/* Scratch space for one zone's worth of per-CPU records. */
	ups = malloc((mp_maxid + 1) * sizeof(*ups), M_TEMP, M_WAITOK);

	/*
	 * Count zones so the header can tell userland how many records
	 * follow.  uma_rwlock stays held until the stream is complete so
	 * the count cannot go stale.
	 */
	count = 0;
	rw_rlock(&uma_rwlock);
	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link)
			count++;
	}

	/* Cache-only zones (no keg) are listed separately. */
	LIST_FOREACH(z, &uma_cachezones, uz_link)
		count++;

	/*
	 * Insert stream header.
	 */
	bzero(&ush, sizeof(ush));
	ush.ush_version = UMA_STREAM_VERSION;
	ush.ush_maxcpus = (mp_maxid + 1);
	ush.ush_count = count;
	(void)sbuf_bcat(&sbuf, &ush, sizeof(ush));

	/* One type header + per-CPU records for each keg-backed zone. */
	LIST_FOREACH(kz, &uma_kegs, uk_link) {
		LIST_FOREACH(z, &kz->uk_zones, uz_link) {
			bzero(&uth, sizeof(uth));
			ZONE_LOCK(z);
			strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
			uth.uth_align = kz->uk_align;
			uth.uth_size = kz->uk_size;
			uth.uth_rsize = kz->uk_rsize;
			/*
			 * With a zone-level item limit, report pages derived
			 * from the current item count; otherwise report the
			 * keg's page count directly.
			 */
			if (z->uz_max_items > 0)
				uth.uth_pages = (z->uz_items / kz->uk_ipers) *
					kz->uk_ppera;
			else
				uth.uth_pages = kz->uk_pages;
			uth.uth_maxpages = (z->uz_max_items / kz->uk_ipers) *
			    kz->uk_ppera;
			uth.uth_limit = z->uz_max_items;
			uth.uth_keg_free = z->uz_keg->uk_free;

			/*
			 * A zone is secondary if it is not the first entry
			 * on the keg's zone list.
			 */
			if ((z->uz_flags & UMA_ZONE_SECONDARY) &&
			    (LIST_FIRST(&kz->uk_zones) != z))
				uth.uth_zone_flags = UTH_ZONE_SECONDARY;
			/* Fill central + per-CPU counters into uth/ups. */
			uma_vm_zone_stats(&uth, z, &sbuf, ups,
			    kz->uk_flags & UMA_ZFLAG_INTERNAL);
			ZONE_UNLOCK(z);
			(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
			for (i = 0; i < mp_maxid + 1; i++)
				(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
		}
	}
	/* Cache zones: no keg, so only name/size plus counters apply. */
	LIST_FOREACH(z, &uma_cachezones, uz_link) {
		bzero(&uth, sizeof(uth));
		ZONE_LOCK(z);
		strlcpy(uth.uth_name, z->uz_name, UTH_MAX_NAME);
		uth.uth_size = z->uz_size;
		uma_vm_zone_stats(&uth, z, &sbuf, ups, false);
		ZONE_UNLOCK(z);
		(void)sbuf_bcat(&sbuf, &uth, sizeof(uth));
		for (i = 0; i < mp_maxid + 1; i++)
			(void)sbuf_bcat(&sbuf, &ups[i], sizeof(ups[i]));
	}

	rw_runlock(&uma_rwlock);
	/* sbuf_finish() reports any copyout error from the drain. */
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	free(ups, M_TEMP);
	return (error);
}
|
2005-10-20 16:39:33 +00:00
|
|
|
|
2014-02-07 14:29:03 +00:00
|
|
|
int
|
|
|
|
sysctl_handle_uma_zone_max(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
uma_zone_t zone = *(uma_zone_t *)arg1;
|
2015-04-10 06:56:49 +00:00
|
|
|
int error, max;
|
2014-02-07 14:29:03 +00:00
|
|
|
|
2015-04-10 06:56:49 +00:00
|
|
|
max = uma_zone_get_max(zone);
|
2014-02-07 14:29:03 +00:00
|
|
|
error = sysctl_handle_int(oidp, &max, 0, req);
|
|
|
|
if (error || !req->newptr)
|
|
|
|
return (error);
|
|
|
|
|
|
|
|
uma_zone_set_max(zone, max);
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
sysctl_handle_uma_zone_cur(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
2019-11-28 00:19:09 +00:00
|
|
|
uma_zone_t zone;
|
2014-02-07 14:29:03 +00:00
|
|
|
int cur;
|
|
|
|
|
2019-11-28 00:19:09 +00:00
|
|
|
/*
|
|
|
|
* Some callers want to add sysctls for global zones that
|
|
|
|
* may not yet exist so they pass a pointer to a pointer.
|
|
|
|
*/
|
|
|
|
if (arg2 == 0)
|
|
|
|
zone = *(uma_zone_t *)arg1;
|
|
|
|
else
|
|
|
|
zone = arg1;
|
2014-02-07 14:29:03 +00:00
|
|
|
cur = uma_zone_get_cur(zone);
|
|
|
|
return (sysctl_handle_int(oidp, &cur, 0, req));
|
|
|
|
}
|
|
|
|
|
2019-11-28 00:19:09 +00:00
|
|
|
static int
|
|
|
|
sysctl_handle_uma_zone_allocs(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
uma_zone_t zone = arg1;
|
|
|
|
uint64_t cur;
|
|
|
|
|
|
|
|
cur = uma_zone_get_allocs(zone);
|
|
|
|
return (sysctl_handle_64(oidp, &cur, 0, req));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
sysctl_handle_uma_zone_frees(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
uma_zone_t zone = arg1;
|
|
|
|
uint64_t cur;
|
|
|
|
|
|
|
|
cur = uma_zone_get_frees(zone);
|
|
|
|
return (sysctl_handle_64(oidp, &cur, 0, req));
|
|
|
|
}
|
|
|
|
|
2019-12-11 06:50:55 +00:00
|
|
|
static int
|
|
|
|
sysctl_handle_uma_zone_flags(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
struct sbuf sbuf;
|
|
|
|
uma_zone_t zone = arg1;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
sbuf_new_for_sysctl(&sbuf, NULL, 0, req);
|
|
|
|
if (zone->uz_flags != 0)
|
|
|
|
sbuf_printf(&sbuf, "0x%b", zone->uz_flags, PRINT_UMA_ZFLAGS);
|
|
|
|
else
|
|
|
|
sbuf_printf(&sbuf, "0");
|
|
|
|
error = sbuf_finish(&sbuf);
|
|
|
|
sbuf_delete(&sbuf);
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2016-02-03 22:02:36 +00:00
|
|
|
#ifdef INVARIANTS
|
|
|
|
/*
 * Resolve the slab that an item belongs to, for the INVARIANTS debugging
 * checks.  Returns NULL when the slab cannot be located (cache zones
 * without a keg).
 */
static uma_slab_t
uma_dbg_getslab(uma_zone_t zone, void *item)
{
	uma_slab_t slab;
	uma_keg_t keg;
	uint8_t *mem;

	/* Round the item's address down to the start of its slab. */
	mem = (uint8_t *)((uintptr_t)item & (~UMA_SLAB_MASK));
	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
		/* Slab pointer is stashed in the backing vm_page. */
		slab = vtoslab((vm_offset_t)mem);
	} else {
		/*
		 * It is safe to return the slab here even though the
		 * zone is unlocked because the item's allocation state
		 * essentially holds a reference.
		 */
		/* uz_lockptr pointing at uz_lock identifies a keg-less
		 * (cache) zone, which has no slabs to look up. */
		if (zone->uz_lockptr == &zone->uz_lock)
			return (NULL);
		ZONE_LOCK(zone);
		keg = zone->uz_keg;
		if (keg->uk_flags & UMA_ZONE_HASH)
			slab = hash_sfind(&keg->uk_hash, mem);
		else
			/* Offpage-less keg: slab header lives in the page. */
			slab = (uma_slab_t)(mem + keg->uk_pgoff);
		ZONE_UNLOCK(zone);
	}

	return (slab);
}
|
|
|
|
|
2018-06-08 00:15:08 +00:00
|
|
|
static bool
|
|
|
|
uma_dbg_zskip(uma_zone_t zone, void *mem)
|
|
|
|
{
|
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
if (zone->uz_lockptr == &zone->uz_lock)
|
2018-06-08 00:15:08 +00:00
|
|
|
return (true);
|
|
|
|
|
o Move zone limit from keg level up to zone level. This means that now
two zones sharing a keg may have different limits. Now this is going
to work:
zone = uma_zcreate();
uma_zone_set_max(zone, limit);
zone2 = uma_zsecond_create(zone);
uma_zone_set_max(zone2, limit2);
Kegs no longer have uk_maxpages field, but zones have uz_items. When
set, it may be rounded up to minimum possible CPU bucket cache size.
For small limits bucket cache can also be reconfigured to be smaller.
Counter uz_items is updated whenever items transition from keg to a
bucket cache or directly to a consumer. If zone has uz_maxitems set and
it is reached, then we are going to sleep.
o Since new limits don't play well with multi-keg zones, remove them. The
idea of multi-keg zones was introduced exactly 10 years ago, and never
have had a practical usage. In discussion with Jeff we came to a wild
agreement that if we ever want to reintroduce the idea of a smart allocator
that would be able to choose between two (or more) totally different
backing stores, that choice should be made one level higher than UMA,
e.g. in malloc(9) or in mget(), or whatever and choice should be controlled
by the caller.
o Sleeping code is improved to account number of sleepers and wake them one
by one, to avoid thundering herd problem.
o Flag UMA_ZONE_NOBUCKETCACHE removed, instead uma_zone_set_maxcache()
KPI added. Having no bucket cache basically means setting maxcache to 0.
o Now with many fields added and many removed (no multi-keg zones!) make
sure that struct uma_zone is perfectly aligned.
Reviewed by: markj, jeff
Tested by: pho
Differential Revision: https://reviews.freebsd.org/D17773
2019-01-15 00:02:06 +00:00
|
|
|
return (uma_dbg_kskip(zone->uz_keg, mem));
|
2018-06-08 00:15:08 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
uma_dbg_kskip(uma_keg_t keg, void *mem)
|
|
|
|
{
|
|
|
|
uintptr_t idx;
|
|
|
|
|
|
|
|
if (dbg_divisor == 0)
|
|
|
|
return (true);
|
|
|
|
|
|
|
|
if (dbg_divisor == 1)
|
|
|
|
return (false);
|
|
|
|
|
|
|
|
idx = (uintptr_t)mem >> PAGE_SHIFT;
|
|
|
|
if (keg->uk_ipers > 1) {
|
|
|
|
idx *= keg->uk_ipers;
|
|
|
|
idx += ((uintptr_t)mem & PAGE_MASK) / keg->uk_rsize;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((idx / dbg_divisor) * dbg_divisor != idx) {
|
|
|
|
counter_u64_add(uma_skip_cnt, 1);
|
|
|
|
return (true);
|
|
|
|
}
|
|
|
|
counter_u64_add(uma_dbg_cnt, 1);
|
|
|
|
|
|
|
|
return (false);
|
|
|
|
}
|
|
|
|
|
2016-02-03 22:02:36 +00:00
|
|
|
/*
|
|
|
|
* Set up the slab's freei data such that uma_dbg_free can function.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
|
|
|
|
{
|
|
|
|
uma_keg_t keg;
|
|
|
|
int freei;
|
|
|
|
|
|
|
|
if (slab == NULL) {
|
|
|
|
slab = uma_dbg_getslab(zone, item);
|
|
|
|
if (slab == NULL)
|
|
|
|
panic("uma: item %p did not belong to zone %s\n",
|
|
|
|
item, zone->uz_name);
|
|
|
|
}
|
2019-11-28 07:49:25 +00:00
|
|
|
keg = zone->uz_keg;
|
2019-12-08 01:15:06 +00:00
|
|
|
freei = slab_item_index(slab, keg, item);
|
2016-02-03 22:02:36 +00:00
|
|
|
|
2019-12-13 09:31:59 +00:00
|
|
|
if (BIT_ISSET(keg->uk_ipers, freei, slab_dbg_bits(slab, keg)))
|
2016-02-03 22:02:36 +00:00
|
|
|
panic("Duplicate alloc of %p from zone %p(%s) slab %p(%d)\n",
|
|
|
|
item, zone, zone->uz_name, slab, freei);
|
2019-12-13 09:31:59 +00:00
|
|
|
BIT_SET_ATOMIC(keg->uk_ipers, freei, slab_dbg_bits(slab, keg));
|
2016-02-03 22:02:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Verifies freed addresses.  Checks for alignment, valid slab membership
 * and duplicate frees.
 *
 */
static void
uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
{
	uma_keg_t keg;
	int freei;

	/* Locate the slab when the caller did not supply one. */
	if (slab == NULL) {
		slab = uma_dbg_getslab(zone, item);
		if (slab == NULL)
			panic("uma: Freed item %p did not belong to zone %s\n",
			    item, zone->uz_name);
	}
	keg = zone->uz_keg;
	freei = slab_item_index(slab, keg, item);

	/* The computed index must fall within this slab. */
	if (freei >= keg->uk_ipers)
		panic("Invalid free of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);

	/* The index must map back to exactly this item (alignment check). */
	if (slab_item(slab, keg, freei) != item)
		panic("Unaligned free of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);

	/* The slot must be marked allocated by uma_dbg_alloc(). */
	if (!BIT_ISSET(keg->uk_ipers, freei, slab_dbg_bits(slab, keg)))
		panic("Duplicate free of %p from zone %p(%s) slab %p(%d)\n",
		    item, zone, zone->uz_name, slab, freei);

	BIT_CLR_ATOMIC(keg->uk_ipers, freei, slab_dbg_bits(slab, keg));
}
|
|
|
|
#endif /* INVARIANTS */
|
|
|
|
|
2005-10-20 16:39:33 +00:00
|
|
|
#ifdef DDB
|
2019-10-11 01:31:31 +00:00
|
|
|
static int64_t
|
|
|
|
get_uma_stats(uma_keg_t kz, uma_zone_t z, uint64_t *allocs, uint64_t *used,
|
2019-10-11 06:02:03 +00:00
|
|
|
uint64_t *sleeps, long *cachefree, uint64_t *xdomain)
|
2019-10-11 01:31:31 +00:00
|
|
|
{
|
|
|
|
uint64_t frees;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (kz->uk_flags & UMA_ZFLAG_INTERNAL) {
|
|
|
|
*allocs = counter_u64_fetch(z->uz_allocs);
|
|
|
|
frees = counter_u64_fetch(z->uz_frees);
|
|
|
|
*sleeps = z->uz_sleeps;
|
|
|
|
*cachefree = 0;
|
|
|
|
*xdomain = 0;
|
|
|
|
} else
|
|
|
|
uma_zone_sumstat(z, cachefree, allocs, &frees, sleeps,
|
|
|
|
xdomain);
|
|
|
|
if (!((z->uz_flags & UMA_ZONE_SECONDARY) &&
|
|
|
|
(LIST_FIRST(&kz->uk_zones) != z)))
|
|
|
|
*cachefree += kz->uk_free;
|
|
|
|
for (i = 0; i < vm_ndomains; i++)
|
|
|
|
*cachefree += z->uz_domain[i].uzd_nitems;
|
|
|
|
*used = *allocs - frees;
|
|
|
|
return (((int64_t)*used + *cachefree) * kz->uk_size);
|
|
|
|
}
|
|
|
|
|
2005-10-20 16:39:33 +00:00
|
|
|
/*
 * DDB "show uma" command: print per-zone statistics, ordered by total
 * memory footprint (largest first).  Each output line is found by a
 * fresh scan over all kegs/zones, so no sorting storage is needed.
 */
DB_SHOW_COMMAND(uma, db_show_uma)
{
	const char *fmt_hdr, *fmt_entry;
	uma_keg_t kz;
	uma_zone_t z;
	uint64_t allocs, used, sleeps, xdomain;
	long cachefree;
	/* variables for sorting */
	uma_keg_t cur_keg;
	uma_zone_t cur_zone, last_zone;
	int64_t cur_size, last_size, size;
	int ties;

	/* /i option produces machine-parseable CSV output */
	if (modif[0] == 'i') {
		fmt_hdr = "%s,%s,%s,%s,%s,%s,%s,%s,%s\n";
		fmt_entry = "\"%s\",%ju,%jd,%ld,%ju,%ju,%u,%jd,%ju\n";
	} else {
		fmt_hdr = "%18s %6s %7s %7s %11s %7s %7s %10s %8s\n";
		fmt_entry = "%18s %6ju %7jd %7ld %11ju %7ju %7u %10jd %8ju\n";
	}

	db_printf(fmt_hdr, "Zone", "Size", "Used", "Free", "Requests",
	    "Sleeps", "Bucket", "Total Mem", "XFree");

	/* Sort the zones with largest size first. */
	last_zone = NULL;
	last_size = INT64_MAX;
	for (;;) {
		/* Scan for the largest zone not yet printed. */
		cur_zone = NULL;
		cur_size = -1;
		ties = 0;
		LIST_FOREACH(kz, &uma_kegs, uk_link) {
			LIST_FOREACH(z, &kz->uk_zones, uz_link) {
				/*
				 * In the case of size ties, print out zones
				 * in the order they are encountered.  That is,
				 * when we encounter the most recently output
				 * zone, we have already printed all preceding
				 * ties, and we must print all following ties.
				 */
				if (z == last_zone) {
					ties = 1;
					continue;
				}
				size = get_uma_stats(kz, z, &allocs, &used,
				    &sleeps, &cachefree, &xdomain);
				/*
				 * With ties == 1 the bound is "<= last_size",
				 * admitting equal-sized zones after last_zone.
				 */
				if (size > cur_size && size < last_size + ties)
				{
					cur_size = size;
					cur_zone = z;
					cur_keg = kz;
				}
			}
		}
		/* No candidate left: every zone has been printed. */
		if (cur_zone == NULL)
			break;

		/* Re-fetch stats; the scan may have clobbered the locals. */
		size = get_uma_stats(cur_keg, cur_zone, &allocs, &used,
		    &sleeps, &cachefree, &xdomain);
		db_printf(fmt_entry, cur_zone->uz_name,
		    (uintmax_t)cur_keg->uk_size, (intmax_t)used, cachefree,
		    (uintmax_t)allocs, (uintmax_t)sleeps,
		    (unsigned)cur_zone->uz_bucket_size, (intmax_t)size,
		    xdomain);

		if (db_pager_quit)
			return;
		last_zone = cur_zone;
		last_size = cur_size;
	}
}
|
2013-11-28 19:20:49 +00:00
|
|
|
|
|
|
|
DB_SHOW_COMMAND(umacache, db_show_umacache)
|
|
|
|
{
|
|
|
|
uma_zone_t z;
|
2018-01-12 23:25:05 +00:00
|
|
|
uint64_t allocs, frees;
|
2018-11-13 19:44:40 +00:00
|
|
|
long cachefree;
|
|
|
|
int i;
|
2013-11-28 19:20:49 +00:00
|
|
|
|
|
|
|
db_printf("%18s %8s %8s %8s %12s %8s\n", "Zone", "Size", "Used", "Free",
|
|
|
|
"Requests", "Bucket");
|
|
|
|
LIST_FOREACH(z, &uma_cachezones, uz_link) {
|
2019-08-06 21:50:34 +00:00
|
|
|
uma_zone_sumstat(z, &cachefree, &allocs, &frees, NULL, NULL);
|
2018-11-13 19:44:40 +00:00
|
|
|
for (i = 0; i < vm_ndomains; i++)
|
|
|
|
cachefree += z->uz_domain[i].uzd_nitems;
|
|
|
|
db_printf("%18s %8ju %8jd %8ld %12ju %8u\n",
|
2013-11-28 19:20:49 +00:00
|
|
|
z->uz_name, (uintmax_t)z->uz_size,
|
|
|
|
(intmax_t)(allocs - frees), cachefree,
|
2019-11-28 00:19:09 +00:00
|
|
|
(uintmax_t)allocs, z->uz_bucket_size);
|
2013-11-28 19:20:49 +00:00
|
|
|
if (db_pager_quit)
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2016-02-03 22:02:36 +00:00
|
|
|
#endif /* DDB */
|