/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2002-2019 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/counter.h>
#include <sys/_bitset.h>
#include <sys/_domainset.h>
#include <sys/_task.h>

/*
 * This file includes definitions, structures, prototypes, and inlines that
 * should not be used outside of the actual implementation of UMA.
 */

/*
 * The brief summary:  Zones describe unique allocation types.  Zones are
 * organized into per-CPU caches which are filled by buckets.  Buckets are
 * organized according to memory domains.  Buckets are filled from kegs which
 * are also organized according to memory domains.  Kegs describe a unique
 * allocation type, backend memory provider, and layout.  Kegs are associated
 * with one or more zones and zones reference one or more kegs.  Kegs provide
 * slabs which are virtually contiguous collections of pages.  Each slab is
 * broken down into one or more items that will satisfy an individual
 * allocation.
 *
 * Allocation is satisfied in the following order:
 * 1) Per-CPU cache
 * 2) Per-domain cache of buckets
 * 3) Slab from any of N kegs
 * 4) Backend page provider
 *
 * More detail on individual objects is contained below:
 *
 * Kegs contain lists of slabs which are stored in either the full bin, empty
 * bin, or partially allocated bin, to reduce fragmentation.  They also contain
 * the user supplied value for size, which is adjusted for alignment purposes,
 * and rsize is the result of that.  The Keg also stores information for
 * managing a hash of page addresses that maps pages to uma_slab_t structures
 * for pages that don't have embedded uma_slab_t's.
 *
 * Keg slab lists are organized by memory domain to support NUMA allocation
 * policies.  By default allocations are spread across domains to reduce the
 * potential for hotspots.  Special keg creation flags may be specified to
 * prefer local allocation.  However there is no strict enforcement as frees
 * may happen on any CPU and these are returned to the CPU-local cache
 * regardless of the originating domain.
 *
 * The uma_slab_t may be embedded in a UMA_SLAB_SIZE chunk of memory or it may
 * be allocated off the page from a special slab zone.  The free list within a
 * slab is managed with a bitmask.  For item sizes that would yield more than
 * 10% memory waste we potentially allocate a separate uma_slab_t if this will
 * improve the number of items per slab that will fit.
 *
 * The only really gross cases, with regards to memory waste, are for those
 * items that are just over half the page size.  You can get nearly 50% waste,
 * so you fall back to the memory footprint of the power of two allocator.  I
 * have looked at memory allocation sizes on many of the machines available to
 * me, and there does not seem to be an abundance of allocations at this range
 * so at this time it may not make sense to optimize for it.  This can, of
 * course, be solved with dynamic slab sizes.
 *
 * Kegs may serve multiple Zones but by far most of the time they only serve
 * one.  When a Zone is created, a Keg is allocated and set up for it.  While
 * the backing Keg stores slabs, the Zone caches Buckets of items allocated
 * from the slabs.  Each Zone is equipped with an init/fini and ctor/dtor
 * pair, as well as with its own set of small per-CPU caches, layered above
 * the Zone's general Bucket cache.
 *
 * The PCPU caches are protected by critical sections, and may be accessed
 * safely only from their associated CPU, while the Zones backed by the same
 * Keg all share a common Keg lock (to coalesce contention on the backing
 * slabs).  The backing Keg typically only serves one Zone but in the case of
 * multiple Zones, one of the Zones is considered the Primary Zone and all
 * Zone-related stats from the Keg are done in the Primary Zone.  For an
 * example of a Multi-Zone setup, refer to the Mbuf allocation code.
 */

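/*
 * Illustrative consumer-side sketch (not part of this header): how the
 * layers above are reached through the public uma(9) API declared in
 * <vm/uma.h>.  The "foo" zone and structure are hypothetical.
 */
#if 0
#include <vm/uma.h>

struct foo { int f_refs; };
static uma_zone_t foo_zone;

static void
foo_init(void)
{
	/* One keg and one zone; items are served from per-CPU caches first. */
	foo_zone = uma_zcreate("foo", sizeof(struct foo),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
}

static struct foo *
foo_alloc(void)
{
	/* Falls through: CPU cache -> domain buckets -> keg slab -> backend. */
	return (uma_zalloc(foo_zone, M_WAITOK | M_ZERO));
}

static void
foo_free(struct foo *fp)
{
	/* Frees land in the CPU-local cache regardless of origin domain. */
	uma_zfree(foo_zone, fp);
}
#endif
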
/*
 * This is the representation for a normal (non-OFFPAGE) slab:
 *
 *	i == item
 *	s == slab pointer
 *
 *	<----------------  Page (UMA_SLAB_SIZE) ------------------>
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _   ___________ |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |slab header||
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |___________||
 *     |___________________________________________________________|
 *
 *
 *	This is an OFFPAGE slab.  These can be larger than UMA_SLAB_SIZE.
 *
 *	___________________________________________________________
 *     | _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  _  |
 *     ||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i||i| |
 *     ||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_||_| |
 *     |___________________________________________________________|
 *	___________    ^
 *     |slab header|   |
 *     |___________|---*
 *
 */

#ifndef VM_UMA_INT_H
#define VM_UMA_INT_H

#define UMA_SLAB_SIZE	PAGE_SIZE	/* How big are our slabs? */
#define UMA_SLAB_MASK	(PAGE_SIZE - 1)	/* Mask to get back to the page */
#define UMA_SLAB_SHIFT	PAGE_SHIFT	/* Number of bits in PAGE_MASK */

/* Max waste percentage before going to off page slab management */
#define UMA_MAX_WASTE	10

/* Max size of a CACHESPREAD slab. */
#define	UMA_CACHESPREAD_MAX_SIZE	(128 * 1024)

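/*
 * Illustrative sketch of the UMA_MAX_WASTE rule (a simplified calculation
 * that ignores the space taken by an in-line slab header; the hypothetical
 * helper below is not part of this header): with PAGE_SIZE == 4096,
 * 720-byte items leave 496 unused bytes (~12% > 10%), so an off-page slab
 * header is considered if it lets another item fit.
 */
#if 0
static int
example_consider_offpage(u_int item_size)
{
	u_int ipers = PAGE_SIZE / item_size;
	u_int waste = PAGE_SIZE - ipers * item_size;

	return (waste * 100 >= PAGE_SIZE * UMA_MAX_WASTE);
}
#endif
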
/*
 * These flags must not overlap with the UMA_ZONE flags specified in uma.h.
 */
#define	UMA_ZFLAG_OFFPAGE	0x00200000	/*
						 * Force the slab structure
						 * allocation off of the real
						 * memory.
						 */
#define	UMA_ZFLAG_HASH		0x00400000	/*
						 * Use a hash table instead of
						 * caching information in the
						 * vm_page.
						 */
#define	UMA_ZFLAG_VTOSLAB	0x00800000	/*
						 * Zone uses vtoslab for
						 * lookup.
						 */
#define	UMA_ZFLAG_CTORDTOR	0x01000000	/* Zone has ctor/dtor set. */
#define	UMA_ZFLAG_LIMIT		0x02000000	/* Zone has limit set. */
#define	UMA_ZFLAG_CACHE		0x04000000	/* uma_zcache_create()d it */
#define	UMA_ZFLAG_BUCKET	0x10000000	/* Bucket zone. */
#define	UMA_ZFLAG_INTERNAL	0x20000000	/* No offpage no PCPU. */
#define	UMA_ZFLAG_TRASH		0x40000000	/* Add trash ctor/dtor. */

#define	UMA_ZFLAG_INHERIT						\
    (UMA_ZFLAG_OFFPAGE | UMA_ZFLAG_HASH | UMA_ZFLAG_VTOSLAB |		\
     UMA_ZFLAG_BUCKET | UMA_ZFLAG_INTERNAL)

#define	PRINT_UMA_ZFLAGS	"\20"	\
    "\37TRASH"				\
    "\36INTERNAL"			\
    "\35BUCKET"				\
    "\33CACHE"				\
    "\32LIMIT"				\
    "\31CTORDTOR"			\
    "\30VTOSLAB"			\
    "\27HASH"				\
    "\26OFFPAGE"			\
    "\23SMR"				\
    "\22ROUNDROBIN"			\
    "\21FIRSTTOUCH"			\
    "\20PCPU"				\
    "\17NODUMP"				\
    "\16CACHESPREAD"			\
    "\14MAXBUCKET"			\
    "\13NOBUCKET"			\
    "\12SECONDARY"			\
    "\11NOTPAGE"			\
    "\10VM"				\
    "\7MTXCLASS"			\
    "\6NOFREE"				\
    "\5MALLOC"				\
    "\4NOTOUCH"				\
    "\3CONTIG"				\
    "\2ZINIT"

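/*
 * Illustrative sketch (assumption, not part of this header): the string
 * above is in the bit-name format consumed by the kernel printf(9) "%b"
 * conversion -- "\20" selects hexadecimal output and each "\<bit>NAME"
 * pair labels a 1-origin bit, e.g. "\37TRASH" names bit 31, which is
 * UMA_ZFLAG_TRASH (0x40000000).
 */
#if 0
	printf("flags = %b\n", UMA_ZFLAG_TRASH | UMA_ZFLAG_BUCKET,
	    PRINT_UMA_ZFLAGS);
	/* Prints the value in hex followed by "<TRASH,BUCKET>". */
#endif
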
/*
 * Hash table for freed address -> slab translation.
 *
 * Only zones with memory not touchable by the allocator use the
 * hash table.  Otherwise slabs are found with vtoslab().
 */
#define	UMA_HASH_SIZE_INIT	32

#define	UMA_HASH(h, s)	((((uintptr_t)s) >> UMA_SLAB_SHIFT) & (h)->uh_hashmask)

#define	UMA_HASH_INSERT(h, s, mem)					\
	LIST_INSERT_HEAD(&(h)->uh_slab_hash[UMA_HASH((h),		\
	    (mem))], slab_tohashslab(s), uhs_hlink)

#define	UMA_HASH_REMOVE(h, s)						\
	LIST_REMOVE(slab_tohashslab(s), uhs_hlink)

LIST_HEAD(slabhashhead, uma_hash_slab);

struct uma_hash {
	struct slabhashhead	*uh_slab_hash;	/* Hash table for slabs */
	u_int		uh_hashsize;	/* Current size of the hash table */
	u_int		uh_hashmask;	/* Mask used during hashing */
};

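/*
 * Illustrative sketch (not part of this header): how a freed address is
 * translated back to its slab for HASH zones, using UMA_HASH() together
 * with the uma_hash_slab structure defined further below.  A simplified
 * version of the lookup the implementation performs.
 */
#if 0
static uma_slab_t
example_hash_lookup(struct uma_hash *hash, uint8_t *data)
{
	uma_hash_slab_t slab;
	u_int hval;

	hval = UMA_HASH(hash, data);
	LIST_FOREACH(slab, &hash->uh_slab_hash[hval], uhs_hlink)
		if (slab->uhs_data == data)
			return (&slab->uhs_slab);
	return (NULL);
}
#endif
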
/*
 * Align field or structure to cache 'sector' in Intel terminology.  This
 * is more efficient with adjacent line prefetch.
 */
#if defined(__amd64__) || defined(__powerpc64__)
#define	UMA_SUPER_ALIGN	(CACHE_LINE_SIZE * 2)
#else
#define	UMA_SUPER_ALIGN	CACHE_LINE_SIZE
#endif

#define	UMA_ALIGN	__aligned(UMA_SUPER_ALIGN)

/*
 * The uma_bucket structure is used to queue and manage buckets divorced
 * from per-cpu caches.  They are loaded into uma_cache_bucket structures
 * for use.
 */
struct uma_bucket {
	STAILQ_ENTRY(uma_bucket)	ub_link; /* Link into the zone */
	int16_t		ub_cnt;			/* Count of items in bucket. */
	int16_t		ub_entries;		/* Max items. */
	smr_seq_t	ub_seq;			/* SMR sequence number. */
	void		*ub_bucket[];		/* actual allocation storage */
};

typedef struct uma_bucket * uma_bucket_t;

/*
 * The uma_cache_bucket structure is statically allocated on each per-cpu
 * cache.  Its use reduces branches and cache misses in the fast path.
 */
struct uma_cache_bucket {
	uma_bucket_t	ucb_bucket;
	int16_t		ucb_cnt;
	int16_t		ucb_entries;
	uint32_t	ucb_spare;
};

typedef struct uma_cache_bucket * uma_cache_bucket_t;

/*
 * The uma_cache structure is allocated for each cpu for every zone
 * type.  This optimizes synchronization out of the allocator fast path.
 */
struct uma_cache {
	struct uma_cache_bucket	uc_freebucket;	/* Bucket we're freeing to */
	struct uma_cache_bucket	uc_allocbucket;	/* Bucket to allocate from */
	struct uma_cache_bucket	uc_crossbucket;	/* cross domain bucket */
	uint64_t		uc_allocs;	/* Count of allocations */
	uint64_t		uc_frees;	/* Count of frees */
} UMA_ALIGN;

typedef struct uma_cache * uma_cache_t;

LIST_HEAD(slabhead, uma_slab);

/*
 * The cache structure pads perfectly into 64 bytes so we use spare
 * bits from the embedded cache buckets to store information from the zone
 * and keep all fast-path allocations accessing a single per-cpu line.
 */
static inline void
cache_set_uz_flags(uma_cache_t cache, uint32_t flags)
{

	cache->uc_freebucket.ucb_spare = flags;
}

static inline void
cache_set_uz_size(uma_cache_t cache, uint32_t size)
{

	cache->uc_allocbucket.ucb_spare = size;
}

static inline uint32_t
cache_uz_flags(uma_cache_t cache)
{

	return (cache->uc_freebucket.ucb_spare);
}

static inline uint32_t
cache_uz_size(uma_cache_t cache)
{

	return (cache->uc_allocbucket.ucb_spare);
}

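/*
 * Illustrative fast-path sketch (assumption: the zone structure, not shown
 * in this header, embeds a per-CPU array of struct uma_cache referred to
 * here as uz_cpu): allocation runs under a critical section, pops an item
 * from the current CPU's alloc bucket, and takes no locks.
 */
#if 0
	critical_enter();
	cache = &zone->uz_cpu[curcpu];
	bucket = &cache->uc_allocbucket;
	if (bucket->ucb_cnt != 0) {
		item = bucket->ucb_bucket->ub_bucket[--bucket->ucb_cnt];
		cache->uc_allocs++;
	}
	critical_exit();
#endif
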
/*
 * Per-domain slab lists.  Embedded in the kegs.
 */
struct uma_domain {
	struct mtx_padalign ud_lock;	/* Lock for the domain lists. */
	struct slabhead	ud_part_slab;	/* partially allocated slabs */
	struct slabhead	ud_free_slab;	/* completely unallocated slabs */
	struct slabhead	ud_full_slab;	/* fully allocated slabs */
	uint32_t	ud_pages;	/* Total page count */
	uint32_t	ud_free_items;	/* Count of items free in all slabs */
	uint32_t	ud_free_slabs;	/* Count of free slabs */
} __aligned(CACHE_LINE_SIZE);

typedef struct uma_domain * uma_domain_t;

/*
 * Keg management structure
 *
 * TODO: Optimize for cache line size
 *
 */
struct uma_keg {
	struct uma_hash	uk_hash;
	LIST_HEAD(,uma_zone)	uk_zones;	/* Keg's zones */

	struct domainset_ref uk_dr;	/* Domain selection policy. */
	uint32_t	uk_align;	/* Alignment mask */
	uint32_t	uk_reserve;	/* Number of reserved items. */
	uint32_t	uk_size;	/* Requested size of each item */
	uint32_t	uk_rsize;	/* Real size of each item */

	uma_init	uk_init;	/* Keg's init routine */
	uma_fini	uk_fini;	/* Keg's fini routine */
	uma_alloc	uk_allocf;	/* Allocation function */
	uma_free	uk_freef;	/* Free routine */

	u_long		uk_offset;	/* Next free offset from base KVA */
	vm_offset_t	uk_kva;		/* Zone base KVA */

	uint32_t	uk_pgoff;	/* Offset to uma_slab struct */
	uint16_t	uk_ppera;	/* pages per allocation from backend */
	uint16_t	uk_ipers;	/* Items per slab */
	uint32_t	uk_flags;	/* Internal flags */

	/* Least used fields go to the last cache line. */
	const char	*uk_name;		/* Name of creating zone. */
	LIST_ENTRY(uma_keg)	uk_link;	/* List of all kegs */

	/* Must be last, variable sized. */
	struct uma_domain	uk_domain[];	/* Keg's slab lists. */
};
typedef struct uma_keg	* uma_keg_t;

/*
 * Free bits per-slab.
 */
#define	SLAB_MAX_SETSIZE	(PAGE_SIZE / UMA_SMALLEST_UNIT)
#define	SLAB_MIN_SETSIZE	_BITSET_BITS
BITSET_DEFINE(noslabbits, 0);

/*
 * The slab structure manages a single contiguous allocation from backing
 * store and subdivides it into individually allocatable items.
 */
struct uma_slab {
	LIST_ENTRY(uma_slab)	us_link;	/* slabs in zone */
	uint16_t	us_freecount;		/* How many are free? */
	uint8_t		us_flags;		/* Page flags see uma.h */
	uint8_t		us_domain;		/* Backing NUMA domain. */
	struct noslabbits us_free;		/* Free bitmask, flexible. */
};
_Static_assert(sizeof(struct uma_slab) == __offsetof(struct uma_slab, us_free),
    "us_free field must be last");
_Static_assert(MAXMEMDOM < 255,
    "us_domain field is not wide enough");

typedef struct uma_slab * uma_slab_t;

/*
 * Slab structure with a full sized bitset and hash link for both
 * HASH and OFFPAGE zones.
 */
struct uma_hash_slab {
	LIST_ENTRY(uma_hash_slab) uhs_hlink;	/* Link for hash table */
	uint8_t			*uhs_data;	/* First item */
	struct uma_slab		uhs_slab;	/* Must be last. */
};

typedef struct uma_hash_slab * uma_hash_slab_t;

static inline uma_hash_slab_t
slab_tohashslab(uma_slab_t slab)
{

	return (__containerof(slab, struct uma_hash_slab, uhs_slab));
}

static inline void *
|
|
|
|
slab_data(uma_slab_t slab, uma_keg_t keg)
|
|
|
|
{
|
|
|
|
|
2020-01-09 02:03:03 +00:00
|
|
|
if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) == 0)
|
2019-12-08 01:15:06 +00:00
|
|
|
return ((void *)((uintptr_t)slab - keg->uk_pgoff));
|
|
|
|
else
|
2020-01-14 02:14:15 +00:00
|
|
|
return (slab_tohashslab(slab)->uhs_data);
|
2019-12-08 01:15:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void *
|
|
|
|
slab_item(uma_slab_t slab, uma_keg_t keg, int index)
|
|
|
|
{
|
|
|
|
uintptr_t data;
|
|
|
|
|
|
|
|
data = (uintptr_t)slab_data(slab, keg);
|
|
|
|
return ((void *)(data + keg->uk_rsize * index));
|
|
|
|
}

static inline int
slab_item_index(uma_slab_t slab, uma_keg_t keg, void *item)
{
	uintptr_t data;

	data = (uintptr_t)slab_data(slab, keg);
	return (((uintptr_t)item - data) / keg->uk_rsize);
}
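
/*
 * Illustrative sketch only (this helper is hypothetical and not part of
 * UMA): slab_item() and slab_item_index() are inverses over a slab's item
 * array; item "index" lives at slab_data(slab, keg) + index * uk_rsize.
 */
static inline int
slab_item_roundtrip_example(uma_slab_t slab, uma_keg_t keg, int index)
{

	return (slab_item_index(slab, keg, slab_item(slab, keg, index)) ==
	    index);
}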

STAILQ_HEAD(uma_bucketlist, uma_bucket);

struct uma_zone_domain {
	struct uma_bucketlist uzd_buckets; /* full buckets */
	uma_bucket_t	uzd_cross;	/* Fills from cross buckets. */
	long		uzd_nitems;	/* total item count */
	long		uzd_imax;	/* maximum item count this period */
	long		uzd_imin;	/* minimum item count this period */
	long		uzd_bimin;	/* Minimum item count this batch. */
	long		uzd_wss;	/* working set size estimate */
	long		uzd_limin;	/* Longtime minimum item count. */
	u_int		uzd_timin;	/* Time since uzd_limin == 0. */
	smr_seq_t	uzd_seq;	/* Lowest queued seq. */
	struct mtx	uzd_lock;	/* Lock for the domain */
} __aligned(CACHE_LINE_SIZE);

typedef struct uma_zone_domain * uma_zone_domain_t;

/*
 * Zone structure - per memory type.
 */
struct uma_zone {
	/* Offset 0, used in alloc/free fast/medium fast path and const. */
	uint32_t	uz_flags;	/* Flags inherited from kegs */
	uint32_t	uz_size;	/* Size inherited from kegs */
	uma_ctor	uz_ctor;	/* Constructor for each allocation */
	uma_dtor	uz_dtor;	/* Destructor */
	smr_t		uz_smr;		/* Safe memory reclaim context. */
	uint64_t	uz_max_items;	/* Maximum number of items to alloc */
	uint64_t	uz_bucket_max;	/* Maximum bucket cache size */
	uint16_t	uz_bucket_size;	/* Number of items in full bucket */
	uint16_t	uz_bucket_size_max; /* Maximum number of bucket items */
	uint32_t	uz_sleepers;	/* Threads sleeping on limit */
	counter_u64_t	uz_xdomain;	/* Total number of cross-domain frees */

	/* Offset 64, used in bucket replenish. */
	uma_keg_t	uz_keg;		/* This zone's keg if !CACHE */
	uma_import	uz_import;	/* Import new memory to cache. */
	uma_release	uz_release;	/* Release memory from cache. */
	void		*uz_arg;	/* Import/release argument. */
	uma_init	uz_init;	/* Initializer for each item */
	uma_fini	uz_fini;	/* Finalizer for each item. */
	volatile uint64_t uz_items;	/* Total items count & sleepers */
	uint64_t	uz_sleeps;	/* Total number of alloc sleeps */

	/* Offset 128, rare stats, misc read-only. */
	LIST_ENTRY(uma_zone) uz_link;	/* List of all zones in keg */
	counter_u64_t	uz_allocs;	/* Total number of allocations */
	counter_u64_t	uz_frees;	/* Total number of frees */
	counter_u64_t	uz_fails;	/* Total number of alloc failures */
	const char	*uz_name;	/* Text name of the zone */
	char		*uz_ctlname;	/* sysctl safe name string. */
	int		uz_namecnt;	/* duplicate name count. */
	uint16_t	uz_bucket_size_min; /* Min number of items in bucket */
	uint16_t	uz_reclaimers;	/* pending reclaim operations. */

	/* Offset 192, rare read-only. */
	struct sysctl_oid *uz_oid;	/* sysctl oid pointer. */
	const char	*uz_warning;	/* Warning to print on failure */
	struct timeval	uz_ratecheck;	/* Warnings rate-limiting */
	struct task	uz_maxaction;	/* Task to run when at limit */

	/* Offset 256. */
	struct mtx	uz_cross_lock;	/* Cross domain free lock */

	/*
	 * This HAS to be the last item because we adjust the zone size
	 * based on NCPU and then allocate the space for the zones.
	 */
	struct uma_cache	uz_cpu[]; /* Per cpu caches */

	/* domains follow here. */
};

/*
 * Macros for interpreting the uz_items field.  20 bits of sleeper count
 * and 44 bits of item count.
 */
#define	UZ_ITEMS_SLEEPER_SHIFT	44LL
#define	UZ_ITEMS_SLEEPERS_MAX	((1 << (64 - UZ_ITEMS_SLEEPER_SHIFT)) - 1)
#define	UZ_ITEMS_COUNT_MASK	((1LL << UZ_ITEMS_SLEEPER_SHIFT) - 1)
#define	UZ_ITEMS_COUNT(x)	((x) & UZ_ITEMS_COUNT_MASK)
#define	UZ_ITEMS_SLEEPERS(x)	((x) >> UZ_ITEMS_SLEEPER_SHIFT)
#define	UZ_ITEMS_SLEEPER	(1LL << UZ_ITEMS_SLEEPER_SHIFT)
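
/*
 * Illustrative sketch only (this helper is hypothetical, not part of UMA):
 * decompose a snapshot of uz_items into its item and sleeper counts.  For
 * example, (2 * UZ_ITEMS_SLEEPER + 100) decodes as 100 items, 2 sleepers.
 */
static inline void
uz_items_decode_example(uint64_t items, uint64_t *count, int *sleepers)
{

	*count = UZ_ITEMS_COUNT(items);
	*sleepers = (int)UZ_ITEMS_SLEEPERS(items);
}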

#define	ZONE_ASSERT_COLD(z)	\
	KASSERT(uma_zone_get_allocs((z)) == 0,	\
	    ("zone %s initialization after use.", (z)->uz_name))

/* Domains are contiguous after the last CPU */
#define	ZDOM_GET(z, n)	\
	(&((uma_zone_domain_t)&(z)->uz_cpu[mp_maxid + 1])[n])
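
/*
 * Illustrative sketch only (this helper is hypothetical, not part of UMA):
 * the per-domain structures are laid out as an array immediately after
 * uz_cpu[mp_maxid + 1], so ZDOM_GET(zone, i) is plain pointer arithmetic.
 * For example, summing the cached item counts of the first "ndom" domains:
 */
static inline long
zone_domains_nitems_example(uma_zone_t zone, int ndom)
{
	long total;
	int i;

	total = 0;
	for (i = 0; i < ndom; i++)
		total += ZDOM_GET(zone, i)->uzd_nitems;
	return (total);
}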

#undef UMA_ALIGN

#ifdef _KERNEL
/* Internal prototypes */
static __inline uma_slab_t hash_sfind(struct uma_hash *hash, uint8_t *data);

/* Lock Macros */

#define	KEG_LOCKPTR(k, d)	(struct mtx *)&(k)->uk_domain[(d)].ud_lock
#define	KEG_LOCK_INIT(k, d, lc)	\
	do {	\
		if ((lc))	\
			mtx_init(KEG_LOCKPTR(k, d), (k)->uk_name,	\
			    (k)->uk_name, MTX_DEF | MTX_DUPOK);	\
		else	\
			mtx_init(KEG_LOCKPTR(k, d), (k)->uk_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)

#define	KEG_LOCK_FINI(k, d)	mtx_destroy(KEG_LOCKPTR(k, d))
#define	KEG_LOCK(k, d)	\
	({ mtx_lock(KEG_LOCKPTR(k, d)); KEG_LOCKPTR(k, d); })
#define	KEG_UNLOCK(k, d)	mtx_unlock(KEG_LOCKPTR(k, d))
#define	KEG_LOCK_ASSERT(k, d)	mtx_assert(KEG_LOCKPTR(k, d), MA_OWNED)
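
/*
 * Illustrative sketch only (this helper is hypothetical, not part of UMA):
 * KEG_LOCK() evaluates to the locked per-domain mutex pointer, so a caller
 * can save it for a later mtx_unlock() or simply pair it with KEG_UNLOCK().
 */
static inline void
keg_lock_example(uma_keg_t keg, int domain)
{
	struct mtx *lock;

	lock = KEG_LOCK(keg, domain);
	KEG_LOCK_ASSERT(keg, domain);
	mtx_unlock(lock);
}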

#define	KEG_GET(zone, keg) do {	\
	(keg) = (zone)->uz_keg;	\
	KASSERT((void *)(keg) != NULL,	\
	    ("%s: Invalid zone %p type", __func__, (zone)));	\
	} while (0)

#define	KEG_ASSERT_COLD(k)	\
	KASSERT(uma_keg_get_allocs((k)) == 0,	\
	    ("keg %s initialization after use.", (k)->uk_name))

#define	ZDOM_LOCK_INIT(z, zdom, lc)	\
	do {	\
		if ((lc))	\
			mtx_init(&(zdom)->uzd_lock, (z)->uz_name,	\
			    (z)->uz_name, MTX_DEF | MTX_DUPOK);	\
		else	\
			mtx_init(&(zdom)->uzd_lock, (z)->uz_name,	\
			    "UMA zone", MTX_DEF | MTX_DUPOK);	\
	} while (0)
#define	ZDOM_LOCK_FINI(z)	mtx_destroy(&(z)->uzd_lock)
#define	ZDOM_LOCK_ASSERT(z)	mtx_assert(&(z)->uzd_lock, MA_OWNED)

#define	ZDOM_LOCK(z)	mtx_lock(&(z)->uzd_lock)
#define	ZDOM_OWNED(z)	(mtx_owner(&(z)->uzd_lock) != NULL)
#define	ZDOM_UNLOCK(z)	mtx_unlock(&(z)->uzd_lock)

#define	ZONE_LOCK(z)	ZDOM_LOCK(ZDOM_GET((z), 0))
#define	ZONE_UNLOCK(z)	ZDOM_UNLOCK(ZDOM_GET((z), 0))
#define	ZONE_LOCKPTR(z)	(&ZDOM_GET((z), 0)->uzd_lock)

#define	ZONE_CROSS_LOCK_INIT(z)	\
	mtx_init(&(z)->uz_cross_lock, "UMA Cross", NULL, MTX_DEF)
#define	ZONE_CROSS_LOCK(z)	mtx_lock(&(z)->uz_cross_lock)
#define	ZONE_CROSS_UNLOCK(z)	mtx_unlock(&(z)->uz_cross_lock)
#define	ZONE_CROSS_LOCK_FINI(z)	mtx_destroy(&(z)->uz_cross_lock)

/*
 * Find a slab within a hash table.  This is used for OFFPAGE zones to lookup
 * the slab structure.
 *
 * Arguments:
 *	hash  The hash table to search.
 *	data  The base page of the item.
 *
 * Returns:
 *	A pointer to a slab if successful, else NULL.
 */
static __inline uma_slab_t
hash_sfind(struct uma_hash *hash, uint8_t *data)
{
	uma_hash_slab_t slab;
	u_int hval;

	hval = UMA_HASH(hash, data);

	LIST_FOREACH(slab, &hash->uh_slab_hash[hval], uhs_hlink) {
		if ((uint8_t *)slab->uhs_data == data)
			return (&slab->uhs_slab);
	}
	return (NULL);
}
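
/*
 * Illustrative sketch only (this helper is hypothetical, not part of UMA):
 * once a uma_hash_slab has been linked into "hash", the embedded slab can
 * be found again from its data pointer, and slab_tohashslab() recovers the
 * enclosing uma_hash_slab.
 */
static __inline int
hash_sfind_roundtrip_example(struct uma_hash *hash, uma_hash_slab_t hslab)
{
	uma_slab_t slab;

	slab = hash_sfind(hash, hslab->uhs_data);
	return (slab != NULL && slab_tohashslab(slab) == hslab);
}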

static __inline uma_slab_t
vtoslab(vm_offset_t va)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	return (p->plinks.uma.slab);
}

static __inline void
vtozoneslab(vm_offset_t va, uma_zone_t *zone, uma_slab_t *slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	*slab = p->plinks.uma.slab;
	*zone = p->plinks.uma.zone;
}

static __inline void
vsetzoneslab(vm_offset_t va, uma_zone_t zone, uma_slab_t slab)
{
	vm_page_t p;

	p = PHYS_TO_VM_PAGE(pmap_kextract(va));
	p->plinks.uma.slab = slab;
	p->plinks.uma.zone = zone;
}
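
/*
 * Illustrative sketch only (this helper is hypothetical, not part of UMA):
 * vsetzoneslab() stashes the zone and slab pointers in the vm_page backing
 * "va", so vtozoneslab() on any address within that page returns them.
 */
static __inline int
vtozoneslab_matches_example(vm_offset_t va, uma_zone_t zone, uma_slab_t slab)
{
	uma_zone_t z;
	uma_slab_t s;

	vtozoneslab(va, &z, &s);
	return (z == zone && s == slab);
}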

extern unsigned long uma_kmem_limit;
extern unsigned long uma_kmem_total;

/* Adjust bytes under management by UMA. */
static inline void
uma_total_dec(unsigned long size)
{

	atomic_subtract_long(&uma_kmem_total, size);
}

static inline void
uma_total_inc(unsigned long size)
{

	if (atomic_fetchadd_long(&uma_kmem_total, size) > uma_kmem_limit)
		uma_reclaim_wakeup();
}

/*
 * The following two functions may be defined by architecture specific code
 * if they can provide more efficient allocation functions.  This is useful
 * for using direct mapped addresses.
 */
void *uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain,
    uint8_t *pflag, int wait);
void uma_small_free(void *mem, vm_size_t size, uint8_t flags);

/* Set a global soft limit on UMA managed memory. */
void uma_set_limit(unsigned long limit);

#endif	/* _KERNEL */

#endif	/* VM_UMA_INT_H */
|