2013-06-28 03:51:20 +00:00
|
|
|
/*-
|
2017-11-27 15:20:12 +00:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause-FreeBSD
|
|
|
|
*
|
2013-06-28 03:51:20 +00:00
|
|
|
* Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
|
|
|
|
* Copyright (c) 2013 EMC Corp.
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* From:
|
|
|
|
* $NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
|
|
|
|
* $NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* reference:
|
|
|
|
* - Magazines and Vmem: Extending the Slab Allocator
|
|
|
|
* to Many CPUs and Arbitrary Resources
|
|
|
|
* http://www.usenix.org/event/usenix01/bonwick.html
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
|
|
|
#include "opt_ddb.h"
|
|
|
|
|
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/queue.h>
|
|
|
|
#include <sys/callout.h>
|
|
|
|
#include <sys/hash.h>
|
|
|
|
#include <sys/lock.h>
|
|
|
|
#include <sys/malloc.h>
|
|
|
|
#include <sys/mutex.h>
|
|
|
|
#include <sys/smp.h>
|
|
|
|
#include <sys/condvar.h>
|
2013-08-20 11:06:56 +00:00
|
|
|
#include <sys/sysctl.h>
|
2013-06-28 03:51:20 +00:00
|
|
|
#include <sys/taskqueue.h>
|
|
|
|
#include <sys/vmem.h>
|
2018-02-06 22:10:07 +00:00
|
|
|
#include <sys/vmmeter.h>
|
2013-06-28 03:51:20 +00:00
|
|
|
|
2013-08-13 22:40:43 +00:00
|
|
|
#include "opt_vm.h"
|
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
#include <vm/uma.h>
|
|
|
|
#include <vm/vm.h>
|
|
|
|
#include <vm/pmap.h>
|
|
|
|
#include <vm/vm_map.h>
|
2013-08-07 06:21:20 +00:00
|
|
|
#include <vm/vm_object.h>
|
2013-06-28 03:51:20 +00:00
|
|
|
#include <vm/vm_kern.h>
|
|
|
|
#include <vm/vm_extern.h>
|
|
|
|
#include <vm/vm_param.h>
|
2018-01-12 23:13:55 +00:00
|
|
|
#include <vm/vm_page.h>
|
2013-06-28 03:51:20 +00:00
|
|
|
#include <vm/vm_pageout.h>
|
2018-02-06 22:10:07 +00:00
|
|
|
#include <vm/vm_phys.h>
|
|
|
|
#include <vm/vm_pagequeue.h>
|
2018-02-06 22:06:59 +00:00
|
|
|
#include <vm/uma_int.h>
|
|
|
|
|
2013-12-11 21:48:04 +00:00
|
|
|
#define VMEM_OPTORDER 5
|
|
|
|
#define VMEM_OPTVALUE (1 << VMEM_OPTORDER)
|
|
|
|
#define VMEM_MAXORDER \
|
|
|
|
(VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)
|
2013-06-28 03:51:20 +00:00
|
|
|
|
|
|
|
#define VMEM_HASHSIZE_MIN 16
|
|
|
|
#define VMEM_HASHSIZE_MAX 131072
|
|
|
|
|
|
|
|
#define VMEM_QCACHE_IDX_MAX 16
|
|
|
|
|
2019-05-18 01:46:38 +00:00
|
|
|
#define VMEM_FITMASK (M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)
|
2013-06-28 03:51:20 +00:00
|
|
|
|
2019-05-18 01:46:38 +00:00
|
|
|
#define VMEM_FLAGS (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | \
|
|
|
|
M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)
|
2013-06-28 03:51:20 +00:00
|
|
|
|
|
|
|
#define BT_FLAGS (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)
|
|
|
|
|
|
|
|
#define QC_NAME_MAX 16
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Data structures private to vmem.
|
|
|
|
*/
|
|
|
|
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");
|
|
|
|
|
|
|
|
typedef struct vmem_btag bt_t;
|
|
|
|
|
|
|
|
TAILQ_HEAD(vmem_seglist, vmem_btag);
|
|
|
|
LIST_HEAD(vmem_freelist, vmem_btag);
|
|
|
|
LIST_HEAD(vmem_hashlist, vmem_btag);
|
|
|
|
|
|
|
|
struct qcache {
|
|
|
|
uma_zone_t qc_cache;
|
|
|
|
vmem_t *qc_vmem;
|
|
|
|
vmem_size_t qc_size;
|
|
|
|
char qc_name[QC_NAME_MAX];
|
|
|
|
};
|
|
|
|
typedef struct qcache qcache_t;
|
|
|
|
#define QC_POOL_TO_QCACHE(pool) ((qcache_t *)(pool->pr_qcache))
|
|
|
|
|
|
|
|
#define VMEM_NAME_MAX 16
|
|
|
|
|
2019-05-18 01:46:38 +00:00
|
|
|
/* boundary tag */
|
|
|
|
struct vmem_btag {
|
|
|
|
TAILQ_ENTRY(vmem_btag) bt_seglist;
|
|
|
|
union {
|
|
|
|
LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
|
|
|
|
LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
|
|
|
|
} bt_u;
|
|
|
|
#define bt_hashlist bt_u.u_hashlist
|
|
|
|
#define bt_freelist bt_u.u_freelist
|
|
|
|
vmem_addr_t bt_start;
|
|
|
|
vmem_size_t bt_size;
|
|
|
|
int bt_type;
|
|
|
|
};
|
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
/* vmem arena */
|
|
|
|
struct vmem {
|
|
|
|
struct mtx_padalign vm_lock;
|
|
|
|
struct cv vm_cv;
|
|
|
|
char vm_name[VMEM_NAME_MAX+1];
|
|
|
|
LIST_ENTRY(vmem) vm_alllist;
|
|
|
|
struct vmem_hashlist vm_hash0[VMEM_HASHSIZE_MIN];
|
|
|
|
struct vmem_freelist vm_freelist[VMEM_MAXORDER];
|
|
|
|
struct vmem_seglist vm_seglist;
|
|
|
|
struct vmem_hashlist *vm_hashlist;
|
|
|
|
vmem_size_t vm_hashsize;
|
|
|
|
|
|
|
|
/* Constant after init */
|
|
|
|
vmem_size_t vm_qcache_max;
|
|
|
|
vmem_size_t vm_quantum_mask;
|
|
|
|
vmem_size_t vm_import_quantum;
|
|
|
|
int vm_quantum_shift;
|
|
|
|
|
|
|
|
/* Written on alloc/free */
|
|
|
|
LIST_HEAD(, vmem_btag) vm_freetags;
|
|
|
|
int vm_nfreetags;
|
|
|
|
int vm_nbusytag;
|
|
|
|
vmem_size_t vm_inuse;
|
|
|
|
vmem_size_t vm_size;
|
2017-11-28 23:40:54 +00:00
|
|
|
vmem_size_t vm_limit;
|
2019-05-18 01:46:38 +00:00
|
|
|
struct vmem_btag vm_cursor;
|
2013-06-28 03:51:20 +00:00
|
|
|
|
|
|
|
/* Used on import. */
|
|
|
|
vmem_import_t *vm_importfn;
|
|
|
|
vmem_release_t *vm_releasefn;
|
|
|
|
void *vm_arg;
|
|
|
|
|
|
|
|
/* Space exhaustion callback. */
|
|
|
|
vmem_reclaim_t *vm_reclaimfn;
|
|
|
|
|
|
|
|
/* quantum cache */
|
|
|
|
qcache_t vm_qcache[VMEM_QCACHE_IDX_MAX];
|
|
|
|
};
|
|
|
|
|
|
|
|
#define BT_TYPE_SPAN 1 /* Allocated from importfn */
|
|
|
|
#define BT_TYPE_SPAN_STATIC 2 /* vmem_add() or create. */
|
|
|
|
#define BT_TYPE_FREE 3 /* Available space. */
|
|
|
|
#define BT_TYPE_BUSY 4 /* Used space. */
|
2019-05-18 01:46:38 +00:00
|
|
|
#define BT_TYPE_CURSOR 5 /* Cursor for nextfit allocations. */
|
2013-06-28 03:51:20 +00:00
|
|
|
#define BT_ISSPAN_P(bt) ((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
|
|
|
|
|
|
|
|
#define BT_END(bt) ((bt)->bt_start + (bt)->bt_size - 1)
|
|
|
|
|
|
|
|
#if defined(DIAGNOSTIC)
|
2013-08-20 11:06:56 +00:00
|
|
|
static int enable_vmem_check = 1;
|
2015-03-28 23:30:51 +00:00
|
|
|
SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
|
2013-08-20 11:06:56 +00:00
|
|
|
&enable_vmem_check, 0, "Enable vmem check");
|
2013-06-28 03:51:20 +00:00
|
|
|
static void vmem_check(vmem_t *);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
static struct callout vmem_periodic_ch;
|
|
|
|
static int vmem_periodic_interval;
|
|
|
|
static struct task vmem_periodic_wk;
|
|
|
|
|
2017-09-06 20:28:18 +00:00
|
|
|
static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
|
2013-06-28 03:51:20 +00:00
|
|
|
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
|
2018-01-12 23:13:55 +00:00
|
|
|
static uma_zone_t vmem_zone;
|
2013-06-28 03:51:20 +00:00
|
|
|
|
|
|
|
/* ---- misc */
|
|
|
|
#define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan)
|
|
|
|
#define VMEM_CONDVAR_DESTROY(vm) cv_destroy(&vm->vm_cv)
|
|
|
|
#define VMEM_CONDVAR_WAIT(vm) cv_wait(&vm->vm_cv, &vm->vm_lock)
|
|
|
|
#define VMEM_CONDVAR_BROADCAST(vm) cv_broadcast(&vm->vm_cv)
|
|
|
|
|
|
|
|
#define VMEM_LOCK(vm) mtx_lock(&vm->vm_lock)
|
|
|
|
#define VMEM_TRYLOCK(vm) mtx_trylock(&vm->vm_lock)
|
|
|
|
#define VMEM_UNLOCK(vm) mtx_unlock(&vm->vm_lock)
|
|
|
|
#define VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
|
|
|
|
#define VMEM_LOCK_DESTROY(vm) mtx_destroy(&vm->vm_lock)
|
|
|
|
#define VMEM_ASSERT_LOCKED(vm) mtx_assert(&vm->vm_lock, MA_OWNED);
|
|
|
|
|
|
|
|
#define VMEM_ALIGNUP(addr, align) (-(-(addr) & -(align)))
|
|
|
|
|
|
|
|
#define VMEM_CROSS_P(addr1, addr2, boundary) \
|
|
|
|
((((addr1) ^ (addr2)) & -(boundary)) != 0)
|
|
|
|
|
2013-12-11 21:48:04 +00:00
|
|
|
#define ORDER2SIZE(order) ((order) < VMEM_OPTVALUE ? ((order) + 1) : \
|
|
|
|
(vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
|
|
|
|
#define SIZE2ORDER(size) ((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
|
|
|
|
(flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
|
2013-06-28 03:51:20 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Maximum number of boundary tags that may be required to satisfy an
|
|
|
|
* allocation. Two may be required to import. Another two may be
|
|
|
|
* required to clip edges.
|
|
|
|
*/
|
|
|
|
#define BT_MAXALLOC 4
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Max free limits the number of locally cached boundary tags. We
|
|
|
|
* just want to avoid hitting the zone allocator for every call.
|
|
|
|
*/
|
|
|
|
#define BT_MAXFREE (BT_MAXALLOC * 8)
|
|
|
|
|
|
|
|
/* Allocator for boundary tags. */
|
|
|
|
static uma_zone_t vmem_bt_zone;
|
|
|
|
|
|
|
|
/* boot time arena storage. */
|
2013-08-07 06:21:20 +00:00
|
|
|
static struct vmem kernel_arena_storage;
|
2013-06-28 03:51:20 +00:00
|
|
|
static struct vmem buffer_arena_storage;
|
|
|
|
static struct vmem transient_arena_storage;
|
2017-11-28 23:40:54 +00:00
|
|
|
/* kernel and kmem arenas are aliased for backwards KPI compat. */
|
2013-08-07 06:21:20 +00:00
|
|
|
vmem_t *kernel_arena = &kernel_arena_storage;
|
2017-11-28 23:40:54 +00:00
|
|
|
vmem_t *kmem_arena = &kernel_arena_storage;
|
2013-06-28 03:51:20 +00:00
|
|
|
vmem_t *buffer_arena = &buffer_arena_storage;
|
|
|
|
vmem_t *transient_arena = &transient_arena_storage;
|
|
|
|
|
2013-08-13 22:40:43 +00:00
|
|
|
#ifdef DEBUG_MEMGUARD
|
|
|
|
static struct vmem memguard_arena_storage;
|
|
|
|
vmem_t *memguard_arena = &memguard_arena_storage;
|
|
|
|
#endif
|
|
|
|
|
2020-08-26 14:31:35 +00:00
|
|
|
static bool
|
|
|
|
bt_isbusy(bt_t *bt)
|
|
|
|
{
|
|
|
|
return (bt->bt_type == BT_TYPE_BUSY);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool
|
|
|
|
bt_isfree(bt_t *bt)
|
|
|
|
{
|
|
|
|
return (bt->bt_type == BT_TYPE_FREE);
|
|
|
|
}
|
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
/*
|
|
|
|
* Fill the vmem's boundary tag cache. We guarantee that boundary tag
|
|
|
|
* allocation will not fail once bt_fill() passes. To do so we cache
|
|
|
|
* at least the maximum possible tag allocations in the arena.
|
|
|
|
*/
|
2020-10-19 16:52:27 +00:00
|
|
|
static __noinline int
|
|
|
|
_bt_fill(vmem_t *vm, int flags)
|
2013-06-28 03:51:20 +00:00
|
|
|
{
|
|
|
|
bt_t *bt;
|
|
|
|
|
|
|
|
VMEM_ASSERT_LOCKED(vm);
|
|
|
|
|
2013-08-07 06:21:20 +00:00
|
|
|
/*
|
2018-01-12 23:13:55 +00:00
|
|
|
* Only allow the kernel arena and arenas derived from kernel arena to
|
|
|
|
* dip into reserve tags. They are where new tags come from.
|
2013-08-07 06:21:20 +00:00
|
|
|
*/
|
|
|
|
flags &= BT_FLAGS;
|
2018-01-12 23:13:55 +00:00
|
|
|
if (vm != kernel_arena && vm->vm_arg != kernel_arena)
|
2013-08-07 06:21:20 +00:00
|
|
|
flags &= ~M_USE_RESERVE;
|
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
/*
|
|
|
|
* Loop until we meet the reserve. To minimize the lock shuffle
|
|
|
|
* and prevent simultaneous fills we first try a NOWAIT regardless
|
|
|
|
* of the caller's flags. Specify M_NOVM so we don't recurse while
|
|
|
|
* holding a vmem lock.
|
|
|
|
*/
|
|
|
|
while (vm->vm_nfreetags < BT_MAXALLOC) {
|
|
|
|
bt = uma_zalloc(vmem_bt_zone,
|
|
|
|
(flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
|
|
|
|
if (bt == NULL) {
|
|
|
|
VMEM_UNLOCK(vm);
|
|
|
|
bt = uma_zalloc(vmem_bt_zone, flags);
|
|
|
|
VMEM_LOCK(vm);
|
2018-10-25 15:40:59 +00:00
|
|
|
if (bt == NULL)
|
2013-06-28 03:51:20 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
|
|
|
|
vm->vm_nfreetags++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (vm->vm_nfreetags < BT_MAXALLOC)
|
|
|
|
return ENOMEM;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2020-10-19 16:52:27 +00:00
|
|
|
static inline int
|
|
|
|
bt_fill(vmem_t *vm, int flags)
|
|
|
|
{
|
|
|
|
if (vm->vm_nfreetags >= BT_MAXALLOC)
|
|
|
|
return (0);
|
|
|
|
return (_bt_fill(vm, flags));
|
|
|
|
}
|
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
/*
|
|
|
|
* Pop a tag off of the freetag stack.
|
|
|
|
*/
|
|
|
|
static bt_t *
|
|
|
|
bt_alloc(vmem_t *vm)
|
|
|
|
{
|
|
|
|
bt_t *bt;
|
|
|
|
|
|
|
|
VMEM_ASSERT_LOCKED(vm);
|
|
|
|
bt = LIST_FIRST(&vm->vm_freetags);
|
|
|
|
MPASS(bt != NULL);
|
|
|
|
LIST_REMOVE(bt, bt_freelist);
|
|
|
|
vm->vm_nfreetags--;
|
|
|
|
|
|
|
|
return bt;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Trim the per-vmem free list. Returns with the lock released to
|
|
|
|
* avoid allocator recursions.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
bt_freetrim(vmem_t *vm, int freelimit)
|
|
|
|
{
|
|
|
|
LIST_HEAD(, vmem_btag) freetags;
|
|
|
|
bt_t *bt;
|
|
|
|
|
|
|
|
LIST_INIT(&freetags);
|
|
|
|
VMEM_ASSERT_LOCKED(vm);
|
|
|
|
while (vm->vm_nfreetags > freelimit) {
|
|
|
|
bt = LIST_FIRST(&vm->vm_freetags);
|
|
|
|
LIST_REMOVE(bt, bt_freelist);
|
|
|
|
vm->vm_nfreetags--;
|
|
|
|
LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
|
|
|
|
}
|
|
|
|
VMEM_UNLOCK(vm);
|
|
|
|
while ((bt = LIST_FIRST(&freetags)) != NULL) {
|
|
|
|
LIST_REMOVE(bt, bt_freelist);
|
|
|
|
uma_zfree(vmem_bt_zone, bt);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void
|
|
|
|
bt_free(vmem_t *vm, bt_t *bt)
|
|
|
|
{
|
|
|
|
|
|
|
|
VMEM_ASSERT_LOCKED(vm);
|
|
|
|
MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
|
|
|
|
LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
|
|
|
|
vm->vm_nfreetags++;
|
|
|
|
}
|
|
|
|
|
vmem: Allocate btags before looping in vmem_xalloc()
BT_MAXALLOC (4) is the number of boundary tags required to complete an
allocation in the worst case: two to clip a free segment, and two to
import from a parent arena. vmem_xalloc() preallocates four boundary
tags before attempting a search to simplify the segment allocation code.
It implements a loop that:
1) ensures that BT_MAXALLOC boundary tags are available,
2) attempts to find and clip a free segment satisfying the allocation
constraints, and failing that,
3) attempts to import a segment.
On !UMA_MD_SMALL_ALLOC platforms the btag zone has to handle recusion:
it needs boundary tags to allocate boundary tags. Thus we reserve
2 * BT_MAXALLOC * mp_ncpus tags for use when recursing: the factor of 2
is because there are two layers of vmem arenas, the per-domain arena and
global arena. For a single thread, 2 * BT_MAXALLOC tags should be
sufficient.
Because of the way the loop is structured, BT_MAXALLOC tags are not
sufficient. The first bt_fill() call may allocate BT_MAXALLOC tags,
then import a segment (consuming two tags), then attempt to top up the
preallocation before carving into the imported free segment, thus
requiring up to six tags in the worst case. Because we don't
preallocate that many, this bug can cause deadlocks in rare scenarios.
Fix the problem by moving the preallocation out the loop. This assumes
that only a single import is ever required to satisfy an allocation
request.
Thanks to manu, emaste and lwhsu for helping test debug patches.
Reported by: Jenkins (hardware CI lab)
Reviewed by: alc, kib, rlibby
MFC after: 2 weeks
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D26770
2020-10-19 16:54:06 +00:00
|
|
|
/*
|
|
|
|
* Hide MAXALLOC tags before dropping the arena lock to ensure that a
|
|
|
|
* concurrent allocation attempt does not grab them.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
bt_save(vmem_t *vm)
|
|
|
|
{
|
|
|
|
KASSERT(vm->vm_nfreetags >= BT_MAXALLOC,
|
|
|
|
("%s: insufficient free tags %d", __func__, vm->vm_nfreetags));
|
|
|
|
vm->vm_nfreetags -= BT_MAXALLOC;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
bt_restore(vmem_t *vm)
|
|
|
|
{
|
|
|
|
vm->vm_nfreetags += BT_MAXALLOC;
|
|
|
|
}
|
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
/*
|
|
|
|
* freelist[0] ... [1, 1]
|
2013-12-11 21:48:04 +00:00
|
|
|
* freelist[1] ... [2, 2]
|
2013-06-28 03:51:20 +00:00
|
|
|
* :
|
2013-12-11 21:48:04 +00:00
|
|
|
* freelist[29] ... [30, 30]
|
|
|
|
* freelist[30] ... [31, 31]
|
|
|
|
* freelist[31] ... [32, 63]
|
|
|
|
* freelist[33] ... [64, 127]
|
|
|
|
* :
|
|
|
|
* freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
|
2013-06-28 03:51:20 +00:00
|
|
|
* :
|
|
|
|
*/
|
|
|
|
|
|
|
|
static struct vmem_freelist *
|
|
|
|
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
|
|
|
|
{
|
|
|
|
const vmem_size_t qsize = size >> vm->vm_quantum_shift;
|
|
|
|
const int idx = SIZE2ORDER(qsize);
|
|
|
|
|
|
|
|
MPASS(size != 0 && qsize != 0);
|
|
|
|
MPASS((size & vm->vm_quantum_mask) == 0);
|
|
|
|
MPASS(idx >= 0);
|
|
|
|
MPASS(idx < VMEM_MAXORDER);
|
|
|
|
|
|
|
|
return &vm->vm_freelist[idx];
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* bt_freehead_toalloc: return the freelist for the given size and allocation
|
|
|
|
* strategy.
|
|
|
|
*
|
|
|
|
* For M_FIRSTFIT, return the list in which any blocks are large enough
|
|
|
|
* for the requested size. otherwise, return the list which can have blocks
|
|
|
|
* large enough for the requested size.
|
|
|
|
*/
|
|
|
|
static struct vmem_freelist *
|
|
|
|
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
|
|
|
|
{
|
|
|
|
const vmem_size_t qsize = size >> vm->vm_quantum_shift;
|
|
|
|
int idx = SIZE2ORDER(qsize);
|
|
|
|
|
|
|
|
MPASS(size != 0 && qsize != 0);
|
|
|
|
MPASS((size & vm->vm_quantum_mask) == 0);
|
|
|
|
|
|
|
|
if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
|
|
|
|
idx++;
|
|
|
|
/* check too large request? */
|
|
|
|
}
|
|
|
|
MPASS(idx >= 0);
|
|
|
|
MPASS(idx < VMEM_MAXORDER);
|
|
|
|
|
|
|
|
return &vm->vm_freelist[idx];
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ---- boundary tag hash */
|
|
|
|
|
|
|
|
static struct vmem_hashlist *
|
|
|
|
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
|
|
|
|
{
|
|
|
|
struct vmem_hashlist *list;
|
|
|
|
unsigned int hash;
|
|
|
|
|
|
|
|
hash = hash32_buf(&addr, sizeof(addr), 0);
|
|
|
|
list = &vm->vm_hashlist[hash % vm->vm_hashsize];
|
|
|
|
|
|
|
|
return list;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bt_t *
|
|
|
|
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
|
|
|
|
{
|
|
|
|
struct vmem_hashlist *list;
|
|
|
|
bt_t *bt;
|
|
|
|
|
|
|
|
VMEM_ASSERT_LOCKED(vm);
|
|
|
|
list = bt_hashhead(vm, addr);
|
|
|
|
LIST_FOREACH(bt, list, bt_hashlist) {
|
|
|
|
if (bt->bt_start == addr) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return bt;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
bt_rembusy(vmem_t *vm, bt_t *bt)
|
|
|
|
{
|
|
|
|
|
|
|
|
VMEM_ASSERT_LOCKED(vm);
|
|
|
|
MPASS(vm->vm_nbusytag > 0);
|
|
|
|
vm->vm_inuse -= bt->bt_size;
|
|
|
|
vm->vm_nbusytag--;
|
|
|
|
LIST_REMOVE(bt, bt_hashlist);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
bt_insbusy(vmem_t *vm, bt_t *bt)
|
|
|
|
{
|
|
|
|
struct vmem_hashlist *list;
|
|
|
|
|
|
|
|
VMEM_ASSERT_LOCKED(vm);
|
|
|
|
MPASS(bt->bt_type == BT_TYPE_BUSY);
|
|
|
|
|
|
|
|
list = bt_hashhead(vm, bt->bt_start);
|
|
|
|
LIST_INSERT_HEAD(list, bt, bt_hashlist);
|
|
|
|
vm->vm_nbusytag++;
|
|
|
|
vm->vm_inuse += bt->bt_size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ---- boundary tag list */
|
|
|
|
|
|
|
|
static void
|
|
|
|
bt_remseg(vmem_t *vm, bt_t *bt)
|
|
|
|
{
|
|
|
|
|
2019-10-09 21:20:39 +00:00
|
|
|
MPASS(bt->bt_type != BT_TYPE_CURSOR);
|
2013-06-28 03:51:20 +00:00
|
|
|
TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
|
|
|
|
bt_free(vm, bt);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
|
|
|
|
{
|
|
|
|
|
|
|
|
TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
bt_insseg_tail(vmem_t *vm, bt_t *bt)
|
|
|
|
{
|
|
|
|
|
|
|
|
TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2020-11-17 02:18:34 +00:00
|
|
|
bt_remfree(vmem_t *vm __unused, bt_t *bt)
|
2013-06-28 03:51:20 +00:00
|
|
|
{
|
|
|
|
|
|
|
|
MPASS(bt->bt_type == BT_TYPE_FREE);
|
|
|
|
|
|
|
|
LIST_REMOVE(bt, bt_freelist);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
bt_insfree(vmem_t *vm, bt_t *bt)
|
|
|
|
{
|
|
|
|
struct vmem_freelist *list;
|
|
|
|
|
|
|
|
list = bt_freehead_tofree(vm, bt->bt_size);
|
|
|
|
LIST_INSERT_HEAD(list, bt, bt_freelist);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ---- vmem internal functions */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Import from the arena into the quantum cache in UMA.
|
2018-10-22 16:16:42 +00:00
|
|
|
*
|
|
|
|
* We use VMEM_ADDR_QCACHE_MIN instead of 0: uma_zalloc() returns 0 to indicate
|
|
|
|
* failure, so UMA can't be used to cache a resource with value 0.
|
2013-06-28 03:51:20 +00:00
|
|
|
*/
|
|
|
|
static int
|
2018-01-12 23:25:05 +00:00
|
|
|
qc_import(void *arg, void **store, int cnt, int domain, int flags)
|
2013-06-28 03:51:20 +00:00
|
|
|
{
|
|
|
|
qcache_t *qc;
|
|
|
|
vmem_addr_t addr;
|
|
|
|
int i;
|
|
|
|
|
2018-10-22 16:16:42 +00:00
|
|
|
KASSERT((flags & M_WAITOK) == 0, ("blocking allocation"));
|
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
qc = arg;
|
|
|
|
for (i = 0; i < cnt; i++) {
|
|
|
|
if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
|
2018-10-22 16:16:42 +00:00
|
|
|
VMEM_ADDR_QCACHE_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
|
2013-06-28 03:51:20 +00:00
|
|
|
break;
|
|
|
|
store[i] = (void *)addr;
|
|
|
|
}
|
2018-10-22 16:16:42 +00:00
|
|
|
return (i);
|
2013-06-28 03:51:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Release memory from the UMA cache to the arena.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
qc_release(void *arg, void **store, int cnt)
|
|
|
|
{
|
|
|
|
qcache_t *qc;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
qc = arg;
|
|
|
|
for (i = 0; i < cnt; i++)
|
|
|
|
vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
qc_init(vmem_t *vm, vmem_size_t qcache_max)
|
|
|
|
{
|
|
|
|
qcache_t *qc;
|
|
|
|
vmem_size_t size;
|
|
|
|
int qcache_idx_max;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
MPASS((qcache_max & vm->vm_quantum_mask) == 0);
|
|
|
|
qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
|
|
|
|
VMEM_QCACHE_IDX_MAX);
|
|
|
|
vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
|
|
|
|
for (i = 0; i < qcache_idx_max; i++) {
|
|
|
|
qc = &vm->vm_qcache[i];
|
|
|
|
size = (i + 1) << vm->vm_quantum_shift;
|
|
|
|
snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
|
|
|
|
vm->vm_name, size);
|
|
|
|
qc->qc_vmem = vm;
|
|
|
|
qc->qc_size = size;
|
|
|
|
qc->qc_cache = uma_zcache_create(qc->qc_name, size,
|
2020-02-19 08:17:27 +00:00
|
|
|
NULL, NULL, NULL, NULL, qc_import, qc_release, qc, 0);
|
2013-06-28 03:51:20 +00:00
|
|
|
MPASS(qc->qc_cache);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
qc_destroy(vmem_t *vm)
|
|
|
|
{
|
|
|
|
int qcache_idx_max;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
|
|
|
|
for (i = 0; i < qcache_idx_max; i++)
|
|
|
|
uma_zdestroy(vm->vm_qcache[i].qc_cache);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
qc_drain(vmem_t *vm)
|
|
|
|
{
|
|
|
|
int qcache_idx_max;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
|
|
|
|
for (i = 0; i < qcache_idx_max; i++)
|
2019-09-01 22:22:43 +00:00
|
|
|
uma_zone_reclaim(vm->vm_qcache[i].qc_cache, UMA_RECLAIM_DRAIN);
|
2013-06-28 03:51:20 +00:00
|
|
|
}
|
|
|
|
|
2013-08-07 06:21:20 +00:00
|
|
|
#ifndef UMA_MD_SMALL_ALLOC
|
|
|
|
|
2017-09-06 20:28:18 +00:00
|
|
|
static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;
|
2013-08-07 06:21:20 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* vmem_bt_alloc: Allocate a new page of boundary tags.
|
|
|
|
*
|
|
|
|
* On architectures with uma_small_alloc there is no recursion; no address
|
|
|
|
* space need be allocated to allocate boundary tags. For the others, we
|
|
|
|
* must handle recursion. Boundary tags are necessary to allocate new
|
|
|
|
* boundary tags.
|
|
|
|
*
|
|
|
|
* UMA guarantees that enough tags are held in reserve to allocate a new
|
|
|
|
* page of kva. We dip into this reserve by specifying M_USE_RESERVE only
|
|
|
|
* when allocating the page to hold new boundary tags. In this way the
|
|
|
|
* reserve is automatically filled by the allocation that uses the reserve.
|
|
|
|
*
|
|
|
|
* We still have to guarantee that the new tags are allocated atomically since
|
|
|
|
* many threads may try concurrently. The bt_lock provides this guarantee.
|
|
|
|
* We convert WAITOK allocations to NOWAIT and then handle the blocking here
|
|
|
|
* on failure. It's ok to return NULL for a WAITOK allocation as UMA will
|
|
|
|
* loop again after checking to see if we lost the race to allocate.
|
|
|
|
*
|
|
|
|
* There is a small race between vmem_bt_alloc() returning the page and the
|
|
|
|
* zone lock being acquired to add the page to the zone. For WAITOK
|
|
|
|
* allocations we just pause briefly. NOWAIT may experience a transient
|
|
|
|
* failure. To alleviate this we permit a small number of simultaneous
|
|
|
|
* fills to proceed concurrently so NOWAIT is less likely to fail unless
|
|
|
|
* we are really out of KVA.
|
|
|
|
*/
|
|
|
|
static void *
|
2018-01-12 23:25:05 +00:00
|
|
|
vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
|
|
|
|
int wait)
|
2013-08-07 06:21:20 +00:00
|
|
|
{
|
|
|
|
vmem_addr_t addr;
|
|
|
|
|
2017-11-28 23:40:54 +00:00
|
|
|
*pflag = UMA_SLAB_KERNEL;
|
2013-08-07 06:21:20 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Single thread boundary tag allocation so that the address space
|
|
|
|
* and memory are added in one atomic operation.
|
|
|
|
*/
|
|
|
|
mtx_lock(&vmem_bt_lock);
|
2018-01-12 23:13:55 +00:00
|
|
|
if (vmem_xalloc(vm_dom[domain].vmd_kernel_arena, bytes, 0, 0, 0,
|
|
|
|
VMEM_ADDR_MIN, VMEM_ADDR_MAX,
|
|
|
|
M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
|
|
|
|
if (kmem_back_domain(domain, kernel_object, addr, bytes,
|
2013-08-07 06:21:20 +00:00
|
|
|
M_NOWAIT | M_USE_RESERVE) == 0) {
|
|
|
|
mtx_unlock(&vmem_bt_lock);
|
|
|
|
return ((void *)addr);
|
|
|
|
}
|
2018-01-12 23:13:55 +00:00
|
|
|
vmem_xfree(vm_dom[domain].vmd_kernel_arena, addr, bytes);
|
2013-08-07 06:21:20 +00:00
|
|
|
mtx_unlock(&vmem_bt_lock);
|
|
|
|
/*
|
|
|
|
* Out of memory, not address space. This may not even be
|
|
|
|
* possible due to M_USE_RESERVE page allocation.
|
|
|
|
*/
|
|
|
|
if (wait & M_WAITOK)
|
2018-02-06 22:10:07 +00:00
|
|
|
vm_wait_domain(domain);
|
2013-08-07 06:21:20 +00:00
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
mtx_unlock(&vmem_bt_lock);
|
|
|
|
/*
|
|
|
|
* We're either out of address space or lost a fill race.
|
|
|
|
*/
|
|
|
|
if (wait & M_WAITOK)
|
|
|
|
pause("btalloc", 1);
|
|
|
|
|
|
|
|
return (NULL);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
void
|
|
|
|
vmem_startup(void)
|
|
|
|
{
|
|
|
|
|
|
|
|
mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
|
2018-01-12 23:13:55 +00:00
|
|
|
vmem_zone = uma_zcreate("vmem",
|
|
|
|
sizeof(struct vmem), NULL, NULL, NULL, NULL,
|
2020-02-19 08:17:27 +00:00
|
|
|
UMA_ALIGN_PTR, 0);
|
2013-06-28 03:51:20 +00:00
|
|
|
vmem_bt_zone = uma_zcreate("vmem btag",
|
|
|
|
sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
|
2020-02-19 08:17:27 +00:00
|
|
|
UMA_ALIGN_PTR, UMA_ZONE_VM);
|
2020-12-01 16:06:31 +00:00
|
|
|
#ifndef UMA_MD_SMALL_ALLOC
|
2013-08-07 06:21:20 +00:00
|
|
|
mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
|
|
|
|
uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
|
|
|
|
/*
|
|
|
|
* Reserve enough tags to allocate new tags. We allow multiple
|
|
|
|
* CPUs to attempt to allocate new tags concurrently to limit
|
2019-02-25 19:22:13 +00:00
|
|
|
* false restarts in UMA. vmem_bt_alloc() allocates from a per-domain
|
|
|
|
* arena, which may involve importing a range from the kernel arena,
|
|
|
|
* so we need to keep at least 2 * BT_MAXALLOC tags reserved.
|
2013-08-07 06:21:20 +00:00
|
|
|
*/
|
2019-02-25 19:22:13 +00:00
|
|
|
uma_zone_reserve(vmem_bt_zone, 2 * BT_MAXALLOC * mp_ncpus);
|
2013-08-07 06:21:20 +00:00
|
|
|
uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
|
|
|
|
#endif
|
2013-06-28 03:51:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* ---- rehash */
|
|
|
|
|
|
|
|
static int
|
|
|
|
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
|
|
|
|
{
|
|
|
|
bt_t *bt;
|
|
|
|
struct vmem_hashlist *newhashlist;
|
|
|
|
struct vmem_hashlist *oldhashlist;
|
2020-11-17 02:18:34 +00:00
|
|
|
vmem_size_t i, oldhashsize;
|
2013-06-28 03:51:20 +00:00
|
|
|
|
|
|
|
MPASS(newhashsize > 0);
|
|
|
|
|
2018-01-21 15:42:36 +00:00
|
|
|
newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
|
2013-06-28 03:51:20 +00:00
|
|
|
M_VMEM, M_NOWAIT);
|
|
|
|
if (newhashlist == NULL)
|
|
|
|
return ENOMEM;
|
|
|
|
for (i = 0; i < newhashsize; i++) {
|
|
|
|
LIST_INIT(&newhashlist[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
VMEM_LOCK(vm);
|
|
|
|
oldhashlist = vm->vm_hashlist;
|
|
|
|
oldhashsize = vm->vm_hashsize;
|
|
|
|
vm->vm_hashlist = newhashlist;
|
|
|
|
vm->vm_hashsize = newhashsize;
|
|
|
|
if (oldhashlist == NULL) {
|
|
|
|
VMEM_UNLOCK(vm);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
for (i = 0; i < oldhashsize; i++) {
|
|
|
|
while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
|
|
|
|
bt_rembusy(vm, bt);
|
|
|
|
bt_insbusy(vm, bt);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
VMEM_UNLOCK(vm);
|
|
|
|
|
2020-11-17 02:18:34 +00:00
|
|
|
if (oldhashlist != vm->vm_hash0)
|
2013-06-28 03:51:20 +00:00
|
|
|
free(oldhashlist, M_VMEM);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmem_periodic_kick(void *dummy)
|
|
|
|
{
|
|
|
|
|
|
|
|
taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmem_periodic(void *unused, int pending)
|
|
|
|
{
|
|
|
|
vmem_t *vm;
|
|
|
|
vmem_size_t desired;
|
|
|
|
vmem_size_t current;
|
|
|
|
|
|
|
|
mtx_lock(&vmem_list_lock);
|
|
|
|
LIST_FOREACH(vm, &vmem_list, vm_alllist) {
|
|
|
|
#ifdef DIAGNOSTIC
|
|
|
|
/* Convenient time to verify vmem state. */
|
2013-08-20 11:06:56 +00:00
|
|
|
if (enable_vmem_check == 1) {
|
|
|
|
VMEM_LOCK(vm);
|
|
|
|
vmem_check(vm);
|
|
|
|
VMEM_UNLOCK(vm);
|
|
|
|
}
|
2013-06-28 03:51:20 +00:00
|
|
|
#endif
|
|
|
|
desired = 1 << flsl(vm->vm_nbusytag);
|
|
|
|
desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
|
|
|
|
VMEM_HASHSIZE_MAX);
|
|
|
|
current = vm->vm_hashsize;
|
|
|
|
|
|
|
|
/* Grow in powers of two. Shrink less aggressively. */
|
|
|
|
if (desired >= current * 2 || desired * 4 <= current)
|
|
|
|
vmem_rehash(vm, desired);
|
2015-03-30 13:30:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Periodically wake up threads waiting for resources,
|
|
|
|
* so they could ask for reclamation again.
|
|
|
|
*/
|
|
|
|
VMEM_CONDVAR_BROADCAST(vm);
|
2013-06-28 03:51:20 +00:00
|
|
|
}
|
|
|
|
mtx_unlock(&vmem_list_lock);
|
|
|
|
|
|
|
|
callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
|
|
|
|
vmem_periodic_kick, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmem_start_callout(void *unused)
|
|
|
|
{
|
|
|
|
|
|
|
|
TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
|
|
|
|
vmem_periodic_interval = hz * 10;
|
2015-05-22 17:05:21 +00:00
|
|
|
callout_init(&vmem_periodic_ch, 1);
|
2013-06-28 03:51:20 +00:00
|
|
|
callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
|
|
|
|
vmem_periodic_kick, NULL);
|
|
|
|
}
|
|
|
|
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);
|
|
|
|
|
|
|
|
static void
|
2013-07-24 08:02:56 +00:00
|
|
|
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
|
2013-06-28 03:51:20 +00:00
|
|
|
{
|
2020-08-26 14:31:35 +00:00
|
|
|
bt_t *btfree, *btprev, *btspan;
|
2013-06-28 03:51:20 +00:00
|
|
|
|
2020-08-26 14:31:35 +00:00
|
|
|
VMEM_ASSERT_LOCKED(vm);
|
2013-06-28 03:51:20 +00:00
|
|
|
MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
|
2013-08-19 23:02:39 +00:00
|
|
|
MPASS((size & vm->vm_quantum_mask) == 0);
|
2013-06-28 03:51:20 +00:00
|
|
|
|
2020-08-26 14:31:35 +00:00
|
|
|
if (vm->vm_releasefn == NULL) {
|
|
|
|
/*
|
|
|
|
* The new segment will never be released, so see if it is
|
|
|
|
* contiguous with respect to an existing segment. In this case
|
|
|
|
* a span tag is not needed, and it may be possible now or in
|
|
|
|
* the future to coalesce the new segment with an existing free
|
|
|
|
* segment.
|
|
|
|
*/
|
|
|
|
btprev = TAILQ_LAST(&vm->vm_seglist, vmem_seglist);
|
|
|
|
if ((!bt_isbusy(btprev) && !bt_isfree(btprev)) ||
|
|
|
|
btprev->bt_start + btprev->bt_size != addr)
|
|
|
|
btprev = NULL;
|
|
|
|
} else {
|
|
|
|
btprev = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (btprev == NULL || bt_isbusy(btprev)) {
|
|
|
|
if (btprev == NULL) {
|
|
|
|
btspan = bt_alloc(vm);
|
|
|
|
btspan->bt_type = type;
|
|
|
|
btspan->bt_start = addr;
|
|
|
|
btspan->bt_size = size;
|
|
|
|
bt_insseg_tail(vm, btspan);
|
|
|
|
}
|
2013-06-28 03:51:20 +00:00
|
|
|
|
2020-08-26 14:31:35 +00:00
|
|
|
btfree = bt_alloc(vm);
|
|
|
|
btfree->bt_type = BT_TYPE_FREE;
|
|
|
|
btfree->bt_start = addr;
|
|
|
|
btfree->bt_size = size;
|
|
|
|
bt_insseg_tail(vm, btfree);
|
|
|
|
bt_insfree(vm, btfree);
|
|
|
|
} else {
|
|
|
|
bt_remfree(vm, btprev);
|
|
|
|
btprev->bt_size += size;
|
|
|
|
bt_insfree(vm, btprev);
|
|
|
|
}
|
2013-08-07 06:21:20 +00:00
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
vm->vm_size += size;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmem_destroy1(vmem_t *vm)
|
|
|
|
{
|
|
|
|
bt_t *bt;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Drain per-cpu quantum caches.
|
|
|
|
*/
|
|
|
|
qc_destroy(vm);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The vmem should now only contain empty segments.
|
|
|
|
*/
|
|
|
|
VMEM_LOCK(vm);
|
|
|
|
MPASS(vm->vm_nbusytag == 0);
|
|
|
|
|
2019-10-09 21:20:39 +00:00
|
|
|
TAILQ_REMOVE(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
|
2013-06-28 03:51:20 +00:00
|
|
|
while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
|
|
|
|
bt_remseg(vm, bt);
|
|
|
|
|
|
|
|
if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
|
|
|
|
free(vm->vm_hashlist, M_VMEM);
|
|
|
|
|
|
|
|
bt_freetrim(vm, 0);
|
|
|
|
|
|
|
|
VMEM_CONDVAR_DESTROY(vm);
|
|
|
|
VMEM_LOCK_DESTROY(vm);
|
2018-01-12 23:13:55 +00:00
|
|
|
uma_zfree(vmem_zone, vm);
|
2013-06-28 03:51:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2013-08-19 23:02:39 +00:00
|
|
|
vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
|
2013-06-28 03:51:20 +00:00
|
|
|
{
|
|
|
|
vmem_addr_t addr;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
if (vm->vm_importfn == NULL)
|
2017-11-28 23:40:54 +00:00
|
|
|
return (EINVAL);
|
2013-06-28 03:51:20 +00:00
|
|
|
|
2013-08-19 23:02:39 +00:00
|
|
|
/*
|
|
|
|
* To make sure we get a span that meets the alignment we double it
|
|
|
|
* and add the size to the tail. This slightly overestimates.
|
|
|
|
*/
|
|
|
|
if (align != vm->vm_quantum_mask + 1)
|
|
|
|
size = (align * 2) + size;
|
2013-06-28 03:51:20 +00:00
|
|
|
size = roundup(size, vm->vm_import_quantum);
|
|
|
|
|
2017-11-28 23:40:54 +00:00
|
|
|
if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
|
|
|
|
return (ENOMEM);
|
|
|
|
|
vmem: Allocate btags before looping in vmem_xalloc()
BT_MAXALLOC (4) is the number of boundary tags required to complete an
allocation in the worst case: two to clip a free segment, and two to
import from a parent arena. vmem_xalloc() preallocates four boundary
tags before attempting a search to simplify the segment allocation code.
It implements a loop that:
1) ensures that BT_MAXALLOC boundary tags are available,
2) attempts to find and clip a free segment satisfying the allocation
constraints, and failing that,
3) attempts to import a segment.
On !UMA_MD_SMALL_ALLOC platforms the btag zone has to handle recusion:
it needs boundary tags to allocate boundary tags. Thus we reserve
2 * BT_MAXALLOC * mp_ncpus tags for use when recursing: the factor of 2
is because there are two layers of vmem arenas, the per-domain arena and
global arena. For a single thread, 2 * BT_MAXALLOC tags should be
sufficient.
Because of the way the loop is structured, BT_MAXALLOC tags are not
sufficient. The first bt_fill() call may allocate BT_MAXALLOC tags,
then import a segment (consuming two tags), then attempt to top up the
preallocation before carving into the imported free segment, thus
requiring up to six tags in the worst case. Because we don't
preallocate that many, this bug can cause deadlocks in rare scenarios.
Fix the problem by moving the preallocation out the loop. This assumes
that only a single import is ever required to satisfy an allocation
request.
Thanks to manu, emaste and lwhsu for helping test debug patches.
Reported by: Jenkins (hardware CI lab)
Reviewed by: alc, kib, rlibby
MFC after: 2 weeks
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D26770
2020-10-19 16:54:06 +00:00
|
|
|
bt_save(vm);
|
2013-06-28 03:51:20 +00:00
|
|
|
VMEM_UNLOCK(vm);
|
|
|
|
error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
|
|
|
|
VMEM_LOCK(vm);
|
vmem: Allocate btags before looping in vmem_xalloc()
BT_MAXALLOC (4) is the number of boundary tags required to complete an
allocation in the worst case: two to clip a free segment, and two to
import from a parent arena. vmem_xalloc() preallocates four boundary
tags before attempting a search to simplify the segment allocation code.
It implements a loop that:
1) ensures that BT_MAXALLOC boundary tags are available,
2) attempts to find and clip a free segment satisfying the allocation
constraints, and failing that,
3) attempts to import a segment.
On !UMA_MD_SMALL_ALLOC platforms the btag zone has to handle recusion:
it needs boundary tags to allocate boundary tags. Thus we reserve
2 * BT_MAXALLOC * mp_ncpus tags for use when recursing: the factor of 2
is because there are two layers of vmem arenas, the per-domain arena and
global arena. For a single thread, 2 * BT_MAXALLOC tags should be
sufficient.
Because of the way the loop is structured, BT_MAXALLOC tags are not
sufficient. The first bt_fill() call may allocate BT_MAXALLOC tags,
then import a segment (consuming two tags), then attempt to top up the
preallocation before carving into the imported free segment, thus
requiring up to six tags in the worst case. Because we don't
preallocate that many, this bug can cause deadlocks in rare scenarios.
Fix the problem by moving the preallocation out the loop. This assumes
that only a single import is ever required to satisfy an allocation
request.
Thanks to manu, emaste and lwhsu for helping test debug patches.
Reported by: Jenkins (hardware CI lab)
Reviewed by: alc, kib, rlibby
MFC after: 2 weeks
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D26770
2020-10-19 16:54:06 +00:00
|
|
|
bt_restore(vm);
|
2013-06-28 03:51:20 +00:00
|
|
|
if (error)
|
2017-11-28 23:40:54 +00:00
|
|
|
return (ENOMEM);
|
2013-06-28 03:51:20 +00:00
|
|
|
|
2013-07-24 08:02:56 +00:00
|
|
|
vmem_add1(vm, addr, size, BT_TYPE_SPAN);
|
2013-06-28 03:51:20 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* vmem_fit: check if a bt can satisfy the given restrictions.
|
|
|
|
*
|
|
|
|
* it's a caller's responsibility to ensure the region is big enough
|
|
|
|
* before calling us.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
|
|
|
|
vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
|
|
|
|
vmem_addr_t maxaddr, vmem_addr_t *addrp)
|
|
|
|
{
|
|
|
|
vmem_addr_t start;
|
|
|
|
vmem_addr_t end;
|
|
|
|
|
|
|
|
MPASS(size > 0);
|
|
|
|
MPASS(bt->bt_size >= size); /* caller's responsibility */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX assumption: vmem_addr_t and vmem_size_t are
|
|
|
|
* unsigned integer of the same size.
|
|
|
|
*/
|
|
|
|
|
|
|
|
start = bt->bt_start;
|
|
|
|
if (start < minaddr) {
|
|
|
|
start = minaddr;
|
|
|
|
}
|
|
|
|
end = BT_END(bt);
|
|
|
|
if (end > maxaddr)
|
|
|
|
end = maxaddr;
|
|
|
|
if (start > end)
|
|
|
|
return (ENOMEM);
|
|
|
|
|
|
|
|
start = VMEM_ALIGNUP(start - phase, align) + phase;
|
|
|
|
if (start < bt->bt_start)
|
|
|
|
start += align;
|
|
|
|
if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
|
|
|
|
MPASS(align < nocross);
|
|
|
|
start = VMEM_ALIGNUP(start - phase, nocross) + phase;
|
|
|
|
}
|
|
|
|
if (start <= end && end - start >= size - 1) {
|
|
|
|
MPASS((start & (align - 1)) == phase);
|
|
|
|
MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
|
|
|
|
MPASS(minaddr <= start);
|
|
|
|
MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
|
|
|
|
MPASS(bt->bt_start <= start);
|
|
|
|
MPASS(BT_END(bt) - start >= size - 1);
|
|
|
|
*addrp = start;
|
|
|
|
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
return (ENOMEM);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* vmem_clip: Trim the boundary tag edges to the requested start and size.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
|
|
|
|
{
|
|
|
|
bt_t *btnew;
|
|
|
|
bt_t *btprev;
|
|
|
|
|
|
|
|
VMEM_ASSERT_LOCKED(vm);
|
|
|
|
MPASS(bt->bt_type == BT_TYPE_FREE);
|
|
|
|
MPASS(bt->bt_size >= size);
|
|
|
|
bt_remfree(vm, bt);
|
|
|
|
if (bt->bt_start != start) {
|
|
|
|
btprev = bt_alloc(vm);
|
|
|
|
btprev->bt_type = BT_TYPE_FREE;
|
|
|
|
btprev->bt_start = bt->bt_start;
|
|
|
|
btprev->bt_size = start - bt->bt_start;
|
|
|
|
bt->bt_start = start;
|
|
|
|
bt->bt_size -= btprev->bt_size;
|
|
|
|
bt_insfree(vm, btprev);
|
|
|
|
bt_insseg(vm, btprev,
|
|
|
|
TAILQ_PREV(bt, vmem_seglist, bt_seglist));
|
|
|
|
}
|
|
|
|
MPASS(bt->bt_start == start);
|
|
|
|
if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
|
|
|
|
/* split */
|
|
|
|
btnew = bt_alloc(vm);
|
|
|
|
btnew->bt_type = BT_TYPE_BUSY;
|
|
|
|
btnew->bt_start = bt->bt_start;
|
|
|
|
btnew->bt_size = size;
|
|
|
|
bt->bt_start = bt->bt_start + size;
|
|
|
|
bt->bt_size -= size;
|
|
|
|
bt_insfree(vm, bt);
|
|
|
|
bt_insseg(vm, btnew,
|
|
|
|
TAILQ_PREV(bt, vmem_seglist, bt_seglist));
|
|
|
|
bt_insbusy(vm, btnew);
|
|
|
|
bt = btnew;
|
|
|
|
} else {
|
|
|
|
bt->bt_type = BT_TYPE_BUSY;
|
|
|
|
bt_insbusy(vm, bt);
|
|
|
|
}
|
|
|
|
MPASS(bt->bt_size >= size);
|
|
|
|
}
|
|
|
|
|
2019-05-18 01:46:38 +00:00
|
|
|
static int
|
|
|
|
vmem_try_fetch(vmem_t *vm, const vmem_size_t size, vmem_size_t align, int flags)
|
|
|
|
{
|
|
|
|
vmem_size_t avail;
|
|
|
|
|
|
|
|
VMEM_ASSERT_LOCKED(vm);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX it is possible to fail to meet xalloc constraints with the
|
|
|
|
* imported region. It is up to the user to specify the
|
|
|
|
* import quantum such that it can satisfy any allocation.
|
|
|
|
*/
|
|
|
|
if (vmem_import(vm, size, align, flags) == 0)
|
|
|
|
return (1);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Try to free some space from the quantum cache or reclaim
|
|
|
|
* functions if available.
|
|
|
|
*/
|
|
|
|
if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
|
|
|
|
avail = vm->vm_size - vm->vm_inuse;
|
vmem: Allocate btags before looping in vmem_xalloc()
BT_MAXALLOC (4) is the number of boundary tags required to complete an
allocation in the worst case: two to clip a free segment, and two to
import from a parent arena. vmem_xalloc() preallocates four boundary
tags before attempting a search to simplify the segment allocation code.
It implements a loop that:
1) ensures that BT_MAXALLOC boundary tags are available,
2) attempts to find and clip a free segment satisfying the allocation
constraints, and failing that,
3) attempts to import a segment.
On !UMA_MD_SMALL_ALLOC platforms the btag zone has to handle recusion:
it needs boundary tags to allocate boundary tags. Thus we reserve
2 * BT_MAXALLOC * mp_ncpus tags for use when recursing: the factor of 2
is because there are two layers of vmem arenas, the per-domain arena and
global arena. For a single thread, 2 * BT_MAXALLOC tags should be
sufficient.
Because of the way the loop is structured, BT_MAXALLOC tags are not
sufficient. The first bt_fill() call may allocate BT_MAXALLOC tags,
then import a segment (consuming two tags), then attempt to top up the
preallocation before carving into the imported free segment, thus
requiring up to six tags in the worst case. Because we don't
preallocate that many, this bug can cause deadlocks in rare scenarios.
Fix the problem by moving the preallocation out the loop. This assumes
that only a single import is ever required to satisfy an allocation
request.
Thanks to manu, emaste and lwhsu for helping test debug patches.
Reported by: Jenkins (hardware CI lab)
Reviewed by: alc, kib, rlibby
MFC after: 2 weeks
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D26770
2020-10-19 16:54:06 +00:00
|
|
|
bt_save(vm);
|
2019-05-18 01:46:38 +00:00
|
|
|
VMEM_UNLOCK(vm);
|
|
|
|
if (vm->vm_qcache_max != 0)
|
|
|
|
qc_drain(vm);
|
|
|
|
if (vm->vm_reclaimfn != NULL)
|
|
|
|
vm->vm_reclaimfn(vm, flags);
|
|
|
|
VMEM_LOCK(vm);
|
vmem: Allocate btags before looping in vmem_xalloc()
BT_MAXALLOC (4) is the number of boundary tags required to complete an
allocation in the worst case: two to clip a free segment, and two to
import from a parent arena. vmem_xalloc() preallocates four boundary
tags before attempting a search to simplify the segment allocation code.
It implements a loop that:
1) ensures that BT_MAXALLOC boundary tags are available,
2) attempts to find and clip a free segment satisfying the allocation
constraints, and failing that,
3) attempts to import a segment.
On !UMA_MD_SMALL_ALLOC platforms the btag zone has to handle recusion:
it needs boundary tags to allocate boundary tags. Thus we reserve
2 * BT_MAXALLOC * mp_ncpus tags for use when recursing: the factor of 2
is because there are two layers of vmem arenas, the per-domain arena and
global arena. For a single thread, 2 * BT_MAXALLOC tags should be
sufficient.
Because of the way the loop is structured, BT_MAXALLOC tags are not
sufficient. The first bt_fill() call may allocate BT_MAXALLOC tags,
then import a segment (consuming two tags), then attempt to top up the
preallocation before carving into the imported free segment, thus
requiring up to six tags in the worst case. Because we don't
preallocate that many, this bug can cause deadlocks in rare scenarios.
Fix the problem by moving the preallocation out the loop. This assumes
that only a single import is ever required to satisfy an allocation
request.
Thanks to manu, emaste and lwhsu for helping test debug patches.
Reported by: Jenkins (hardware CI lab)
Reviewed by: alc, kib, rlibby
MFC after: 2 weeks
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D26770
2020-10-19 16:54:06 +00:00
|
|
|
bt_restore(vm);
|
2019-05-18 01:46:38 +00:00
|
|
|
/* If we were successful retry even NOWAIT. */
|
|
|
|
if (vm->vm_size - vm->vm_inuse > avail)
|
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
if ((flags & M_NOWAIT) != 0)
|
|
|
|
return (0);
|
vmem: Allocate btags before looping in vmem_xalloc()
BT_MAXALLOC (4) is the number of boundary tags required to complete an
allocation in the worst case: two to clip a free segment, and two to
import from a parent arena. vmem_xalloc() preallocates four boundary
tags before attempting a search to simplify the segment allocation code.
It implements a loop that:
1) ensures that BT_MAXALLOC boundary tags are available,
2) attempts to find and clip a free segment satisfying the allocation
constraints, and failing that,
3) attempts to import a segment.
On !UMA_MD_SMALL_ALLOC platforms the btag zone has to handle recusion:
it needs boundary tags to allocate boundary tags. Thus we reserve
2 * BT_MAXALLOC * mp_ncpus tags for use when recursing: the factor of 2
is because there are two layers of vmem arenas, the per-domain arena and
global arena. For a single thread, 2 * BT_MAXALLOC tags should be
sufficient.
Because of the way the loop is structured, BT_MAXALLOC tags are not
sufficient. The first bt_fill() call may allocate BT_MAXALLOC tags,
then import a segment (consuming two tags), then attempt to top up the
preallocation before carving into the imported free segment, thus
requiring up to six tags in the worst case. Because we don't
preallocate that many, this bug can cause deadlocks in rare scenarios.
Fix the problem by moving the preallocation out the loop. This assumes
that only a single import is ever required to satisfy an allocation
request.
Thanks to manu, emaste and lwhsu for helping test debug patches.
Reported by: Jenkins (hardware CI lab)
Reviewed by: alc, kib, rlibby
MFC after: 2 weeks
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D26770
2020-10-19 16:54:06 +00:00
|
|
|
bt_save(vm);
|
2019-05-18 01:46:38 +00:00
|
|
|
VMEM_CONDVAR_WAIT(vm);
|
vmem: Allocate btags before looping in vmem_xalloc()
BT_MAXALLOC (4) is the number of boundary tags required to complete an
allocation in the worst case: two to clip a free segment, and two to
import from a parent arena. vmem_xalloc() preallocates four boundary
tags before attempting a search to simplify the segment allocation code.
It implements a loop that:
1) ensures that BT_MAXALLOC boundary tags are available,
2) attempts to find and clip a free segment satisfying the allocation
constraints, and failing that,
3) attempts to import a segment.
On !UMA_MD_SMALL_ALLOC platforms the btag zone has to handle recusion:
it needs boundary tags to allocate boundary tags. Thus we reserve
2 * BT_MAXALLOC * mp_ncpus tags for use when recursing: the factor of 2
is because there are two layers of vmem arenas, the per-domain arena and
global arena. For a single thread, 2 * BT_MAXALLOC tags should be
sufficient.
Because of the way the loop is structured, BT_MAXALLOC tags are not
sufficient. The first bt_fill() call may allocate BT_MAXALLOC tags,
then import a segment (consuming two tags), then attempt to top up the
preallocation before carving into the imported free segment, thus
requiring up to six tags in the worst case. Because we don't
preallocate that many, this bug can cause deadlocks in rare scenarios.
Fix the problem by moving the preallocation out the loop. This assumes
that only a single import is ever required to satisfy an allocation
request.
Thanks to manu, emaste and lwhsu for helping test debug patches.
Reported by: Jenkins (hardware CI lab)
Reviewed by: alc, kib, rlibby
MFC after: 2 weeks
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D26770
2020-10-19 16:54:06 +00:00
|
|
|
bt_restore(vm);
|
2019-05-18 01:46:38 +00:00
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
vmem_try_release(vmem_t *vm, struct vmem_btag *bt, const bool remfree)
|
|
|
|
{
|
|
|
|
struct vmem_btag *prev;
|
|
|
|
|
|
|
|
MPASS(bt->bt_type == BT_TYPE_FREE);
|
|
|
|
|
|
|
|
if (vm->vm_releasefn == NULL)
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
prev = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
|
|
|
|
MPASS(prev != NULL);
|
|
|
|
MPASS(prev->bt_type != BT_TYPE_FREE);
|
|
|
|
|
|
|
|
if (prev->bt_type == BT_TYPE_SPAN && prev->bt_size == bt->bt_size) {
|
|
|
|
vmem_addr_t spanaddr;
|
|
|
|
vmem_size_t spansize;
|
|
|
|
|
|
|
|
MPASS(prev->bt_start == bt->bt_start);
|
|
|
|
spanaddr = prev->bt_start;
|
|
|
|
spansize = prev->bt_size;
|
|
|
|
if (remfree)
|
|
|
|
bt_remfree(vm, bt);
|
|
|
|
bt_remseg(vm, bt);
|
|
|
|
bt_remseg(vm, prev);
|
|
|
|
vm->vm_size -= spansize;
|
|
|
|
VMEM_CONDVAR_BROADCAST(vm);
|
|
|
|
bt_freetrim(vm, BT_MAXFREE);
|
|
|
|
vm->vm_releasefn(vm->vm_arg, spanaddr, spansize);
|
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
vmem_xalloc_nextfit(vmem_t *vm, const vmem_size_t size, vmem_size_t align,
|
|
|
|
const vmem_size_t phase, const vmem_size_t nocross, int flags,
|
|
|
|
vmem_addr_t *addrp)
|
|
|
|
{
|
|
|
|
struct vmem_btag *bt, *cursor, *next, *prev;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = ENOMEM;
|
|
|
|
VMEM_LOCK(vm);
|
vmem: Allocate btags before looping in vmem_xalloc()
BT_MAXALLOC (4) is the number of boundary tags required to complete an
allocation in the worst case: two to clip a free segment, and two to
import from a parent arena. vmem_xalloc() preallocates four boundary
tags before attempting a search to simplify the segment allocation code.
It implements a loop that:
1) ensures that BT_MAXALLOC boundary tags are available,
2) attempts to find and clip a free segment satisfying the allocation
constraints, and failing that,
3) attempts to import a segment.
On !UMA_MD_SMALL_ALLOC platforms the btag zone has to handle recusion:
it needs boundary tags to allocate boundary tags. Thus we reserve
2 * BT_MAXALLOC * mp_ncpus tags for use when recursing: the factor of 2
is because there are two layers of vmem arenas, the per-domain arena and
global arena. For a single thread, 2 * BT_MAXALLOC tags should be
sufficient.
Because of the way the loop is structured, BT_MAXALLOC tags are not
sufficient. The first bt_fill() call may allocate BT_MAXALLOC tags,
then import a segment (consuming two tags), then attempt to top up the
preallocation before carving into the imported free segment, thus
requiring up to six tags in the worst case. Because we don't
preallocate that many, this bug can cause deadlocks in rare scenarios.
Fix the problem by moving the preallocation out the loop. This assumes
that only a single import is ever required to satisfy an allocation
request.
Thanks to manu, emaste and lwhsu for helping test debug patches.
Reported by: Jenkins (hardware CI lab)
Reviewed by: alc, kib, rlibby
MFC after: 2 weeks
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D26770
2020-10-19 16:54:06 +00:00
|
|
|
|
2019-05-18 01:46:38 +00:00
|
|
|
/*
|
|
|
|
* Make sure we have enough tags to complete the operation.
|
|
|
|
*/
|
2020-10-19 16:52:27 +00:00
|
|
|
if (bt_fill(vm, flags) != 0)
|
2019-05-18 01:46:38 +00:00
|
|
|
goto out;
|
|
|
|
|
vmem: Allocate btags before looping in vmem_xalloc()
BT_MAXALLOC (4) is the number of boundary tags required to complete an
allocation in the worst case: two to clip a free segment, and two to
import from a parent arena. vmem_xalloc() preallocates four boundary
tags before attempting a search to simplify the segment allocation code.
It implements a loop that:
1) ensures that BT_MAXALLOC boundary tags are available,
2) attempts to find and clip a free segment satisfying the allocation
constraints, and failing that,
3) attempts to import a segment.
On !UMA_MD_SMALL_ALLOC platforms the btag zone has to handle recusion:
it needs boundary tags to allocate boundary tags. Thus we reserve
2 * BT_MAXALLOC * mp_ncpus tags for use when recursing: the factor of 2
is because there are two layers of vmem arenas, the per-domain arena and
global arena. For a single thread, 2 * BT_MAXALLOC tags should be
sufficient.
Because of the way the loop is structured, BT_MAXALLOC tags are not
sufficient. The first bt_fill() call may allocate BT_MAXALLOC tags,
then import a segment (consuming two tags), then attempt to top up the
preallocation before carving into the imported free segment, thus
requiring up to six tags in the worst case. Because we don't
preallocate that many, this bug can cause deadlocks in rare scenarios.
Fix the problem by moving the preallocation out the loop. This assumes
that only a single import is ever required to satisfy an allocation
request.
Thanks to manu, emaste and lwhsu for helping test debug patches.
Reported by: Jenkins (hardware CI lab)
Reviewed by: alc, kib, rlibby
MFC after: 2 weeks
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D26770
2020-10-19 16:54:06 +00:00
|
|
|
retry:
|
2019-05-18 01:46:38 +00:00
|
|
|
/*
|
|
|
|
* Find the next free tag meeting our constraints. If one is found,
|
|
|
|
* perform the allocation.
|
|
|
|
*/
|
|
|
|
for (cursor = &vm->vm_cursor, bt = TAILQ_NEXT(cursor, bt_seglist);
|
|
|
|
bt != cursor; bt = TAILQ_NEXT(bt, bt_seglist)) {
|
|
|
|
if (bt == NULL)
|
|
|
|
bt = TAILQ_FIRST(&vm->vm_seglist);
|
|
|
|
if (bt->bt_type == BT_TYPE_FREE && bt->bt_size >= size &&
|
|
|
|
(error = vmem_fit(bt, size, align, phase, nocross,
|
|
|
|
VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
|
|
|
|
vmem_clip(vm, bt, *addrp, size);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Try to coalesce free segments around the cursor. If we succeed, and
|
|
|
|
* have not yet satisfied the allocation request, try again with the
|
|
|
|
* newly coalesced segment.
|
|
|
|
*/
|
|
|
|
if ((next = TAILQ_NEXT(cursor, bt_seglist)) != NULL &&
|
|
|
|
(prev = TAILQ_PREV(cursor, vmem_seglist, bt_seglist)) != NULL &&
|
|
|
|
next->bt_type == BT_TYPE_FREE && prev->bt_type == BT_TYPE_FREE &&
|
|
|
|
prev->bt_start + prev->bt_size == next->bt_start) {
|
|
|
|
prev->bt_size += next->bt_size;
|
|
|
|
bt_remfree(vm, next);
|
|
|
|
bt_remseg(vm, next);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The coalesced segment might be able to satisfy our request.
|
|
|
|
* If not, we might need to release it from the arena.
|
|
|
|
*/
|
|
|
|
if (error == ENOMEM && prev->bt_size >= size &&
|
|
|
|
(error = vmem_fit(prev, size, align, phase, nocross,
|
|
|
|
VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
|
|
|
|
vmem_clip(vm, prev, *addrp, size);
|
|
|
|
bt = prev;
|
|
|
|
} else
|
|
|
|
(void)vmem_try_release(vm, prev, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the allocation was successful, advance the cursor.
|
|
|
|
*/
|
|
|
|
if (error == 0) {
|
|
|
|
TAILQ_REMOVE(&vm->vm_seglist, cursor, bt_seglist);
|
|
|
|
for (; bt != NULL && bt->bt_start < *addrp + size;
|
|
|
|
bt = TAILQ_NEXT(bt, bt_seglist))
|
|
|
|
;
|
|
|
|
if (bt != NULL)
|
|
|
|
TAILQ_INSERT_BEFORE(bt, cursor, bt_seglist);
|
|
|
|
else
|
|
|
|
TAILQ_INSERT_HEAD(&vm->vm_seglist, cursor, bt_seglist);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Attempt to bring additional resources into the arena. If that fails
|
|
|
|
* and M_WAITOK is specified, sleep waiting for resources to be freed.
|
|
|
|
*/
|
|
|
|
if (error == ENOMEM && vmem_try_fetch(vm, size, align, flags))
|
|
|
|
goto retry;
|
|
|
|
|
|
|
|
out:
|
|
|
|
VMEM_UNLOCK(vm);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
/* ---- vmem API */
|
|
|
|
|
|
|
|
void
|
|
|
|
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
|
|
|
|
vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
|
|
|
|
{
|
|
|
|
|
|
|
|
VMEM_LOCK(vm);
|
2020-08-26 14:31:35 +00:00
|
|
|
KASSERT(vm->vm_size == 0, ("%s: arena is non-empty", __func__));
|
2013-06-28 03:51:20 +00:00
|
|
|
vm->vm_importfn = importfn;
|
|
|
|
vm->vm_releasefn = releasefn;
|
|
|
|
vm->vm_arg = arg;
|
|
|
|
vm->vm_import_quantum = import_quantum;
|
|
|
|
VMEM_UNLOCK(vm);
|
|
|
|
}
|
|
|
|
|
2017-11-28 23:40:54 +00:00
|
|
|
void
|
|
|
|
vmem_set_limit(vmem_t *vm, vmem_size_t limit)
|
|
|
|
{
|
|
|
|
|
|
|
|
VMEM_LOCK(vm);
|
|
|
|
vm->vm_limit = limit;
|
|
|
|
VMEM_UNLOCK(vm);
|
|
|
|
}
|
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
void
|
|
|
|
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
|
|
|
|
{
|
|
|
|
|
|
|
|
VMEM_LOCK(vm);
|
|
|
|
vm->vm_reclaimfn = reclaimfn;
|
|
|
|
VMEM_UNLOCK(vm);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* vmem_init: Initializes vmem arena.
|
|
|
|
*/
|
|
|
|
vmem_t *
|
|
|
|
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
|
|
|
|
vmem_size_t quantum, vmem_size_t qcache_max, int flags)
|
|
|
|
{
|
2020-11-17 02:18:34 +00:00
|
|
|
vmem_size_t i;
|
2013-06-28 03:51:20 +00:00
|
|
|
|
|
|
|
MPASS(quantum > 0);
|
2013-12-11 21:48:04 +00:00
|
|
|
MPASS((quantum & (quantum - 1)) == 0);
|
2013-06-28 03:51:20 +00:00
|
|
|
|
|
|
|
bzero(vm, sizeof(*vm));
|
|
|
|
|
|
|
|
VMEM_CONDVAR_INIT(vm, name);
|
|
|
|
VMEM_LOCK_INIT(vm, name);
|
|
|
|
vm->vm_nfreetags = 0;
|
|
|
|
LIST_INIT(&vm->vm_freetags);
|
|
|
|
strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
|
|
|
|
vm->vm_quantum_mask = quantum - 1;
|
2013-12-11 21:48:04 +00:00
|
|
|
vm->vm_quantum_shift = flsl(quantum) - 1;
|
2013-06-28 03:51:20 +00:00
|
|
|
vm->vm_nbusytag = 0;
|
|
|
|
vm->vm_size = 0;
|
2017-11-28 23:40:54 +00:00
|
|
|
vm->vm_limit = 0;
|
2013-06-28 03:51:20 +00:00
|
|
|
vm->vm_inuse = 0;
|
|
|
|
qc_init(vm, qcache_max);
|
|
|
|
|
|
|
|
TAILQ_INIT(&vm->vm_seglist);
|
2019-05-18 01:46:38 +00:00
|
|
|
vm->vm_cursor.bt_start = vm->vm_cursor.bt_size = 0;
|
|
|
|
vm->vm_cursor.bt_type = BT_TYPE_CURSOR;
|
|
|
|
TAILQ_INSERT_TAIL(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
|
|
|
|
|
|
|
|
for (i = 0; i < VMEM_MAXORDER; i++)
|
2013-06-28 03:51:20 +00:00
|
|
|
LIST_INIT(&vm->vm_freelist[i]);
|
2019-05-18 01:46:38 +00:00
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
|
|
|
|
vm->vm_hashsize = VMEM_HASHSIZE_MIN;
|
|
|
|
vm->vm_hashlist = vm->vm_hash0;
|
|
|
|
|
|
|
|
if (size != 0) {
|
|
|
|
if (vmem_add(vm, base, size, flags) != 0) {
|
|
|
|
vmem_destroy1(vm);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
mtx_lock(&vmem_list_lock);
|
|
|
|
LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
|
|
|
|
mtx_unlock(&vmem_list_lock);
|
|
|
|
|
|
|
|
return vm;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* vmem_create: create an arena.
|
|
|
|
*/
|
|
|
|
vmem_t *
|
|
|
|
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
|
|
|
|
vmem_size_t quantum, vmem_size_t qcache_max, int flags)
|
|
|
|
{
|
|
|
|
|
|
|
|
vmem_t *vm;
|
|
|
|
|
2018-01-12 23:13:55 +00:00
|
|
|
vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
|
2013-06-28 03:51:20 +00:00
|
|
|
if (vm == NULL)
|
|
|
|
return (NULL);
|
|
|
|
if (vmem_init(vm, name, base, size, quantum, qcache_max,
|
2016-05-11 23:16:11 +00:00
|
|
|
flags) == NULL)
|
2013-06-28 03:51:20 +00:00
|
|
|
return (NULL);
|
|
|
|
return (vm);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
vmem_destroy(vmem_t *vm)
|
|
|
|
{
|
|
|
|
|
|
|
|
mtx_lock(&vmem_list_lock);
|
|
|
|
LIST_REMOVE(vm, vm_alllist);
|
|
|
|
mtx_unlock(&vmem_list_lock);
|
|
|
|
|
|
|
|
vmem_destroy1(vm);
|
|
|
|
}
|
|
|
|
|
|
|
|
vmem_size_t
|
|
|
|
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
|
|
|
|
{
|
|
|
|
|
|
|
|
return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* vmem_alloc: allocate resource from the arena.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
|
|
|
|
{
|
|
|
|
const int strat __unused = flags & VMEM_FITMASK;
|
|
|
|
qcache_t *qc;
|
|
|
|
|
|
|
|
flags &= VMEM_FLAGS;
|
|
|
|
MPASS(size > 0);
|
2019-05-18 01:46:38 +00:00
|
|
|
MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
|
2013-06-28 03:51:20 +00:00
|
|
|
if ((flags & M_NOWAIT) == 0)
|
|
|
|
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");
|
|
|
|
|
|
|
|
if (size <= vm->vm_qcache_max) {
|
2018-10-22 16:16:42 +00:00
|
|
|
/*
|
|
|
|
* Resource 0 cannot be cached, so avoid a blocking allocation
|
|
|
|
* in qc_import() and give the vmem_xalloc() call below a chance
|
|
|
|
* to return 0.
|
|
|
|
*/
|
2013-06-28 03:51:20 +00:00
|
|
|
qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
|
2018-10-22 16:16:42 +00:00
|
|
|
*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache,
|
|
|
|
(flags & ~M_WAITOK) | M_NOWAIT);
|
|
|
|
if (__predict_true(*addrp != 0))
|
|
|
|
return (0);
|
2013-06-28 03:51:20 +00:00
|
|
|
}
|
|
|
|
|
2018-10-22 16:16:42 +00:00
|
|
|
return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
|
|
|
|
flags, addrp));
|
2013-06-28 03:51:20 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
|
|
|
|
const vmem_size_t phase, const vmem_size_t nocross,
|
|
|
|
const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
|
|
|
|
vmem_addr_t *addrp)
|
|
|
|
{
|
|
|
|
const vmem_size_t size = vmem_roundup_size(vm, size0);
|
|
|
|
struct vmem_freelist *list;
|
|
|
|
struct vmem_freelist *first;
|
|
|
|
struct vmem_freelist *end;
|
|
|
|
bt_t *bt;
|
|
|
|
int error;
|
|
|
|
int strat;
|
|
|
|
|
|
|
|
flags &= VMEM_FLAGS;
|
|
|
|
strat = flags & VMEM_FITMASK;
|
|
|
|
MPASS(size0 > 0);
|
|
|
|
MPASS(size > 0);
|
2019-05-18 01:46:38 +00:00
|
|
|
MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
|
2013-06-28 03:51:20 +00:00
|
|
|
MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
|
|
|
|
if ((flags & M_NOWAIT) == 0)
|
|
|
|
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
|
|
|
|
MPASS((align & vm->vm_quantum_mask) == 0);
|
|
|
|
MPASS((align & (align - 1)) == 0);
|
|
|
|
MPASS((phase & vm->vm_quantum_mask) == 0);
|
|
|
|
MPASS((nocross & vm->vm_quantum_mask) == 0);
|
|
|
|
MPASS((nocross & (nocross - 1)) == 0);
|
|
|
|
MPASS((align == 0 && phase == 0) || phase < align);
|
|
|
|
MPASS(nocross == 0 || nocross >= size);
|
|
|
|
MPASS(minaddr <= maxaddr);
|
|
|
|
MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
|
2019-05-18 01:46:38 +00:00
|
|
|
if (strat == M_NEXTFIT)
|
|
|
|
MPASS(minaddr == VMEM_ADDR_MIN && maxaddr == VMEM_ADDR_MAX);
|
2013-06-28 03:51:20 +00:00
|
|
|
|
|
|
|
if (align == 0)
|
|
|
|
align = vm->vm_quantum_mask + 1;
|
|
|
|
*addrp = 0;
|
2019-05-18 01:46:38 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Next-fit allocations don't use the freelists.
|
|
|
|
*/
|
|
|
|
if (strat == M_NEXTFIT)
|
|
|
|
return (vmem_xalloc_nextfit(vm, size0, align, phase, nocross,
|
|
|
|
flags, addrp));
|
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
end = &vm->vm_freelist[VMEM_MAXORDER];
|
|
|
|
/*
|
|
|
|
* choose a free block from which we allocate.
|
|
|
|
*/
|
|
|
|
first = bt_freehead_toalloc(vm, size, strat);
|
|
|
|
VMEM_LOCK(vm);
|
2019-05-18 01:46:38 +00:00
|
|
|
|
vmem: Allocate btags before looping in vmem_xalloc()
BT_MAXALLOC (4) is the number of boundary tags required to complete an
allocation in the worst case: two to clip a free segment, and two to
import from a parent arena. vmem_xalloc() preallocates four boundary
tags before attempting a search to simplify the segment allocation code.
It implements a loop that:
1) ensures that BT_MAXALLOC boundary tags are available,
2) attempts to find and clip a free segment satisfying the allocation
constraints, and failing that,
3) attempts to import a segment.
On !UMA_MD_SMALL_ALLOC platforms the btag zone has to handle recusion:
it needs boundary tags to allocate boundary tags. Thus we reserve
2 * BT_MAXALLOC * mp_ncpus tags for use when recursing: the factor of 2
is because there are two layers of vmem arenas, the per-domain arena and
global arena. For a single thread, 2 * BT_MAXALLOC tags should be
sufficient.
Because of the way the loop is structured, BT_MAXALLOC tags are not
sufficient. The first bt_fill() call may allocate BT_MAXALLOC tags,
then import a segment (consuming two tags), then attempt to top up the
preallocation before carving into the imported free segment, thus
requiring up to six tags in the worst case. Because we don't
preallocate that many, this bug can cause deadlocks in rare scenarios.
Fix the problem by moving the preallocation out the loop. This assumes
that only a single import is ever required to satisfy an allocation
request.
Thanks to manu, emaste and lwhsu for helping test debug patches.
Reported by: Jenkins (hardware CI lab)
Reviewed by: alc, kib, rlibby
MFC after: 2 weeks
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D26770
2020-10-19 16:54:06 +00:00
|
|
|
/*
|
|
|
|
* Make sure we have enough tags to complete the operation.
|
|
|
|
*/
|
|
|
|
error = bt_fill(vm, flags);
|
|
|
|
if (error != 0)
|
|
|
|
goto out;
|
|
|
|
for (;;) {
|
2013-06-28 03:51:20 +00:00
|
|
|
/*
|
|
|
|
* Scan freelists looking for a tag that satisfies the
|
|
|
|
* allocation. If we're doing BESTFIT we may encounter
|
|
|
|
* sizes below the request. If we're doing FIRSTFIT we
|
|
|
|
* inspect only the first element from each list.
|
|
|
|
*/
|
|
|
|
for (list = first; list < end; list++) {
|
|
|
|
LIST_FOREACH(bt, list, bt_freelist) {
|
|
|
|
if (bt->bt_size >= size) {
|
|
|
|
error = vmem_fit(bt, size, align, phase,
|
|
|
|
nocross, minaddr, maxaddr, addrp);
|
|
|
|
if (error == 0) {
|
|
|
|
vmem_clip(vm, bt, *addrp, size);
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/* FIRST skips to the next list. */
|
|
|
|
if (strat == M_FIRSTFIT)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2019-05-18 01:46:38 +00:00
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
/*
|
|
|
|
* Retry if the fast algorithm failed.
|
|
|
|
*/
|
|
|
|
if (strat == M_FIRSTFIT) {
|
|
|
|
strat = M_BESTFIT;
|
|
|
|
first = bt_freehead_toalloc(vm, size, strat);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2019-05-18 01:46:38 +00:00
|
|
|
* Try a few measures to bring additional resources into the
|
|
|
|
* arena. If all else fails, we will sleep waiting for
|
|
|
|
* resources to be freed.
|
2013-06-28 03:51:20 +00:00
|
|
|
*/
|
2019-05-18 01:46:38 +00:00
|
|
|
if (!vmem_try_fetch(vm, size, align, flags)) {
|
2013-06-28 03:51:20 +00:00
|
|
|
error = ENOMEM;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
VMEM_UNLOCK(vm);
|
|
|
|
if (error != 0 && (flags & M_NOWAIT) == 0)
|
|
|
|
panic("failed to allocate waiting allocation\n");
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* vmem_free: free the resource to the arena.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
|
|
|
|
{
|
|
|
|
qcache_t *qc;
|
|
|
|
MPASS(size > 0);
|
|
|
|
|
2018-10-22 16:16:42 +00:00
|
|
|
if (size <= vm->vm_qcache_max &&
|
|
|
|
__predict_true(addr >= VMEM_ADDR_QCACHE_MIN)) {
|
2013-06-28 03:51:20 +00:00
|
|
|
qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
|
|
|
|
uma_zfree(qc->qc_cache, (void *)addr);
|
|
|
|
} else
|
|
|
|
vmem_xfree(vm, addr, size);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2020-11-17 02:18:34 +00:00
|
|
|
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size __unused)
|
2013-06-28 03:51:20 +00:00
|
|
|
{
|
|
|
|
bt_t *bt;
|
|
|
|
bt_t *t;
|
|
|
|
|
|
|
|
MPASS(size > 0);
|
|
|
|
|
|
|
|
VMEM_LOCK(vm);
|
|
|
|
bt = bt_lookupbusy(vm, addr);
|
|
|
|
MPASS(bt != NULL);
|
|
|
|
MPASS(bt->bt_start == addr);
|
|
|
|
MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
|
|
|
|
bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
|
|
|
|
MPASS(bt->bt_type == BT_TYPE_BUSY);
|
|
|
|
bt_rembusy(vm, bt);
|
|
|
|
bt->bt_type = BT_TYPE_FREE;
|
|
|
|
|
|
|
|
/* coalesce */
|
|
|
|
t = TAILQ_NEXT(bt, bt_seglist);
|
|
|
|
if (t != NULL && t->bt_type == BT_TYPE_FREE) {
|
|
|
|
MPASS(BT_END(bt) < t->bt_start); /* YYY */
|
|
|
|
bt->bt_size += t->bt_size;
|
|
|
|
bt_remfree(vm, t);
|
|
|
|
bt_remseg(vm, t);
|
|
|
|
}
|
|
|
|
t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
|
|
|
|
if (t != NULL && t->bt_type == BT_TYPE_FREE) {
|
|
|
|
MPASS(BT_END(t) < bt->bt_start); /* YYY */
|
|
|
|
bt->bt_size += t->bt_size;
|
|
|
|
bt->bt_start = t->bt_start;
|
|
|
|
bt_remfree(vm, t);
|
|
|
|
bt_remseg(vm, t);
|
|
|
|
}
|
|
|
|
|
2019-05-18 01:46:38 +00:00
|
|
|
if (!vmem_try_release(vm, bt, false)) {
|
2013-06-28 03:51:20 +00:00
|
|
|
bt_insfree(vm, bt);
|
|
|
|
VMEM_CONDVAR_BROADCAST(vm);
|
|
|
|
bt_freetrim(vm, BT_MAXFREE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* vmem_add:
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
|
|
|
|
{
|
|
|
|
int error;
|
|
|
|
|
|
|
|
flags &= VMEM_FLAGS;
|
2020-10-19 16:52:27 +00:00
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
VMEM_LOCK(vm);
|
2020-10-19 16:52:27 +00:00
|
|
|
error = bt_fill(vm, flags);
|
|
|
|
if (error == 0)
|
2013-07-24 08:02:56 +00:00
|
|
|
vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
|
2013-06-28 03:51:20 +00:00
|
|
|
VMEM_UNLOCK(vm);
|
|
|
|
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* vmem_size: information about arenas size
|
|
|
|
*/
|
|
|
|
vmem_size_t
|
|
|
|
vmem_size(vmem_t *vm, int typemask)
|
|
|
|
{
|
Make ZFS ARC track both KVA usage and fragmentation.
Even on Illumos, with its much larger KVA, ZFS ARC steps back if KVA usage
reaches certain threshold (3/4 on i386 or 16/17 otherwise). FreeBSD has
even less KVA, but had no such limit on archs with direct map as amd64.
As result, on machines with a lot of RAM, during load with very small user-
space memory pressure, such as `zfs send`, it was possible to reach state,
when there is enough both physical RAM and KVA (I've seen up to 25-30%),
but no continuous KVA range to allocate even single 128KB I/O request.
Address this situation from two sides:
- restore KVA usage limitations in a way the most close to Illumos;
- introduce new requirement for KVA fragmentation, specifying that we
should have at least one sequential KVA range of zfs_max_recordsize bytes.
Experiments show that first limitation done alone is not sufficient. On
machine with 64GB of RAM it is sometimes needed to drop up to half of ARC
size to get at leats one 1MB KVA chunk. Statically limiting ARC to half
of KVA/RAM is too strict, so second limitation makes it to work in cycles:
accumulate trash up to certain critical mass, do massive spring-cleaning,
and then start littering again. :)
MFC after: 1 month
2015-04-03 14:45:48 +00:00
|
|
|
int i;
|
2013-06-28 03:51:20 +00:00
|
|
|
|
|
|
|
switch (typemask) {
|
|
|
|
case VMEM_ALLOC:
|
|
|
|
return vm->vm_inuse;
|
|
|
|
case VMEM_FREE:
|
|
|
|
return vm->vm_size - vm->vm_inuse;
|
|
|
|
case VMEM_FREE|VMEM_ALLOC:
|
|
|
|
return vm->vm_size;
|
Make ZFS ARC track both KVA usage and fragmentation.
Even on Illumos, with its much larger KVA, ZFS ARC steps back if KVA usage
reaches certain threshold (3/4 on i386 or 16/17 otherwise). FreeBSD has
even less KVA, but had no such limit on archs with direct map as amd64.
As result, on machines with a lot of RAM, during load with very small user-
space memory pressure, such as `zfs send`, it was possible to reach state,
when there is enough both physical RAM and KVA (I've seen up to 25-30%),
but no continuous KVA range to allocate even single 128KB I/O request.
Address this situation from two sides:
- restore KVA usage limitations in a way the most close to Illumos;
- introduce new requirement for KVA fragmentation, specifying that we
should have at least one sequential KVA range of zfs_max_recordsize bytes.
Experiments show that first limitation done alone is not sufficient. On
machine with 64GB of RAM it is sometimes needed to drop up to half of ARC
size to get at leats one 1MB KVA chunk. Statically limiting ARC to half
of KVA/RAM is too strict, so second limitation makes it to work in cycles:
accumulate trash up to certain critical mass, do massive spring-cleaning,
and then start littering again. :)
MFC after: 1 month
2015-04-03 14:45:48 +00:00
|
|
|
case VMEM_MAXFREE:
|
2015-04-05 14:17:26 +00:00
|
|
|
VMEM_LOCK(vm);
|
Make ZFS ARC track both KVA usage and fragmentation.
Even on Illumos, with its much larger KVA, ZFS ARC steps back if KVA usage
reaches certain threshold (3/4 on i386 or 16/17 otherwise). FreeBSD has
even less KVA, but had no such limit on archs with direct map as amd64.
As result, on machines with a lot of RAM, during load with very small user-
space memory pressure, such as `zfs send`, it was possible to reach state,
when there is enough both physical RAM and KVA (I've seen up to 25-30%),
but no continuous KVA range to allocate even single 128KB I/O request.
Address this situation from two sides:
- restore KVA usage limitations in a way the most close to Illumos;
- introduce new requirement for KVA fragmentation, specifying that we
should have at least one sequential KVA range of zfs_max_recordsize bytes.
Experiments show that first limitation done alone is not sufficient. On
machine with 64GB of RAM it is sometimes needed to drop up to half of ARC
size to get at leats one 1MB KVA chunk. Statically limiting ARC to half
of KVA/RAM is too strict, so second limitation makes it to work in cycles:
accumulate trash up to certain critical mass, do massive spring-cleaning,
and then start littering again. :)
MFC after: 1 month
2015-04-03 14:45:48 +00:00
|
|
|
for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
|
|
|
|
if (LIST_EMPTY(&vm->vm_freelist[i]))
|
|
|
|
continue;
|
2015-04-05 14:17:26 +00:00
|
|
|
VMEM_UNLOCK(vm);
|
Make ZFS ARC track both KVA usage and fragmentation.
Even on Illumos, with its much larger KVA, ZFS ARC steps back if KVA usage
reaches certain threshold (3/4 on i386 or 16/17 otherwise). FreeBSD has
even less KVA, but had no such limit on archs with direct map as amd64.
As result, on machines with a lot of RAM, during load with very small user-
space memory pressure, such as `zfs send`, it was possible to reach state,
when there is enough both physical RAM and KVA (I've seen up to 25-30%),
but no continuous KVA range to allocate even single 128KB I/O request.
Address this situation from two sides:
- restore KVA usage limitations in a way the most close to Illumos;
- introduce new requirement for KVA fragmentation, specifying that we
should have at least one sequential KVA range of zfs_max_recordsize bytes.
Experiments show that first limitation done alone is not sufficient. On
machine with 64GB of RAM it is sometimes needed to drop up to half of ARC
size to get at leats one 1MB KVA chunk. Statically limiting ARC to half
of KVA/RAM is too strict, so second limitation makes it to work in cycles:
accumulate trash up to certain critical mass, do massive spring-cleaning,
and then start littering again. :)
MFC after: 1 month
2015-04-03 14:45:48 +00:00
|
|
|
return ((vmem_size_t)ORDER2SIZE(i) <<
|
|
|
|
vm->vm_quantum_shift);
|
|
|
|
}
|
2015-04-05 14:17:26 +00:00
|
|
|
VMEM_UNLOCK(vm);
|
Make ZFS ARC track both KVA usage and fragmentation.
Even on Illumos, with its much larger KVA, ZFS ARC steps back if KVA usage
reaches certain threshold (3/4 on i386 or 16/17 otherwise). FreeBSD has
even less KVA, but had no such limit on archs with direct map as amd64.
As result, on machines with a lot of RAM, during load with very small user-
space memory pressure, such as `zfs send`, it was possible to reach state,
when there is enough both physical RAM and KVA (I've seen up to 25-30%),
but no continuous KVA range to allocate even single 128KB I/O request.
Address this situation from two sides:
- restore KVA usage limitations in a way the most close to Illumos;
- introduce new requirement for KVA fragmentation, specifying that we
should have at least one sequential KVA range of zfs_max_recordsize bytes.
Experiments show that first limitation done alone is not sufficient. On
machine with 64GB of RAM it is sometimes needed to drop up to half of ARC
size to get at leats one 1MB KVA chunk. Statically limiting ARC to half
of KVA/RAM is too strict, so second limitation makes it to work in cycles:
accumulate trash up to certain critical mass, do massive spring-cleaning,
and then start littering again. :)
MFC after: 1 month
2015-04-03 14:45:48 +00:00
|
|
|
return (0);
|
2013-06-28 03:51:20 +00:00
|
|
|
default:
|
|
|
|
panic("vmem_size");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* ---- debug */
|
|
|
|
|
|
|
|
#if defined(DDB) || defined(DIAGNOSTIC)
|
|
|
|
|
|
|
|
static void bt_dump(const bt_t *, int (*)(const char *, ...)
|
|
|
|
__printflike(1, 2));
|
|
|
|
|
|
|
|
static const char *
|
|
|
|
bt_type_string(int type)
|
|
|
|
{
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case BT_TYPE_BUSY:
|
|
|
|
return "busy";
|
|
|
|
case BT_TYPE_FREE:
|
|
|
|
return "free";
|
|
|
|
case BT_TYPE_SPAN:
|
|
|
|
return "span";
|
|
|
|
case BT_TYPE_SPAN_STATIC:
|
|
|
|
return "static span";
|
2019-05-18 01:46:38 +00:00
|
|
|
case BT_TYPE_CURSOR:
|
|
|
|
return "cursor";
|
2013-06-28 03:51:20 +00:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return "BOGUS";
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
|
|
|
|
{
|
|
|
|
|
|
|
|
(*pr)("\t%p: %jx %jx, %d(%s)\n",
|
|
|
|
bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
|
|
|
|
bt->bt_type, bt_type_string(bt->bt_type));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmem_dump(const vmem_t *vm , int (*pr)(const char *, ...) __printflike(1, 2))
|
|
|
|
{
|
|
|
|
const bt_t *bt;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
|
|
|
|
TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
|
|
|
|
bt_dump(bt, pr);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < VMEM_MAXORDER; i++) {
|
|
|
|
const struct vmem_freelist *fl = &vm->vm_freelist[i];
|
|
|
|
|
|
|
|
if (LIST_EMPTY(fl)) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
(*pr)("freelist[%d]\n", i);
|
|
|
|
LIST_FOREACH(bt, fl, bt_freelist) {
|
|
|
|
bt_dump(bt, pr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* defined(DDB) || defined(DIAGNOSTIC) */
|
|
|
|
|
|
|
|
#if defined(DDB)
|
2015-03-29 10:02:29 +00:00
|
|
|
#include <ddb/ddb.h>
|
|
|
|
|
2013-06-28 03:51:20 +00:00
|
|
|
static bt_t *
|
|
|
|
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
|
|
|
|
{
|
|
|
|
bt_t *bt;
|
|
|
|
|
|
|
|
TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
|
|
|
|
if (BT_ISSPAN_P(bt)) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (bt->bt_start <= addr && addr <= BT_END(bt)) {
|
|
|
|
return bt;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
|
|
|
|
{
|
|
|
|
vmem_t *vm;
|
|
|
|
|
|
|
|
LIST_FOREACH(vm, &vmem_list, vm_alllist) {
|
|
|
|
bt_t *bt;
|
|
|
|
|
|
|
|
bt = vmem_whatis_lookup(vm, addr);
|
|
|
|
if (bt == NULL) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
|
|
|
|
(void *)addr, (void *)bt->bt_start,
|
|
|
|
(vmem_size_t)(addr - bt->bt_start), vm->vm_name,
|
|
|
|
(bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
vmem_printall(const char *modif, int (*pr)(const char *, ...))
|
|
|
|
{
|
|
|
|
const vmem_t *vm;
|
|
|
|
|
|
|
|
LIST_FOREACH(vm, &vmem_list, vm_alllist) {
|
|
|
|
vmem_dump(vm, pr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...))
|
|
|
|
{
|
|
|
|
const vmem_t *vm = (const void *)addr;
|
|
|
|
|
|
|
|
vmem_dump(vm, pr);
|
|
|
|
}
|
2015-03-29 10:02:29 +00:00
|
|
|
|
|
|
|
DB_SHOW_COMMAND(vmemdump, vmemdump)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (!have_addr) {
|
|
|
|
db_printf("usage: show vmemdump <addr>\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
vmem_dump((const vmem_t *)addr, db_printf);
|
|
|
|
}
|
|
|
|
|
|
|
|
DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall)
|
|
|
|
{
|
|
|
|
const vmem_t *vm;
|
|
|
|
|
|
|
|
LIST_FOREACH(vm, &vmem_list, vm_alllist)
|
|
|
|
vmem_dump(vm, db_printf);
|
|
|
|
}
|
|
|
|
|
|
|
|
DB_SHOW_COMMAND(vmem, vmem_summ)
|
|
|
|
{
|
|
|
|
const vmem_t *vm = (const void *)addr;
|
|
|
|
const bt_t *bt;
|
|
|
|
size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER];
|
|
|
|
size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER];
|
|
|
|
int ord;
|
|
|
|
|
|
|
|
if (!have_addr) {
|
|
|
|
db_printf("usage: show vmem <addr>\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
db_printf("vmem %p '%s'\n", vm, vm->vm_name);
|
|
|
|
db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1);
|
|
|
|
db_printf("\tsize:\t%zu\n", vm->vm_size);
|
|
|
|
db_printf("\tinuse:\t%zu\n", vm->vm_inuse);
|
|
|
|
db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse);
|
|
|
|
db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag);
|
|
|
|
db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags);
|
|
|
|
|
|
|
|
memset(&ft, 0, sizeof(ft));
|
|
|
|
memset(&ut, 0, sizeof(ut));
|
|
|
|
memset(&fs, 0, sizeof(fs));
|
|
|
|
memset(&us, 0, sizeof(us));
|
|
|
|
TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
|
|
|
|
ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift);
|
|
|
|
if (bt->bt_type == BT_TYPE_BUSY) {
|
|
|
|
ut[ord]++;
|
|
|
|
us[ord] += bt->bt_size;
|
|
|
|
} else if (bt->bt_type == BT_TYPE_FREE) {
|
|
|
|
ft[ord]++;
|
|
|
|
fs[ord] += bt->bt_size;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n");
|
|
|
|
for (ord = 0; ord < VMEM_MAXORDER; ord++) {
|
|
|
|
if (ut[ord] == 0 && ft[ord] == 0)
|
|
|
|
continue;
|
|
|
|
db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n",
|
|
|
|
ORDER2SIZE(ord) << vm->vm_quantum_shift,
|
|
|
|
ut[ord], us[ord], ft[ord], fs[ord]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
DB_SHOW_ALL_COMMAND(vmem, vmem_summall)
|
|
|
|
{
|
|
|
|
const vmem_t *vm;
|
|
|
|
|
|
|
|
LIST_FOREACH(vm, &vmem_list, vm_alllist)
|
|
|
|
vmem_summ((db_expr_t)vm, TRUE, count, modif);
|
|
|
|
}
|
2013-06-28 03:51:20 +00:00
|
|
|
#endif /* defined(DDB) */
|
|
|
|
|
|
|
|
#define vmem_printf printf
|
|
|
|
|
|
|
|
#if defined(DIAGNOSTIC)
|
|
|
|
|
|
|
|
static bool
|
|
|
|
vmem_check_sanity(vmem_t *vm)
|
|
|
|
{
|
|
|
|
const bt_t *bt, *bt2;
|
|
|
|
|
|
|
|
MPASS(vm != NULL);
|
|
|
|
|
|
|
|
TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
|
|
|
|
if (bt->bt_start > BT_END(bt)) {
|
|
|
|
printf("corrupted tag\n");
|
|
|
|
bt_dump(bt, vmem_printf);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
|
2019-05-18 14:19:23 +00:00
|
|
|
if (bt->bt_type == BT_TYPE_CURSOR) {
|
|
|
|
if (bt->bt_start != 0 || bt->bt_size != 0) {
|
|
|
|
printf("corrupted cursor\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
2013-06-28 03:51:20 +00:00
|
|
|
TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
|
|
|
|
if (bt == bt2) {
|
|
|
|
continue;
|
|
|
|
}
|
2019-05-18 14:19:23 +00:00
|
|
|
if (bt2->bt_type == BT_TYPE_CURSOR) {
|
|
|
|
continue;
|
|
|
|
}
|
2013-06-28 03:51:20 +00:00
|
|
|
if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (bt->bt_start <= BT_END(bt2) &&
|
|
|
|
bt2->bt_start <= BT_END(bt)) {
|
|
|
|
printf("overwrapped tags\n");
|
|
|
|
bt_dump(bt, vmem_printf);
|
|
|
|
bt_dump(bt2, vmem_printf);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
vmem_check(vmem_t *vm)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (!vmem_check_sanity(vm)) {
|
|
|
|
panic("insanity vmem %p", vm);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* defined(DIAGNOSTIC) */
|