vmem: Avoid allocating span tags when segments are never released.

vmem uses span tags to delimit imported segments, so that a segment can be
released if it becomes free in the future.  However, the
per-domain kernel KVA arenas never release resources, so the span tags
between imported ranges are unused when the ranges are contiguous.
Furthermore, such span tags prevent coalescing of free segments across
KVA_QUANTUM boundaries, resulting in internal fragmentation which
inhibits superpage promotion in the kernel map.
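
As an illustration (not part of the original message), a sketch of the
boundary-tag list after two contiguous KVA_QUANTUM-sized imports under the
old behavior:

	/*
	 * Illustrative only: seglist after importing the contiguous ranges
	 * [a, a + Q) and [a + Q, a + 2Q), where Q is KVA_QUANTUM:
	 *
	 *   SPAN(a, Q) -> FREE(a, Q) -> SPAN(a + Q, Q) -> FREE(a + Q, Q)
	 *
	 * The interior SPAN tag keeps the two FREE segments from coalescing
	 * into FREE(a, 2Q), so a 2Q-sized request cannot be satisfied from
	 * this range even though it is contiguous and entirely free.
	 */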

Stop allocating span tags in arenas that never release resources.  This
saves a small amount of memory and allows free segments to coalesce
across import boundaries.  This manifests as improved kernel superpage
usage during poudriere runs, which also helps to reduce physical memory
fragmentation by reducing the number of broken partially populated
reservations.
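
A minimal sketch of the arena setup this change targets, loosely modeled on
the kernel arena initialization in sys/vm/vm_kern.c (the import callback and
quantum names are illustrative, not quoted from this commit):

	/*
	 * An arena whose KVA is imported on demand but never released.
	 * The NULL release function is the condition the new vmem_add1()
	 * logic keys on to skip span tags.
	 */
	vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0);
	vmem_set_import(kernel_arena, kva_import, NULL, NULL, KVA_QUANTUM);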

Tested by:	pho
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D24548
Author:	Mark Johnston
Date:	2020-08-26 14:31:35 +00:00
Commit:	41c6838786
Parent:	b95807751a
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=364819

sys/kern/subr_vmem.c

@@ -249,6 +249,18 @@ static struct vmem memguard_arena_storage;
 vmem_t *memguard_arena = &memguard_arena_storage;
 #endif
 
+static bool
+bt_isbusy(bt_t *bt)
+{
+	return (bt->bt_type == BT_TYPE_BUSY);
+}
+
+static bool
+bt_isfree(bt_t *bt)
+{
+	return (bt->bt_type == BT_TYPE_FREE);
+}
+
 /*
  * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
  * allocation will not fail once bt_fill() passes.  To do so we cache
@@ -795,24 +807,48 @@ SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);
 static void
 vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
 {
-	bt_t *btspan;
-	bt_t *btfree;
+	bt_t *btfree, *btprev, *btspan;
 
 	VMEM_ASSERT_LOCKED(vm);
 	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
 	MPASS((size & vm->vm_quantum_mask) == 0);
 
-	btspan = bt_alloc(vm);
-	btspan->bt_type = type;
-	btspan->bt_start = addr;
-	btspan->bt_size = size;
-	bt_insseg_tail(vm, btspan);
+	if (vm->vm_releasefn == NULL) {
+		/*
+		 * The new segment will never be released, so see if it is
+		 * contiguous with respect to an existing segment.  In this case
+		 * a span tag is not needed, and it may be possible now or in
+		 * the future to coalesce the new segment with an existing free
+		 * segment.
+		 */
+		btprev = TAILQ_LAST(&vm->vm_seglist, vmem_seglist);
+		if ((!bt_isbusy(btprev) && !bt_isfree(btprev)) ||
+		    btprev->bt_start + btprev->bt_size != addr)
+			btprev = NULL;
+	} else {
+		btprev = NULL;
+	}
 
-	btfree = bt_alloc(vm);
-	btfree->bt_type = BT_TYPE_FREE;
-	btfree->bt_start = addr;
-	btfree->bt_size = size;
-	bt_insseg(vm, btfree, btspan);
-	bt_insfree(vm, btfree);
+	if (btprev == NULL || bt_isbusy(btprev)) {
+		if (btprev == NULL) {
+			btspan = bt_alloc(vm);
+			btspan->bt_type = type;
+			btspan->bt_start = addr;
+			btspan->bt_size = size;
+			bt_insseg_tail(vm, btspan);
+		}
+
+		btfree = bt_alloc(vm);
+		btfree->bt_type = BT_TYPE_FREE;
+		btfree->bt_start = addr;
+		btfree->bt_size = size;
+		bt_insseg_tail(vm, btfree);
+		bt_insfree(vm, btfree);
+	} else {
+		bt_remfree(vm, btprev);
+		btprev->bt_size += size;
+		bt_insfree(vm, btprev);
+	}
 
 	vm->vm_size += size;
 }
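
To summarize the rewritten control flow (a restatement of the diff above, not
commit text), an import at [addr, addr + size) now takes one of three paths:

  - btprev == NULL: the arena can release spans, the range is discontiguous,
    or the previous tag is neither busy nor free; allocate SPAN and FREE tags
    as before.
  - bt_isbusy(btprev): contiguous with a busy segment; skip the SPAN tag and
    append only a FREE tag, which can coalesce once the busy segment is freed.
  - bt_isfree(btprev): contiguous with a free segment; extend it in place,
    allocating no tags at all.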
@@ -1147,6 +1183,7 @@ vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
 {
 
 	VMEM_LOCK(vm);
+	KASSERT(vm->vm_size == 0, ("%s: arena is non-empty", __func__));
 	vm->vm_importfn = importfn;
 	vm->vm_releasefn = releasefn;
 	vm->vm_arg = arg;
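
The new assertion makes the ordering requirement explicit: import and release
functions must be configured while the arena is still empty, since vmem_add1()
now consults vm_releasefn.  A hypothetical caller (example_import and
EX_QUANTUM are illustrative names, not from this commit):

	vmem_t *arena;

	arena = vmem_create("example", 0, 0, PAGE_SIZE, 0, M_WAITOK);
	/* Must precede any vmem_add() or import, or the KASSERT fires. */
	vmem_set_import(arena, example_import, NULL, NULL, EX_QUANTUM);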