 - Add a statically allocated memguard arena since it is needed very early
   on.
 - Pass the appropriate flags to vmem_xalloc() when allocating space for
   the arena from kmem_arena.

Sponsored by:	EMC / Isilon Storage Division
This commit is contained in:
Jeff Roberson 2013-08-13 22:40:43 +00:00
parent 38da30b419
commit 8441d1e842
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=254307
3 changed files with 17 additions and 8 deletions

View File

@ -57,6 +57,8 @@ __FBSDID("$FreeBSD$");
#include <sys/taskqueue.h>
#include <sys/vmem.h>
#include "opt_vm.h"
#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
@ -223,6 +225,11 @@ vmem_t *kmem_arena = &kmem_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;
#ifdef DEBUG_MEMGUARD
static struct vmem memguard_arena_storage;
vmem_t *memguard_arena = &memguard_arena_storage;
#endif
/*
* Fill the vmem's boundary tag cache. We guarantee that boundary tag
* allocation will not fail once bt_fill() passes. To do so we cache

View File

@ -56,6 +56,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/uma_int.h>
#include <vm/memguard.h>
@ -100,7 +101,6 @@ SYSCTL_PROC(_vm_memguard, OID_AUTO, desc,
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
memguard_sysctl_desc, "A", "Short description of memory type to monitor");
static vmem_t *memguard_map = NULL;
static vm_offset_t memguard_cursor;
static vm_offset_t memguard_base;
static vm_size_t memguard_mapsize;
@ -206,8 +206,8 @@ memguard_init(vmem_t *parent)
{
vm_offset_t base;
vmem_alloc(parent, memguard_mapsize, M_WAITOK, &base);
memguard_map = vmem_create("memguard arena", base, memguard_mapsize,
vmem_alloc(parent, memguard_mapsize, M_BESTFIT | M_WAITOK, &base);
vmem_init(memguard_arena, "memguard arena", base, memguard_mapsize,
PAGE_SIZE, 0, M_WAITOK);
memguard_cursor = base;
memguard_base = base;
@ -311,7 +311,7 @@ memguard_alloc(unsigned long req_size, int flags)
* of physical memory whether we allocate or hand off to
* uma_large_alloc(), so keep those.
*/
if (vmem_size(memguard_map, VMEM_ALLOC) >= memguard_physlimit &&
if (vmem_size(memguard_arena, VMEM_ALLOC) >= memguard_physlimit &&
req_size < PAGE_SIZE) {
addr = (vm_offset_t)NULL;
memguard_fail_pgs++;
@ -328,8 +328,9 @@ memguard_alloc(unsigned long req_size, int flags)
* map, unless vm_map_findspace() is tweaked.
*/
for (;;) {
if (vmem_xalloc(memguard_map, size_v, 0, 0, 0, memguard_cursor,
VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, &addr) == 0)
if (vmem_xalloc(memguard_arena, size_v, 0, 0, 0,
memguard_cursor, VMEM_ADDR_MAX,
M_BESTFIT | M_NOWAIT, &addr) == 0)
break;
/*
* The map has no space. This may be due to
@ -348,7 +349,7 @@ memguard_alloc(unsigned long req_size, int flags)
addr += PAGE_SIZE;
rv = kmem_back(kmem_object, addr, size_p, flags);
if (rv != KERN_SUCCESS) {
vmem_xfree(memguard_map, addr, size_v);
vmem_xfree(memguard_arena, addr, size_v);
memguard_fail_pgs++;
addr = (vm_offset_t)NULL;
goto out;
@ -419,7 +420,7 @@ memguard_free(void *ptr)
kmem_unback(kmem_object, addr, size);
if (sizev > size)
addr -= PAGE_SIZE;
vmem_xfree(memguard_map, addr, sizev);
vmem_xfree(memguard_arena, addr, sizev);
if (req_size < PAGE_SIZE)
memguard_wasted -= (PAGE_SIZE - req_size);
}

View File

@ -71,6 +71,7 @@ extern struct vmem *kernel_arena;
extern struct vmem *kmem_arena;
extern struct vmem *buffer_arena;
extern struct vmem *transient_arena;
extern struct vmem *memguard_arena;
extern vm_offset_t swapbkva;
extern u_long vm_kmem_size;