Use a single VM object for kernel stacks.
Previously we allocated a separate VM object for each kernel stack. However,
fully constructed kernel stacks are cached by UMA, so there is no harm in
using a single global object for all stacks. This reduces memory consumption
and makes it easier to define a memory allocation policy for kernel stack
pages, with the aim of reducing physical memory fragmentation.

Add a global kstack_object, and use the stack KVA to index into the object
like we do with kernel_object.

Reviewed by:	kib
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D24473
commit f13fa9df05 (parent 1130dff9d2)
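The KVA-based indexing the message describes is the heart of the change. As a
minimal sketch (the helper name kstack_pindex is invented here for
illustration; the committed code below computes the index inline):

	/*
	 * Sketch only: kstack_object spans the entire kernel VA range, so a
	 * stack page's offset from VM_MIN_KERNEL_ADDRESS, expressed in pages,
	 * serves as its unique index, just as with kernel_object.
	 */
	static inline vm_pindex_t
	kstack_pindex(vm_offset_t ks)
	{

		return (atop(ks - VM_MIN_KERNEL_ADDRESS));
	}

	/*
	 * Page i of the stack at KVA ks is then
	 * vm_page_lookup(kstack_object, kstack_pindex(ks) + i).
	 */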
sys/kern/kern_thread.c

@@ -84,7 +84,7 @@ _Static_assert(offsetof(struct thread, td_pflags) == 0x104,
     "struct thread KBI td_pflags");
 _Static_assert(offsetof(struct thread, td_frame) == 0x498,
     "struct thread KBI td_frame");
-_Static_assert(offsetof(struct thread, td_emuldata) == 0x6b0,
+_Static_assert(offsetof(struct thread, td_emuldata) == 0x6a0,
     "struct thread KBI td_emuldata");
 _Static_assert(offsetof(struct proc, p_flag) == 0xb0,
     "struct proc KBI p_flag");
sys/sys/proc.h

@@ -348,7 +348,6 @@ struct thread {
 	/* LP64 hole */
 	struct callout	td_slpcallout;	/* (h) Callout for sleep. */
 	struct trapframe *td_frame;	/* (k) */
-	struct vm_object *td_kstack_obj;/* (a) Kstack object. */
 	vm_offset_t	td_kstack;	/* (a) Kernel VA of kstack. */
 	int		td_kstack_pages; /* (a) Size of the kstack. */
 	volatile u_int	td_critnest;	/* (k*) Critical section nest level. */
sys/vm/vm_extern.h

@@ -126,6 +126,8 @@ struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset);
 void vm_imgact_unmap_page(struct sf_buf *sf);
 void vm_thread_dispose(struct thread *td);
 int vm_thread_new(struct thread *td, int pages);
+void vm_thread_stack_back(struct domainset *ds, vm_offset_t kaddr,
+    vm_page_t ma[], int npages, int req_class);
 u_int vm_active_count(void);
 u_int vm_inactive_count(void);
 u_int vm_laundry_count(void);
sys/vm/vm_glue.c

@@ -264,9 +264,9 @@ vm_sync_icache(vm_map_t map, vm_offset_t va, vm_offset_t sz)
 	pmap_sync_icache(map->pmap, va, sz);
 }

+vm_object_t kstack_object;
 static uma_zone_t kstack_cache;
 static int kstack_cache_size;
-static int kstack_domain_iter;

 static int
 sysctl_kstack_cache_size(SYSCTL_HANDLER_ARGS)
@@ -285,22 +285,14 @@ SYSCTL_PROC(_vm, OID_AUTO, kstack_cache_size,

 /*
  * Create the kernel stack (including pcb for i386) for a new thread.
  * This routine directly affects the fork perf for a process and
  * create performance for a thread.
  */
 static vm_offset_t
-vm_thread_stack_create(struct domainset *ds, vm_object_t *ksobjp, int pages)
+vm_thread_stack_create(struct domainset *ds, int pages)
 {
 	vm_page_t ma[KSTACK_MAX_PAGES];
-	vm_object_t ksobj;
 	vm_offset_t ks;
 	int i;

-	/*
-	 * Allocate an object for the kstack.
-	 */
-	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
-
 	/*
 	 * Get a kernel virtual address for this thread's kstack.
 	 */
@@ -319,54 +311,45 @@ vm_thread_stack_create(struct domainset *ds, vm_object_t *ksobjp, int pages)
 #endif
 	if (ks == 0) {
 		printf("%s: kstack allocation failed\n", __func__);
-		vm_object_deallocate(ksobj);
 		return (0);
 	}
-	if (vm_ndomains > 1) {
-		ksobj->domain.dr_policy = ds;
-		ksobj->domain.dr_iter =
-		    atomic_fetchadd_int(&kstack_domain_iter, 1);
-	}

 	if (KSTACK_GUARD_PAGES != 0) {
 		pmap_qremove(ks, KSTACK_GUARD_PAGES);
 		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
 	}

-	/*
-	 * For the length of the stack, link in a real page of ram for each
-	 * page of stack.
+	/*
+	 * Allocate physical pages to back the stack.
 	 */
-	VM_OBJECT_WLOCK(ksobj);
-	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED,
-	    ma, pages);
+	vm_thread_stack_back(ds, ks, ma, pages, VM_ALLOC_NORMAL);
 	for (i = 0; i < pages; i++)
 		vm_page_valid(ma[i]);
-	VM_OBJECT_WUNLOCK(ksobj);
 	pmap_qenter(ks, ma, pages);
-	*ksobjp = ksobj;

 	return (ks);
 }

 static void
-vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
+vm_thread_stack_dispose(vm_offset_t ks, int pages)
 {
 	vm_page_t m;
+	vm_pindex_t pindex;
 	int i;

+	pindex = atop(ks - VM_MIN_KERNEL_ADDRESS);
+
 	pmap_qremove(ks, pages);
-	VM_OBJECT_WLOCK(ksobj);
+	VM_OBJECT_WLOCK(kstack_object);
 	for (i = 0; i < pages; i++) {
-		m = vm_page_lookup(ksobj, i);
+		m = vm_page_lookup(kstack_object, pindex + i);
 		if (m == NULL)
 			panic("%s: kstack already missing?", __func__);
 		vm_page_xbusy_claim(m);
 		vm_page_unwire_noq(m);
 		vm_page_free(m);
 	}
-	VM_OBJECT_WUNLOCK(ksobj);
-	vm_object_deallocate(ksobj);
+	VM_OBJECT_WUNLOCK(kstack_object);
 	kva_free(ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
 	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
 }
@@ -377,7 +360,6 @@ vm_thread_stack_dispose(vm_object_t ksobj, vm_offset_t ks, int pages)
 int
 vm_thread_new(struct thread *td, int pages)
 {
-	vm_object_t ksobj;
 	vm_offset_t ks;

 	/* Bounds check */
@@ -387,12 +369,8 @@ vm_thread_new(struct thread *td, int pages)
 		pages = KSTACK_MAX_PAGES;

 	ks = 0;
-	ksobj = NULL;
-	if (pages == kstack_pages && kstack_cache != NULL) {
+	if (pages == kstack_pages && kstack_cache != NULL)
 		ks = (vm_offset_t)uma_zalloc(kstack_cache, M_NOWAIT);
-		if (ks != 0)
-			ksobj = PHYS_TO_VM_PAGE(pmap_kextract(ks))->object;
-	}

 	/*
 	 * Ensure that kstack objects can draw pages from any memory
@@ -401,10 +379,9 @@ vm_thread_new(struct thread *td, int pages)
 	 */
 	if (ks == 0)
 		ks = vm_thread_stack_create(DOMAINSET_PREF(PCPU_GET(domain)),
-		    &ksobj, pages);
+		    pages);
 	if (ks == 0)
 		return (0);
-	td->td_kstack_obj = ksobj;
 	td->td_kstack = ks;
 	td->td_kstack_pages = pages;
 	return (1);
@@ -416,26 +393,52 @@ vm_thread_new(struct thread *td, int pages)
 void
 vm_thread_dispose(struct thread *td)
 {
-	vm_object_t ksobj;
 	vm_offset_t ks;
 	int pages;

 	pages = td->td_kstack_pages;
-	ksobj = td->td_kstack_obj;
 	ks = td->td_kstack;
 	td->td_kstack = 0;
 	td->td_kstack_pages = 0;
 	if (pages == kstack_pages)
 		uma_zfree(kstack_cache, (void *)ks);
 	else
-		vm_thread_stack_dispose(ksobj, ks, pages);
+		vm_thread_stack_dispose(ks, pages);
 }

+/*
+ * Allocate physical pages, following the specified NUMA policy, to back a
+ * kernel stack.
+ */
+void
+vm_thread_stack_back(struct domainset *ds, vm_offset_t ks, vm_page_t ma[],
+    int npages, int req_class)
+{
+	vm_pindex_t pindex;
+	int n;
+
+	pindex = atop(ks - VM_MIN_KERNEL_ADDRESS);
+
+	VM_OBJECT_WLOCK(kstack_object);
+	for (n = 0; n < npages;) {
+		if (vm_ndomains > 1)
+			kstack_object->domain.dr_policy = ds;
+
+		/*
+		 * Use WAITFAIL to force a reset of the domain selection policy
+		 * if we had to sleep for pages.
+		 */
+		n += vm_page_grab_pages(kstack_object, pindex + n,
+		    req_class | VM_ALLOC_WIRED | VM_ALLOC_WAITFAIL,
+		    &ma[n], npages - n);
+	}
+	VM_OBJECT_WUNLOCK(kstack_object);
+}
+
 static int
 kstack_import(void *arg, void **store, int cnt, int domain, int flags)
 {
 	struct domainset *ds;
-	vm_object_t ksobj;
 	int i;

 	if (domain == UMA_ANYDOMAIN)
@@ -444,8 +447,7 @@ kstack_import(void *arg, void **store, int cnt, int domain, int flags)
 		ds = DOMAINSET_PREF(domain);

 	for (i = 0; i < cnt; i++) {
-		store[i] = (void *)vm_thread_stack_create(ds, &ksobj,
-		    kstack_pages);
+		store[i] = (void *)vm_thread_stack_create(ds, kstack_pages);
 		if (store[i] == NULL)
 			break;
 	}
@@ -460,15 +462,15 @@ kstack_release(void *arg, void **store, int cnt)

 	for (i = 0; i < cnt; i++) {
 		ks = (vm_offset_t)store[i];
-		vm_thread_stack_dispose(
-		    PHYS_TO_VM_PAGE(pmap_kextract(ks))->object,
-		    ks, kstack_pages);
+		vm_thread_stack_dispose(ks, kstack_pages);
 	}
 }

 static void
 kstack_cache_init(void *null)
 {
+	kstack_object = vm_object_allocate(OBJT_SWAP,
+	    atop(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS));
 	kstack_cache = uma_zcache_create("kstack_cache",
 	    kstack_pages * PAGE_SIZE, NULL, NULL, NULL, NULL,
 	    kstack_import, kstack_release, NULL,
@@ -476,8 +478,7 @@ kstack_cache_init(void *null)
 	kstack_cache_size = imax(128, mp_ncpus * 4);
 	uma_zone_set_maxcache(kstack_cache, kstack_cache_size);
 }

-SYSINIT(vm_kstacks, SI_SUB_KTHREAD_INIT, SI_ORDER_ANY, kstack_cache_init, NULL);
+SYSINIT(vm_kstacks, SI_SUB_KMEM, SI_ORDER_ANY, kstack_cache_init, NULL);

 #ifdef KSTACK_USAGE_PROF
 /*
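One consequence of the new scheme, worth spelling out (my gloss, not text from
the commit): guard pages consume KVA below the stack but are never entered
into kstack_object, because ks is advanced past them before the pindex is
computed in vm_thread_stack_back:

	/*
	 * KVA layout of one stack allocation (sketch). Only the stack pages
	 * get backing pages in kstack_object; the guard range stays unmapped.
	 *
	 *   base                    ks = base + KSTACK_GUARD_PAGES * PAGE_SIZE
	 *   |-- guard (no backing) --|-- kstack_pages backed pages --|
	 *
	 * pindex of stack page i: atop(ks - VM_MIN_KERNEL_ADDRESS) + i
	 */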
sys/vm/vm_kern.h

@@ -77,5 +77,6 @@ extern struct vmem *memguard_arena;
 extern u_long vm_kmem_size;
 extern u_int exec_map_entries;
 extern u_int exec_map_entry_size;
+extern vm_object_t kstack_object;

 #endif /* _VM_VM_KERN_H_ */
sys/vm/vm_swapout.c

@@ -104,6 +104,7 @@ __FBSDID("$FreeBSD$");

 #include <vm/vm.h>
 #include <vm/vm_param.h>
+#include <vm/vm_kern.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
 #include <vm/vm_map.h>
@@ -526,24 +527,26 @@ vm_daemon(void)
 static void
 vm_thread_swapout(struct thread *td)
 {
-	vm_object_t ksobj;
 	vm_page_t m;
+	vm_offset_t kaddr;
+	vm_pindex_t pindex;
 	int i, pages;

 	cpu_thread_swapout(td);
+	kaddr = td->td_kstack;
 	pages = td->td_kstack_pages;
-	ksobj = td->td_kstack_obj;
-	pmap_qremove(td->td_kstack, pages);
-	VM_OBJECT_WLOCK(ksobj);
+	pindex = atop(kaddr - VM_MIN_KERNEL_ADDRESS);
+	pmap_qremove(kaddr, pages);
+	VM_OBJECT_WLOCK(kstack_object);
 	for (i = 0; i < pages; i++) {
-		m = vm_page_lookup(ksobj, i);
+		m = vm_page_lookup(kstack_object, pindex + i);
 		if (m == NULL)
 			panic("vm_thread_swapout: kstack already missing?");
 		vm_page_dirty(m);
 		vm_page_xunbusy_unchecked(m);
 		vm_page_unwire(m, PQ_LAUNDRY);
 	}
-	VM_OBJECT_WUNLOCK(ksobj);
+	VM_OBJECT_WUNLOCK(kstack_object);
 }

 /*
@@ -552,38 +555,36 @@ vm_thread_swapout(struct thread *td)
 static void
 vm_thread_swapin(struct thread *td, int oom_alloc)
 {
-	vm_object_t ksobj;
 	vm_page_t ma[KSTACK_MAX_PAGES];
+	vm_offset_t kaddr;
 	int a, count, i, j, pages, rv;

+	kaddr = td->td_kstack;
 	pages = td->td_kstack_pages;
-	ksobj = td->td_kstack_obj;
-	VM_OBJECT_WLOCK(ksobj);
-	(void)vm_page_grab_pages(ksobj, 0, oom_alloc | VM_ALLOC_WIRED, ma,
-	    pages);
-	VM_OBJECT_WUNLOCK(ksobj);
+	vm_thread_stack_back(td->td_domain.dr_policy, kaddr, ma, pages,
+	    oom_alloc);
 	for (i = 0; i < pages;) {
 		vm_page_assert_xbusied(ma[i]);
 		if (vm_page_all_valid(ma[i])) {
 			i++;
 			continue;
 		}
-		vm_object_pip_add(ksobj, 1);
+		vm_object_pip_add(kstack_object, 1);
 		for (j = i + 1; j < pages; j++)
 			if (vm_page_all_valid(ma[j]))
 				break;
-		VM_OBJECT_WLOCK(ksobj);
-		rv = vm_pager_has_page(ksobj, ma[i]->pindex, NULL, &a);
-		VM_OBJECT_WUNLOCK(ksobj);
+		VM_OBJECT_WLOCK(kstack_object);
+		rv = vm_pager_has_page(kstack_object, ma[i]->pindex, NULL, &a);
+		VM_OBJECT_WUNLOCK(kstack_object);
 		KASSERT(rv == 1, ("%s: missing page %p", __func__, ma[i]));
 		count = min(a + 1, j - i);
-		rv = vm_pager_get_pages(ksobj, ma + i, count, NULL, NULL);
+		rv = vm_pager_get_pages(kstack_object, ma + i, count, NULL, NULL);
 		KASSERT(rv == VM_PAGER_OK, ("%s: cannot get kstack for proc %d",
 		    __func__, td->td_proc->p_pid));
-		vm_object_pip_wakeup(ksobj);
+		vm_object_pip_wakeup(kstack_object);
 		i += count;
 	}
-	pmap_qenter(td->td_kstack, ma, pages);
+	pmap_qenter(kaddr, ma, pages);
 	cpu_thread_swapin(td);
 }
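To make the per-thread pindex computation concrete, a worked example with
invented numbers (hypothetical addresses, assuming PAGE_SIZE == 4096 and
kstack_pages == 4): distinct stacks occupy disjoint index ranges in
kstack_object, so swapping out one thread's stack never touches another's
pages:

	/*
	 * Thread A: td_kstack == VM_MIN_KERNEL_ADDRESS + 0x10000
	 *   pindex = atop(0x10000) = 16  ->  pages 16..19
	 * Thread B: td_kstack == VM_MIN_KERNEL_ADDRESS + 0x30000
	 *   pindex = atop(0x30000) = 48  ->  pages 48..51
	 */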