Migrate the thread stack management functions from the machine-dependent
to the machine-independent parts of the VM.  At the same time, this
introduces vm object locking for the non-i386 platforms.

Two details:

1. KSTACK_GUARD has been removed in favor of KSTACK_GUARD_PAGES.  The
different machine-dependent implementations used various combinations
of KSTACK_GUARD and KSTACK_GUARD_PAGES.  To disable the guard page, set
KSTACK_GUARD_PAGES to 0.
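
For illustration, the corresponding allocation in the new machine-independent
vm_thread_new() (added to vm_glue.c in the last hunk of this diff) keys
everything off the page count alone, so setting KSTACK_GUARD_PAGES to 0
compiles the guard handling away without any #ifdef KSTACK_GUARD:

	ks = kmem_alloc_nofault(kernel_map,
	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
	if (ks == 0)
		panic("vm_thread_new: kstack allocation failed");
	if (KSTACK_GUARD_PAGES != 0) {
		/* Leave the guard pages unmapped so an overflow faults. */
		pmap_qremove(ks, KSTACK_GUARD_PAGES);
		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
	}
	td->td_kstack = ks;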

2. Remove the (unnecessary) clearing of PG_ZERO in vm_thread_new.  In
5.x (but not 4.x), PG_ZERO can only be set if VM_ALLOC_ZERO is passed
to vm_page_alloc() or vm_page_grab().
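
For reference, the per-page loop in the new vm_thread_new() (see the
vm_glue.c hunk at the end of this diff) grabs the stack pages without
VM_ALLOC_ZERO, so PG_ZERO can never be set on them and the old
vm_page_flag_clear(m, PG_ZERO) call would have been a no-op:

	m = vm_page_grab(ksobj, i,
	    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
	ma[i] = m;
	vm_page_lock_queues();
	vm_page_wakeup(m);
	m->valid = VM_PAGE_BITS_ALL;
	vm_page_unlock_queues();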

Author: Alan Cox
Date:   2003-06-14 23:23:55 +00:00
Commit: 49a2507bd1 (parent da02c8c47e)
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=116355

20 changed files with 190 additions and 1258 deletions
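
For quick reference, the machine-independent kernel stack interface now
exported from vm_extern.h (see that hunk below) replaces the per-platform
pmap_new_thread(), pmap_dispose_thread(), pmap_swapin_thread(), and
pmap_swapout_thread() routines:

	void vm_thread_dispose(struct thread *td);
	void vm_thread_dispose_altkstack(struct thread *td);
	void vm_thread_new(struct thread *td, int pages);
	void vm_thread_new_altkstack(struct thread *td, int pages);
	void vm_thread_swapin(struct thread *td);
	void vm_thread_swapout(struct thread *td);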


@@ -106,7 +106,7 @@ interrupt(a0, a1, a2, framep)
intr_restore(s);
#endif
atomic_add_int(&td->td_intr_nesting_level, 1);
#ifndef KSTACK_GUARD
#if KSTACK_GUARD_PAGES == 0
#ifndef SMP
{
if ((caddr_t) framep < (caddr_t) td->td_pcb + 1024) {


@@ -932,209 +932,6 @@ pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
return m;
}
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
/*
* Create the kernel stack for a new thread.
* This routine directly affects the fork perf for a process and thread.
*/
void
pmap_new_thread(struct thread *td, int pages)
{
int i;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pt_entry_t *ptek, oldpte;
/* Bounds check */
if (pages <= 1)
pages = KSTACK_PAGES;
else if (pages > KSTACK_MAX_PAGES)
pages = KSTACK_MAX_PAGES;
/*
* allocate object for the kstack
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
#ifdef KSTACK_GUARD
/* get a kernel virtual address for the kstack for this thread */
ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
if (ks == NULL)
panic("pmap_new_thread: kstack allocation failed");
/* Set the first page to be the unmapped guard page. */
ptek = vtopte(ks);
oldpte = *ptek;
*ptek = 0;
if (oldpte)
pmap_invalidate_page(kernel_pmap, ks);
/* move to the next page, which is where the real stack starts. */
ks += PAGE_SIZE;
td->td_kstack = ks;
ptek++;
#else
/* get a kernel virtual address for the kstack for this thread */
ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE);
if (ks == NULL)
panic("pmap_new_thread: kstack allocation failed");
td->td_kstack = ks;
ptek = vtopte(ks);
#endif
/*
* Knowing the number of pages allocated is useful when you
* want to deallocate them.
*/
td->td_kstack_pages = pages;
/*
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page
*/
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
/*
* Enter the page into the kernel address space.
*/
oldpte = ptek[i];
ptek[i] = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m))
| PG_ASM | PG_KRE | PG_KWE | PG_V;
if (oldpte)
pmap_invalidate_page(kernel_pmap, ks + i * PAGE_SIZE);
vm_page_lock_queues();
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
vm_page_unlock_queues();
}
}
/*
* Dispose the kernel stack for a thread that has exited.
* This routine directly impacts the exit perf of a thread.
*/
void
pmap_dispose_thread(td)
struct thread *td;
{
int i;
int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pt_entry_t *ptek;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
ptek = vtopte(ks);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
ptek[i] = 0;
pmap_invalidate_page(kernel_pmap, ks + i * PAGE_SIZE);
vm_page_lock_queues();
vm_page_busy(m);
vm_page_unwire(m, 0);
vm_page_free(m);
vm_page_unlock_queues();
}
/*
* Free the space that this stack was mapped to in the kernel
* address map.
*/
#ifdef KSTACK_GUARD
kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE);
#else
kmem_free(kernel_map, ks, pages * PAGE_SIZE);
#endif
vm_object_deallocate(ksobj);
}
/*
* Allow the kernel stack for a thread to be prejudicially paged out.
*/
void
pmap_swapout_thread(td)
struct thread *td;
{
int i;
int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
/*
* Make sure we aren't fpcurthread.
*/
alpha_fpstate_save(td, 1);
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
vm_page_lock_queues();
vm_page_dirty(m);
vm_page_unwire(m, 0);
vm_page_unlock_queues();
pmap_kremove(ks + i * PAGE_SIZE);
}
}
/*
* Bring the kernel stack for a specified thread back in.
*/
void
pmap_swapin_thread(td)
struct thread *td;
{
int i, rv;
int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(ksobj, &m, 1, 0);
if (rv != VM_PAGER_OK)
panic("pmap_swapin_thread: cannot get kstack for proc: %d\n", td->td_proc->p_pid);
m = vm_page_lookup(ksobj, i);
m->valid = VM_PAGE_BITS_ALL;
}
vm_page_lock_queues();
vm_page_wire(m);
vm_page_wakeup(m);
vm_page_unlock_queues();
}
/*
* The pcb may be at a different physical address now so cache the
* new address.
*/
td->td_md.md_pcbpaddr = (void *)vtophys((vm_offset_t)td->td_pcb);
}
/***************************************************
* Page table page management routines.....
***************************************************/


@@ -118,9 +118,9 @@
#define SINCR 1 /* increment of stack/NBPG */
#define KSTACK_PAGES 2 /* pages of kstack (with pcb) */
#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
#define UAREA_PAGES 1 /* pages of u-area */
#define KSTACK_GUARD /* compile in kstack guard page */
/*
* Mach derived conversion macros


@@ -883,182 +883,6 @@ pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
return m;
}
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
/*
* Create the kernel stack (including pcb for amd64) for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
pmap_new_thread(struct thread *td, int pages)
{
int i;
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_page_t m;
vm_offset_t ks;
/* Bounds check */
if (pages <= 1)
pages = KSTACK_PAGES;
else if (pages > KSTACK_MAX_PAGES)
pages = KSTACK_MAX_PAGES;
/*
* allocate object for the kstack
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/* get a kernel virtual address for the kstack for this thread */
#ifdef KSTACK_GUARD
ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
if (*vtopte(ks) != 0)
pmap_qremove(ks, 1);
ks += PAGE_SIZE;
td->td_kstack = ks;
#else
/* get a kernel virtual address for the kstack for this thread */
ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
td->td_kstack = ks;
#endif
/*
* Knowing the number of pages allocated is useful when you
* want to deallocate them.
*/
td->td_kstack_pages = pages;
/*
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page
*/
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
ma[i] = m;
vm_page_lock_queues();
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
vm_page_unlock_queues();
}
pmap_qenter(ks, ma, pages);
}
/*
* Dispose the kernel stack for a thread that has exited.
* This routine directly impacts the exit perf of a process and thread.
*/
void
pmap_dispose_thread(td)
struct thread *td;
{
int i;
int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
pmap_qremove(ks, pages);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
vm_page_lock_queues();
vm_page_busy(m);
vm_page_unwire(m, 0);
vm_page_free(m);
vm_page_unlock_queues();
}
/*
* Free the space that this stack was mapped to in the kernel
* address map.
*/
#ifdef KSTACK_GUARD
kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE);
#else
kmem_free(kernel_map, ks, pages * PAGE_SIZE);
#endif
vm_object_deallocate(ksobj);
}
/*
* Allow the Kernel stack for a thread to be prejudicially paged out.
*/
void
pmap_swapout_thread(td)
struct thread *td;
{
int i;
int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
pmap_qremove(ks, pages);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
vm_page_lock_queues();
vm_page_dirty(m);
vm_page_unwire(m, 0);
vm_page_unlock_queues();
}
}
/*
* Bring the kernel stack for a specified thread back in.
*/
void
pmap_swapin_thread(td)
struct thread *td;
{
int i, rv;
int pages;
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(ksobj, &m, 1, 0);
if (rv != VM_PAGER_OK)
panic("pmap_swapin_thread: cannot get kstack for proc: %d\n", td->td_proc->p_pid);
m = vm_page_lookup(ksobj, i);
m->valid = VM_PAGE_BITS_ALL;
}
ma[i] = m;
vm_page_lock_queues();
vm_page_wire(m);
vm_page_wakeup(m);
vm_page_unlock_queues();
}
pmap_qenter(ks, ma, pages);
}
/***************************************************
* Page table page management routines.....
***************************************************/


@@ -118,9 +118,9 @@
#define IOPAGES 2 /* pages of i/o permission bitmap */
#define KSTACK_PAGES 4 /* pages of kstack (with pcb) */
#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
#define UAREA_PAGES 1 /* holds struct user WITHOUT PCB (see def.) */
#define KSTACK_GUARD 1 /* compile in the kstack guard page */
/*
* Ceiling on amount of swblock kva space, can be changed via


@@ -982,190 +982,6 @@ pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
return m;
}
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
/*
* Create the kernel stack (including pcb for i386) for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
pmap_new_thread(struct thread *td, int pages)
{
int i;
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_page_t m;
vm_offset_t ks;
/* Bounds check */
if (pages <= 1)
pages = KSTACK_PAGES;
else if (pages > KSTACK_MAX_PAGES)
pages = KSTACK_MAX_PAGES;
/*
* allocate object for the kstack
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/* get a kernel virtual address for the kstack for this thread */
#ifdef KSTACK_GUARD
ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
if (*vtopte(ks) != 0)
pmap_qremove(ks, 1);
ks += PAGE_SIZE;
td->td_kstack = ks;
#else
/* get a kernel virtual address for the kstack for this thread */
ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
td->td_kstack = ks;
#endif
/*
* Knowing the number of pages allocated is useful when you
* want to deallocate them.
*/
td->td_kstack_pages = pages;
/*
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
VM_OBJECT_LOCK(ksobj);
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page
*/
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
ma[i] = m;
vm_page_lock_queues();
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(ksobj);
pmap_qenter(ks, ma, pages);
}
/*
* Dispose the kernel stack for a thread that has exited.
* This routine directly impacts the exit perf of a process and thread.
*/
void
pmap_dispose_thread(td)
struct thread *td;
{
int i;
int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
pmap_qremove(ks, pages);
VM_OBJECT_LOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
vm_page_lock_queues();
vm_page_busy(m);
vm_page_unwire(m, 0);
vm_page_free(m);
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(ksobj);
/*
* Free the space that this stack was mapped to in the kernel
* address map.
*/
#ifdef KSTACK_GUARD
kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE);
#else
kmem_free(kernel_map, ks, pages * PAGE_SIZE);
#endif
vm_object_deallocate(ksobj);
}
/*
* Allow the Kernel stack for a thread to be prejudicially paged out.
*/
void
pmap_swapout_thread(td)
struct thread *td;
{
int i;
int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
pmap_qremove(ks, pages);
VM_OBJECT_LOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
vm_page_lock_queues();
vm_page_dirty(m);
vm_page_unwire(m, 0);
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(ksobj);
}
/*
* Bring the kernel stack for a specified thread back in.
*/
void
pmap_swapin_thread(td)
struct thread *td;
{
int i, rv;
int pages;
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
VM_OBJECT_LOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(ksobj, &m, 1, 0);
if (rv != VM_PAGER_OK)
panic("pmap_swapin_thread: cannot get kstack for proc: %d\n", td->td_proc->p_pid);
m = vm_page_lookup(ksobj, i);
m->valid = VM_PAGE_BITS_ALL;
}
ma[i] = m;
vm_page_lock_queues();
vm_page_wire(m);
vm_page_wakeup(m);
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(ksobj);
pmap_qenter(ks, ma, pages);
}
/***************************************************
* Page table page management routines.....
***************************************************/


@@ -106,10 +106,9 @@
#ifndef KSTACK_PAGES
#define KSTACK_PAGES 2 /* Includes pcb! */
#endif
#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
#define UAREA_PAGES 1 /* holds struct user WITHOUT PCB (see def.) */
#define KSTACK_GUARD /* compile in the kstack guard page */
/*
* Ceiling on amount of swblock kva space, can be changed via
* the kern.maxswzone /boot/loader.conf variable.


@@ -765,53 +765,6 @@ pmap_track_modified(vm_offset_t va)
return 0;
}
/*
* Create the KSTACK for a new thread.
* This routine directly affects the fork perf for a process/thread.
*/
void
pmap_new_thread(struct thread *td, int pages)
{
/* Bounds check */
if (pages <= 1)
pages = KSTACK_PAGES;
else if (pages > KSTACK_MAX_PAGES)
pages = KSTACK_MAX_PAGES;
td->td_kstack = (vm_offset_t)malloc(pages * PAGE_SIZE, M_PMAP,
M_WAITOK);
td->td_kstack_pages = pages;
}
/*
* Dispose the KSTACK for a thread that has exited.
* This routine directly impacts the exit perf of a process/thread.
*/
void
pmap_dispose_thread(struct thread *td)
{
free((void*)td->td_kstack, M_PMAP);
td->td_kstack = 0;
td->td_kstack_pages = 0;
}
/*
* Allow the KSTACK for a thread to be prejudicially paged out.
*/
void
pmap_swapout_thread(struct thread *td)
{
}
/*
* Bring the KSTACK for a specified thread back in.
*/
void
pmap_swapin_thread(struct thread *td)
{
}
/***************************************************
* Page table page management routines.....
***************************************************/


@@ -135,6 +135,7 @@
#define SINCR 1 /* increment of stack/NBPG */
#define KSTACK_PAGES 4 /* pages of kernel stack */
#define KSTACK_GUARD_PAGES 0 /* pages of kstack guard; 0 disables */
#define UAREA_PAGES 1 /* pages of u-area */
/*


@@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <sys/ucontext.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
@@ -185,7 +186,7 @@ thread_init(void *mem, int size)
td = (struct thread *)mem;
mtx_lock(&Giant);
pmap_new_thread(td, 0);
vm_thread_new(td, 0);
mtx_unlock(&Giant);
cpu_thread_setup(td);
td->td_sched = (struct td_sched *)&td[1];
@@ -200,7 +201,7 @@ thread_fini(void *mem, int size)
struct thread *td;
td = (struct thread *)mem;
pmap_dispose_thread(td);
vm_thread_dispose(td);
}
/*


@@ -51,6 +51,7 @@ __FBSDID("$FreeBSD$");
#include <sys/ucontext.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/pmap.h>
#include <vm/uma.h>
@@ -185,7 +186,7 @@ thread_init(void *mem, int size)
td = (struct thread *)mem;
mtx_lock(&Giant);
pmap_new_thread(td, 0);
vm_thread_new(td, 0);
mtx_unlock(&Giant);
cpu_thread_setup(td);
td->td_sched = (struct td_sched *)&td[1];
@@ -200,7 +201,7 @@ thread_fini(void *mem, int size)
struct thread *td;
td = (struct thread *)mem;
pmap_dispose_thread(td);
vm_thread_dispose(td);
}
/*


@@ -1556,159 +1556,6 @@ pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
pmap_remove(pm, sva, eva);
}
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
/*
* Create the kernel stack and pcb for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
pmap_new_thread(struct thread *td, int pages)
{
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
u_int i;
/* Bounds check */
if (pages <= 1)
pages = KSTACK_PAGES;
else if (pages > KSTACK_MAX_PAGES)
pages = KSTACK_MAX_PAGES;
/*
* Allocate object for the kstack.
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/*
* Get a kernel virtual address for the kstack for this thread.
*/
ks = kmem_alloc_nofault(kernel_map,
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
TLBIE(ks);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
td->td_kstack = ks;
/*
* Knowing the number of pages allocated is useful when you
* want to deallocate them.
*/
td->td_kstack_pages = pages;
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
*/
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
ma[i] = m;
vm_page_lock_queues();
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
vm_page_unlock_queues();
}
/*
* Enter the page into the kernel address space
*/
pmap_qenter(ks, ma, pages);
}
void
pmap_dispose_thread(struct thread *td)
{
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
int i;
int pages;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < pages ; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
vm_page_lock_queues();
vm_page_busy(m);
vm_page_unwire(m, 0);
vm_page_free(m);
vm_page_unlock_queues();
}
pmap_qremove(ks, pages);
kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
vm_object_deallocate(ksobj);
}
void
pmap_swapin_thread(struct thread *td)
{
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
int rv;
int i;
int pages;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(ksobj, &m, 1, 0);
if (rv != VM_PAGER_OK)
panic("pmap_swapin_thread: cannot get kstack");
m = vm_page_lookup(ksobj, i);
m->valid = VM_PAGE_BITS_ALL;
}
ma[i] = m;
vm_page_lock_queues();
vm_page_wire(m);
vm_page_wakeup(m);
vm_page_unlock_queues();
}
pmap_qenter(ks, ma, pages);
}
void
pmap_swapout_thread(struct thread *td)
{
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
int i;
int pages;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = (vm_offset_t)td->td_kstack;
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
vm_page_lock_queues();
vm_page_dirty(m);
vm_page_unwire(m, 0);
vm_page_unlock_queues();
}
pmap_qremove(ks, pages);
}
/*
* Allocate a physical page of memory directly from the phys_avail map.
* Can only be called from pmap_bootstrap before avail start and end are


@@ -99,7 +99,7 @@
#ifndef KSTACK_UPAGES
#define KSTACK_PAGES 4 /* includes pcb */
#define KSTACK_GUARD_PAGES 1
#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
#endif
#define USPACE (KSTACK_PAGES * PAGE_SIZE) /* total size of pcb */
#define UAREA_PAGES 1 /* holds struct user WITHOUT PCB */


@@ -1556,159 +1556,6 @@ pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
pmap_remove(pm, sva, eva);
}
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
/*
* Create the kernel stack and pcb for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
pmap_new_thread(struct thread *td, int pages)
{
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
u_int i;
/* Bounds check */
if (pages <= 1)
pages = KSTACK_PAGES;
else if (pages > KSTACK_MAX_PAGES)
pages = KSTACK_MAX_PAGES;
/*
* Allocate object for the kstack.
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/*
* Get a kernel virtual address for the kstack for this thread.
*/
ks = kmem_alloc_nofault(kernel_map,
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
TLBIE(ks);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
td->td_kstack = ks;
/*
* Knowing the number of pages allocated is useful when you
* want to deallocate them.
*/
td->td_kstack_pages = pages;
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
*/
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
ma[i] = m;
vm_page_lock_queues();
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
vm_page_unlock_queues();
}
/*
* Enter the page into the kernel address space
*/
pmap_qenter(ks, ma, pages);
}
void
pmap_dispose_thread(struct thread *td)
{
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
int i;
int pages;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < pages ; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
vm_page_lock_queues();
vm_page_busy(m);
vm_page_unwire(m, 0);
vm_page_free(m);
vm_page_unlock_queues();
}
pmap_qremove(ks, pages);
kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
vm_object_deallocate(ksobj);
}
void
pmap_swapin_thread(struct thread *td)
{
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
int rv;
int i;
int pages;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(ksobj, &m, 1, 0);
if (rv != VM_PAGER_OK)
panic("pmap_swapin_thread: cannot get kstack");
m = vm_page_lookup(ksobj, i);
m->valid = VM_PAGE_BITS_ALL;
}
ma[i] = m;
vm_page_lock_queues();
vm_page_wire(m);
vm_page_wakeup(m);
vm_page_unlock_queues();
}
pmap_qenter(ks, ma, pages);
}
void
pmap_swapout_thread(struct thread *td)
{
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
int i;
int pages;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = (vm_offset_t)td->td_kstack;
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
vm_page_lock_queues();
vm_page_dirty(m);
vm_page_unwire(m, 0);
vm_page_unlock_queues();
}
pmap_qremove(ks, pages);
}
/*
* Allocate a physical page of memory directly from the phys_avail map.
* Can only be called from pmap_bootstrap before avail start and end are


@@ -1556,159 +1556,6 @@ pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
pmap_remove(pm, sva, eva);
}
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
/*
* Create the kernel stack and pcb for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
pmap_new_thread(struct thread *td, int pages)
{
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
u_int i;
/* Bounds check */
if (pages <= 1)
pages = KSTACK_PAGES;
else if (pages > KSTACK_MAX_PAGES)
pages = KSTACK_MAX_PAGES;
/*
* Allocate object for the kstack.
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/*
* Get a kernel virtual address for the kstack for this thread.
*/
ks = kmem_alloc_nofault(kernel_map,
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
TLBIE(ks);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
td->td_kstack = ks;
/*
* Knowing the number of pages allocated is useful when you
* want to deallocate them.
*/
td->td_kstack_pages = pages;
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
*/
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
ma[i] = m;
vm_page_lock_queues();
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
vm_page_unlock_queues();
}
/*
* Enter the page into the kernel address space
*/
pmap_qenter(ks, ma, pages);
}
void
pmap_dispose_thread(struct thread *td)
{
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
int i;
int pages;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < pages ; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
vm_page_lock_queues();
vm_page_busy(m);
vm_page_unwire(m, 0);
vm_page_free(m);
vm_page_unlock_queues();
}
pmap_qremove(ks, pages);
kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
vm_object_deallocate(ksobj);
}
void
pmap_swapin_thread(struct thread *td)
{
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
int rv;
int i;
int pages;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(ksobj, &m, 1, 0);
if (rv != VM_PAGER_OK)
panic("pmap_swapin_thread: cannot get kstack");
m = vm_page_lookup(ksobj, i);
m->valid = VM_PAGE_BITS_ALL;
}
ma[i] = m;
vm_page_lock_queues();
vm_page_wire(m);
vm_page_wakeup(m);
vm_page_unlock_queues();
}
pmap_qenter(ks, ma, pages);
}
void
pmap_swapout_thread(struct thread *td)
{
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
int i;
int pages;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = (vm_offset_t)td->td_kstack;
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
vm_page_lock_queues();
vm_page_dirty(m);
vm_page_unwire(m, 0);
vm_page_unlock_queues();
}
pmap_qremove(ks, pages);
}
/*
* Allocate a physical page of memory directly from the phys_avail map.
* Can only be called from pmap_bootstrap before avail start and end are


@@ -105,11 +105,10 @@
#define PAGE_MASK_MAX PAGE_MASK_4M
#define KSTACK_PAGES 4 /* pages of kernel stack (with pcb) */
#define KSTACK_GUARD_PAGES 1 /* pages of kstack guard; 0 disables */
#define UAREA_PAGES 1 /* pages of user area */
#define PCPU_PAGES 1
#define KSTACK_GUARD /* compile in kstack guard page */
#define KSTACK_GUARD_PAGES 1
/*
* Mach derived conversion macros


@@ -924,174 +924,6 @@ pmap_qremove(vm_offset_t sva, int count)
tlb_range_demap(kernel_pmap, sva, va);
}
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
/*
* Create the kernel stack and pcb for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
pmap_new_thread(struct thread *td, int pages)
{
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
u_int i;
PMAP_STATS_INC(pmap_nnew_thread);
/* Bounds check */
if (pages <= 1)
pages = KSTACK_PAGES;
else if (pages > KSTACK_MAX_PAGES)
pages = KSTACK_MAX_PAGES;
/*
* Allocate object for the kstack,
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/*
* Get a kernel virtual address for the kstack for this thread.
*/
ks = kmem_alloc_nofault(kernel_map,
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
if (KSTACK_GUARD_PAGES != 0) {
tlb_page_demap(kernel_pmap, ks);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
}
td->td_kstack = ks;
/*
* Knowing the number of pages allocated is useful when you
* want to deallocate them.
*/
td->td_kstack_pages = pages;
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
*/
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
ma[i] = m;
if (DCACHE_COLOR(ks + (i * PAGE_SIZE)) !=
DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
PMAP_STATS_INC(pmap_nnew_thread_oc);
vm_page_lock_queues();
vm_page_wakeup(m);
vm_page_flag_clear(m, PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
vm_page_unlock_queues();
}
/*
* Enter the page into the kernel address space.
*/
pmap_qenter(ks, ma, pages);
}
/*
* Dispose the kernel stack for a thread that has exited.
* This routine directly impacts the exit perf of a process and thread.
*/
void
pmap_dispose_thread(struct thread *td)
{
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
int pages;
int i;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < pages ; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
vm_page_lock_queues();
vm_page_busy(m);
vm_page_unwire(m, 0);
vm_page_free(m);
vm_page_unlock_queues();
}
pmap_qremove(ks, pages);
kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
vm_object_deallocate(ksobj);
}
/*
* Allow the kernel stack for a thread to be prejudicially paged out.
*/
void
pmap_swapout_thread(struct thread *td)
{
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
int pages;
int i;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = (vm_offset_t)td->td_kstack;
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
vm_page_lock_queues();
vm_page_dirty(m);
vm_page_unwire(m, 0);
vm_page_unlock_queues();
}
pmap_qremove(ks, pages);
}
/*
* Bring the kernel stack for a specified thread back in.
*/
void
pmap_swapin_thread(struct thread *td)
{
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
int rv;
int i;
int pages;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(ksobj, &m, 1, 0);
if (rv != VM_PAGER_OK)
panic("pmap_swapin_thread: cannot get kstack");
m = vm_page_lookup(ksobj, i);
m->valid = VM_PAGE_BITS_ALL;
}
ma[i] = m;
vm_page_lock_queues();
vm_page_wire(m);
vm_page_wakeup(m);
vm_page_unlock_queues();
}
pmap_qenter(ks, ma, pages);
}
/*
* Initialize the pmap associated with process 0.
*/


@@ -129,10 +129,6 @@ void pmap_zero_page_area(vm_page_t, int off, int size);
void pmap_zero_page_idle(vm_page_t);
void pmap_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
int pmap_mincore(pmap_t pmap, vm_offset_t addr);
void pmap_new_thread(struct thread *td, int pages);
void pmap_dispose_thread(struct thread *td);
void pmap_swapout_thread(struct thread *td);
void pmap_swapin_thread(struct thread *td);
void pmap_activate(struct thread *td);
vm_offset_t pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size);
void *pmap_kenter_temporary(vm_offset_t pa, int i);


@@ -96,7 +96,11 @@ void vm_object_print(/* db_expr_t */ long, boolean_t, /* db_expr_t */ long,
int vm_fault_quick(caddr_t v, int prot);
void vm_proc_new(struct proc *p);
void vm_proc_dispose(struct proc *p);
void vm_thread_new_altkstack(struct thread *td, int pages);
void vm_thread_dispose(struct thread *td);
void vm_thread_dispose_altkstack(struct thread *td);
void vm_thread_new(struct thread *td, int pages);
void vm_thread_new_altkstack(struct thread *td, int pages);
void vm_thread_swapin(struct thread *td);
void vm_thread_swapout(struct thread *td);
#endif /* _KERNEL */
#endif /* !_VM_EXTERN_H_ */


@@ -387,6 +387,174 @@ vm_proc_swapin_all(int devidx)
}
#endif
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
/*
* Create the kernel stack (including pcb for i386) for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
vm_thread_new(struct thread *td, int pages)
{
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m, ma[KSTACK_MAX_PAGES];
int i;
/* Bounds check */
if (pages <= 1)
pages = KSTACK_PAGES;
else if (pages > KSTACK_MAX_PAGES)
pages = KSTACK_MAX_PAGES;
/*
* Allocate an object for the kstack.
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/*
* Get a kernel virtual address for this thread's kstack.
*/
ks = kmem_alloc_nofault(kernel_map,
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
if (ks == 0)
panic("vm_thread_new: kstack allocation failed");
if (KSTACK_GUARD_PAGES != 0) {
pmap_qremove(ks, KSTACK_GUARD_PAGES);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
}
td->td_kstack = ks;
/*
* Knowing the number of pages allocated is useful when you
* want to deallocate them.
*/
td->td_kstack_pages = pages;
/*
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
VM_OBJECT_LOCK(ksobj);
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
*/
m = vm_page_grab(ksobj, i,
VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
ma[i] = m;
vm_page_lock_queues();
vm_page_wakeup(m);
m->valid = VM_PAGE_BITS_ALL;
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(ksobj);
pmap_qenter(ks, ma, pages);
}
/*
* Dispose of a thread's kernel stack.
*/
void
vm_thread_dispose(struct thread *td)
{
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
int i, pages;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
pmap_qremove(ks, pages);
VM_OBJECT_LOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("vm_thread_dispose: kstack already missing?");
vm_page_lock_queues();
vm_page_busy(m);
vm_page_unwire(m, 0);
vm_page_free(m);
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(ksobj);
vm_object_deallocate(ksobj);
kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
}
/*
* Allow a thread's kernel stack to be paged out.
*/
void
vm_thread_swapout(struct thread *td)
{
vm_object_t ksobj;
vm_page_t m;
int i, pages;
#ifdef __alpha
/*
* Make sure we aren't fpcurthread.
*/
alpha_fpstate_save(td, 1);
#endif
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
pmap_qremove(td->td_kstack, pages);
VM_OBJECT_LOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("vm_thread_swapout: kstack already missing?");
vm_page_lock_queues();
vm_page_dirty(m);
vm_page_unwire(m, 0);
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(ksobj);
}
/*
* Bring the kernel stack for a specified thread back in.
*/
void
vm_thread_swapin(struct thread *td)
{
vm_object_t ksobj;
vm_page_t m, ma[KSTACK_MAX_PAGES];
int i, pages, rv;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
VM_OBJECT_LOCK(ksobj);
for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(ksobj, &m, 1, 0);
if (rv != VM_PAGER_OK)
panic("vm_thread_swapin: cannot get kstack for proc: %d", td->td_proc->p_pid);
m = vm_page_lookup(ksobj, i);
m->valid = VM_PAGE_BITS_ALL;
}
ma[i] = m;
vm_page_lock_queues();
vm_page_wire(m);
vm_page_wakeup(m);
vm_page_unlock_queues();
}
VM_OBJECT_UNLOCK(ksobj);
pmap_qenter(td->td_kstack, ma, pages);
#ifdef __alpha
/*
* The pcb may be at a different physical address now so cache the
* new address.
*/
td->td_md.md_pcbpaddr = (void *)vtophys((vm_offset_t)td->td_pcb);
#endif
}
/*
* Set up a variable-sized alternate kstack.
*/
@@ -398,7 +566,7 @@ vm_thread_new_altkstack(struct thread *td, int pages)
td->td_altkstack_obj = td->td_kstack_obj;
td->td_altkstack_pages = td->td_kstack_pages;
pmap_new_thread(td, pages);
vm_thread_new(td, pages);
}
/*
@@ -408,7 +576,7 @@ void
vm_thread_dispose_altkstack(struct thread *td)
{
pmap_dispose_thread(td);
vm_thread_dispose(td);
td->td_kstack = td->td_altkstack;
td->td_kstack_obj = td->td_altkstack_obj;
@@ -572,7 +740,7 @@ faultin(p)
vm_proc_swapin(p);
FOREACH_THREAD_IN_PROC(p, td)
pmap_swapin_thread(td);
vm_thread_swapin(td);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
@@ -927,7 +1095,7 @@ swapout(p)
vm_proc_swapout(p);
FOREACH_THREAD_IN_PROC(p, td)
pmap_swapout_thread(td);
vm_thread_swapout(td);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);