Some kernel threads try to do significant work, and the default KSTACK_PAGES

doesn't give them enough stack to do much before blowing away the pcb.
This adds MI and MD code to allow the allocation of an alternate kstack
whose size can be specified when calling kthread_create.  Passing the
value 0 prevents the alternate kstack from being created.  Note that the
ia64 MD code is missing for now, and PowerPC was only partially written
due to the pmap.c being incomplete there.
Though this patch does not modify anything to make use of the alternate
kstack, acpi and usb are good candidates.

Reviewed by:	jake, peter, jhb
This commit is contained in:
Scott Long 2002-10-02 07:44:29 +00:00
parent a70e34ac71
commit 316ec49abd
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=104354
35 changed files with 394 additions and 100 deletions

View File

@ -898,12 +898,16 @@ pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
return m;
}
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
/*
* Create the kernel stack for a new thread.
* This routine directly affects the fork perf for a process and thread.
*/
void
pmap_new_thread(struct thread *td)
pmap_new_thread(struct thread *td, int pages)
{
int i;
vm_object_t ksobj;
@ -911,15 +915,21 @@ pmap_new_thread(struct thread *td)
vm_page_t m;
pt_entry_t *ptek, oldpte;
/* Bounds check */
if (pages <= 1)
pages = KSTACK_PAGES;
else if (pages > KSTACK_MAX_PAGES)
pages = KSTACK_MAX_PAGES;
/*
* allocate object for the kstack
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
#ifdef KSTACK_GUARD
/* get a kernel virtual address for the kstack for this thread */
ks = kmem_alloc_nofault(kernel_map, (KSTACK_PAGES + 1) * PAGE_SIZE);
ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
if (ks == NULL)
panic("pmap_new_thread: kstack allocation failed");
@ -935,17 +945,23 @@ pmap_new_thread(struct thread *td)
ptek++;
#else
/* get a kernel virtual address for the kstack for this thread */
ks = kmem_alloc_nofault(kernel_map, KSTACK_PAGES * PAGE_SIZE);
ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE);
if (ks == NULL)
panic("pmap_new_thread: kstack allocation failed");
td->td_kstack = ks;
ptek = vtopte(ks);
#endif
/*
* Knowing the number of pages allocated is useful when you
* want to deallocate them.
*/
td->td_kstack_pages = pages;
/*
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
for (i = 0; i < KSTACK_PAGES; i++) {
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page
*/
@ -976,15 +992,17 @@ pmap_dispose_thread(td)
struct thread *td;
{
int i;
int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pt_entry_t *ptek;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
ptek = vtopte(ks);
for (i = 0; i < KSTACK_PAGES; i++) {
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
@ -1002,13 +1020,42 @@ pmap_dispose_thread(td)
* address map.
*/
#ifdef KSTACK_GUARD
kmem_free(kernel_map, ks - PAGE_SIZE, (KSTACK_PAGES + 1) * PAGE_SIZE);
kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE);
#else
kmem_free(kernel_map, ks, KSTACK_PAGES * PAGE_SIZE);
kmem_free(kernel_map, ks, pages * PAGE_SIZE);
#endif
vm_object_deallocate(ksobj);
}
/*
* Set up a variable sized alternate kstack.
*/
void
pmap_new_altkstack(struct thread *td, int pages)
{
/* shuffle the original stack */
td->td_altkstack_obj = td->td_kstack_obj;
td->td_altkstack = td->td_kstack;
td->td_altkstack_pages = td->td_kstack_pages;
pmap_new_thread(td, pages);
}
void
pmap_dispose_altkstack(td)
struct thread *td;
{
pmap_dispose_thread(td);
/* restore the original kstack */
td->td_kstack = td->td_altkstack;
td->td_kstack_obj = td->td_altkstack_obj;
td->td_kstack_pages = td->td_altkstack_pages;
td->td_altkstack = 0;
td->td_altkstack_obj = NULL;
td->td_altkstack_pages = 0;
}
/*
* Allow the kernel stack for a thread to be prejudicially paged out.
*/
@ -1017,6 +1064,7 @@ pmap_swapout_thread(td)
struct thread *td;
{
int i;
int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
@ -1026,9 +1074,10 @@ pmap_swapout_thread(td)
*/
alpha_fpstate_save(td, 1);
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < KSTACK_PAGES; i++) {
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
@ -1048,13 +1097,15 @@ pmap_swapin_thread(td)
struct thread *td;
{
int i, rv;
int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < KSTACK_PAGES; i++) {
for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
if (m->valid != VM_PAGE_BITS_ALL) {

View File

@ -934,29 +934,39 @@ pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
return m;
}
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
/*
* Create the kernel stack (including pcb for i386) for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
pmap_new_thread(struct thread *td)
pmap_new_thread(struct thread *td, int pages)
{
int i;
vm_page_t ma[KSTACK_PAGES];
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_page_t m;
vm_offset_t ks;
/* Bounds check */
if (pages <= 1)
pages = KSTACK_PAGES;
else if (pages > KSTACK_MAX_PAGES)
pages = KSTACK_MAX_PAGES;
/*
* allocate object for the kstack
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/* get a kernel virtual address for the kstack for this thread */
#ifdef KSTACK_GUARD
ks = kmem_alloc_nofault(kernel_map, (KSTACK_PAGES + 1) * PAGE_SIZE);
ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
if (*vtopte(ks) != 0)
@ -965,16 +975,22 @@ pmap_new_thread(struct thread *td)
td->td_kstack = ks;
#else
/* get a kernel virtual address for the kstack for this thread */
ks = kmem_alloc_nofault(kernel_map, KSTACK_PAGES * PAGE_SIZE);
ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
td->td_kstack = ks;
#endif
/*
* Knowing the number of pages allocated is useful when you
* want to deallocate them.
*/
td->td_kstack_pages = pages;
/*
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
for (i = 0; i < KSTACK_PAGES; i++) {
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page
*/
@ -986,7 +1002,7 @@ pmap_new_thread(struct thread *td)
vm_page_flag_clear(m, PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
}
pmap_qenter(ks, ma, KSTACK_PAGES);
pmap_qenter(ks, ma, pages);
}
/*
@ -998,14 +1014,16 @@ pmap_dispose_thread(td)
struct thread *td;
{
int i;
int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
pmap_qremove(ks, KSTACK_PAGES);
for (i = 0; i < KSTACK_PAGES; i++) {
pmap_qremove(ks, pages);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
@ -1020,13 +1038,43 @@ pmap_dispose_thread(td)
* address map.
*/
#ifdef KSTACK_GUARD
kmem_free(kernel_map, ks - PAGE_SIZE, (KSTACK_PAGES + 1) * PAGE_SIZE);
kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE);
#else
kmem_free(kernel_map, ks, KSTACK_PAGES * PAGE_SIZE);
kmem_free(kernel_map, ks, pages * PAGE_SIZE);
#endif
vm_object_deallocate(ksobj);
}
/*
* Set up a variable sized alternate kstack. Though it may look MI, it may
* need to be different on certain arches like ia64.
*/
void
pmap_new_altkstack(struct thread *td, int pages)
{
/* shuffle the original stack */
td->td_altkstack_obj = td->td_kstack_obj;
td->td_altkstack = td->td_kstack;
td->td_altkstack_pages = td->td_kstack_pages;
pmap_new_thread(td, pages);
}
void
pmap_dispose_altkstack(td)
struct thread *td;
{
pmap_dispose_thread(td);
/* restore the original kstack */
td->td_kstack = td->td_altkstack;
td->td_kstack_obj = td->td_altkstack_obj;
td->td_kstack_pages = td->td_altkstack_pages;
td->td_altkstack = 0;
td->td_altkstack_obj = NULL;
td->td_altkstack_pages = 0;
}
/*
* Allow the Kernel stack for a thread to be prejudicially paged out.
*/
@ -1035,14 +1083,16 @@ pmap_swapout_thread(td)
struct thread *td;
{
int i;
int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
pmap_qremove(ks, KSTACK_PAGES);
for (i = 0; i < KSTACK_PAGES; i++) {
pmap_qremove(ks, pages);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
@ -1061,14 +1111,16 @@ pmap_swapin_thread(td)
struct thread *td;
{
int i, rv;
vm_page_t ma[KSTACK_PAGES];
int pages;
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < KSTACK_PAGES; i++) {
for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(ksobj, &m, 1, 0);
@ -1083,7 +1135,7 @@ pmap_swapin_thread(td)
vm_page_wakeup(m);
vm_page_unlock_queues();
}
pmap_qenter(ks, ma, KSTACK_PAGES);
pmap_qenter(ks, ma, pages);
}
/***************************************************

View File

@ -2253,6 +2253,8 @@ options SHOW_BUSYBUFS # List buffers that prevent root unmount
options SLIP_IFF_OPTS
options VFS_BIO_DEBUG # VFS buffer I/O debugging
options KSTACK_MAX_PAGES=32 # Maximum pages to give the kernel stack
# Yet more undocumented options for linting.
options AAC_DEBUG
options ACD_DEBUG

View File

@ -89,6 +89,7 @@ GDBSPEED opt_ddb.h
GEOM
HW_WDOG
KSTACK_PAGES
KSTACK_MAX_PAGES
KTRACE
KTRACE_REQUEST_POOL opt_ktrace.h
LIBICONV

View File

@ -312,7 +312,7 @@ aac_attach(struct aac_softc *sc)
/* Create the AIF thread */
#if __FreeBSD_version > 500005
if (kthread_create((void(*)(void *))aac_host_command, sc,
&sc->aifthread, 0, "aac%daif", unit))
&sc->aifthread, 0, 0, "aac%daif", unit))
#else
if (kthread_create((void(*)(void *))aac_host_command, sc,
&sc->aifthread, "aac%daif", unit))

View File

@ -148,7 +148,7 @@ acpi_task_thread_init(void)
for (i = 0; i < ACPI_MAX_THREADS; i++) {
err = kthread_create(acpi_task_thread, NULL, &acpi_kthread_proc,
0, "acpi_task%d", i);
0, 0, "acpi_task%d", i);
if (err != 0) {
printf("%s: kthread_create failed(%d)\n", __func__, err);
break;

View File

@ -263,7 +263,7 @@ acpi_tz_attach(device_t dev)
*/
if (acpi_tz_proc == NULL) {
error = kthread_create(acpi_tz_thread, NULL, &acpi_tz_proc,
RFHIGHPID, "acpi_thermal");
RFHIGHPID, 0, "acpi_thermal");
if (error != 0) {
device_printf(sc->tz_dev, "could not create thread - %d", error);
goto out;

View File

@ -463,7 +463,7 @@ ata_raid_rebuild(int array)
return ENXIO;
if (rdp->flags & AR_F_REBUILDING)
return EBUSY;
return kthread_create(ar_rebuild, rdp, &rdp->pid, RFNOWAIT,
return kthread_create(ar_rebuild, rdp, &rdp->pid, RFNOWAIT, 0,
"rebuilding ar%d", array);
}

View File

@ -148,7 +148,7 @@ isp_attach(struct ispsoftc *isp)
/* XXX: LOCK VIOLATION */
cv_init(&isp->isp_osinfo.kthread_cv, "isp_kthread_cv");
if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kproc,
RFHIGHPID, "%s: fc_thrd",
RFHIGHPID, 0, "%s: fc_thrd",
device_get_nameunit(isp->isp_dev))) {
xpt_bus_deregister(cam_sim_path(sim));
cam_sim_free(sim, TRUE);

View File

@ -646,7 +646,7 @@ mdnew(int unit)
sc = (struct md_s *)malloc(sizeof *sc, M_MD, M_WAITOK | M_ZERO);
sc->unit = unit;
sprintf(sc->name, "md%d", unit);
error = kthread_create(md_kthread, sc, &sc->procp, 0, "%s", sc->name);
error = kthread_create(md_kthread, sc, &sc->procp, 0, 0,"%s", sc->name);
if (error) {
free(sc, M_MD);
return (NULL);

View File

@ -680,7 +680,7 @@ cbb_attach(device_t brdev)
cbb_set(sc, CBB_SOCKET_EVENT, cbb_get(sc, CBB_SOCKET_EVENT));
/* Start the thread */
if (kthread_create(cbb_event_thread, sc, &sc->event_thread, 0,
if (kthread_create(cbb_event_thread, sc, &sc->event_thread, 0, 0,
"%s%d", device_get_name(sc->dev), device_get_unit(sc->dev))) {
device_printf (sc->dev, "unable to create event thread.\n");
panic ("cbb_create_event_thread");

View File

@ -487,7 +487,7 @@ pcic_create_event_thread(void *arg)
}
if (kthread_create(pcic_event_thread, h, &h->event_thread,
0, "%s,%s", device_get_name(PCIC_H2SOFTC(h)->dev), cs)) {
0, 0, "%s,%s", device_get_name(PCIC_H2SOFTC(h)->dev), cs)) {
device_printf(PCIC_H2SOFTC(h)->dev,
"cannot create event thread for sock 0x%02x\n", h->sock);
panic("pcic_create_event_thread");

View File

@ -282,7 +282,7 @@ random_modevent(module_t mod __unused, int type, void *data __unused)
/* Start the hash/reseed thread */
error = kthread_create(random_kthread, NULL,
&random_kthread_proc, RFHIGHPID, "random");
&random_kthread_proc, RFHIGHPID, 0, "random");
if (error != 0)
return error;

View File

@ -365,9 +365,9 @@ typedef struct thread *usb_proc_ptr;
#define memcpy(d, s, l) bcopy((s),(d),(l))
#define memset(d, v, l) bzero((d),(l))
#define usb_kthread_create1(f, s, p, a0, a1) \
kthread_create((f), (s), (p), RFHIGHPID, (a0), (a1))
kthread_create((f), (s), (p), RFHIGHPID, 0, (a0), (a1))
#define usb_kthread_create2(f, s, p, a0) \
kthread_create((f), (s), (p), RFHIGHPID, (a0))
kthread_create((f), (s), (p), RFHIGHPID, 0, (a0))
#define usb_kthread_create kthread_create
#define config_pending_incr()

View File

@ -934,29 +934,39 @@ pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
return m;
}
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
/*
* Create the kernel stack (including pcb for i386) for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
pmap_new_thread(struct thread *td)
pmap_new_thread(struct thread *td, int pages)
{
int i;
vm_page_t ma[KSTACK_PAGES];
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_page_t m;
vm_offset_t ks;
/* Bounds check */
if (pages <= 1)
pages = KSTACK_PAGES;
else if (pages > KSTACK_MAX_PAGES)
pages = KSTACK_MAX_PAGES;
/*
* allocate object for the kstack
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/* get a kernel virtual address for the kstack for this thread */
#ifdef KSTACK_GUARD
ks = kmem_alloc_nofault(kernel_map, (KSTACK_PAGES + 1) * PAGE_SIZE);
ks = kmem_alloc_nofault(kernel_map, (pages + 1) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
if (*vtopte(ks) != 0)
@ -965,16 +975,22 @@ pmap_new_thread(struct thread *td)
td->td_kstack = ks;
#else
/* get a kernel virtual address for the kstack for this thread */
ks = kmem_alloc_nofault(kernel_map, KSTACK_PAGES * PAGE_SIZE);
ks = kmem_alloc_nofault(kernel_map, pages * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
td->td_kstack = ks;
#endif
/*
* Knowing the number of pages allocated is useful when you
* want to deallocate them.
*/
td->td_kstack_pages = pages;
/*
* For the length of the stack, link in a real page of ram for each
* page of stack.
*/
for (i = 0; i < KSTACK_PAGES; i++) {
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page
*/
@ -986,7 +1002,7 @@ pmap_new_thread(struct thread *td)
vm_page_flag_clear(m, PG_ZERO);
m->valid = VM_PAGE_BITS_ALL;
}
pmap_qenter(ks, ma, KSTACK_PAGES);
pmap_qenter(ks, ma, pages);
}
/*
@ -998,14 +1014,16 @@ pmap_dispose_thread(td)
struct thread *td;
{
int i;
int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
pmap_qremove(ks, KSTACK_PAGES);
for (i = 0; i < KSTACK_PAGES; i++) {
pmap_qremove(ks, pages);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
@ -1020,13 +1038,43 @@ pmap_dispose_thread(td)
* address map.
*/
#ifdef KSTACK_GUARD
kmem_free(kernel_map, ks - PAGE_SIZE, (KSTACK_PAGES + 1) * PAGE_SIZE);
kmem_free(kernel_map, ks - PAGE_SIZE, (pages + 1) * PAGE_SIZE);
#else
kmem_free(kernel_map, ks, KSTACK_PAGES * PAGE_SIZE);
kmem_free(kernel_map, ks, pages * PAGE_SIZE);
#endif
vm_object_deallocate(ksobj);
}
/*
* Set up a variable sized alternate kstack. Though it may look MI, it may
* need to be different on certain arches like ia64.
*/
void
pmap_new_altkstack(struct thread *td, int pages)
{
/* shuffle the original stack */
td->td_altkstack_obj = td->td_kstack_obj;
td->td_altkstack = td->td_kstack;
td->td_altkstack_pages = td->td_kstack_pages;
pmap_new_thread(td, pages);
}
void
pmap_dispose_altkstack(td)
struct thread *td;
{
pmap_dispose_thread(td);
/* restore the original kstack */
td->td_kstack = td->td_altkstack;
td->td_kstack_obj = td->td_altkstack_obj;
td->td_kstack_pages = td->td_altkstack_pages;
td->td_altkstack = 0;
td->td_altkstack_obj = NULL;
td->td_altkstack_pages = 0;
}
/*
* Allow the Kernel stack for a thread to be prejudicially paged out.
*/
@ -1035,14 +1083,16 @@ pmap_swapout_thread(td)
struct thread *td;
{
int i;
int pages;
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
pmap_qremove(ks, KSTACK_PAGES);
for (i = 0; i < KSTACK_PAGES; i++) {
pmap_qremove(ks, pages);
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
@ -1061,14 +1111,16 @@ pmap_swapin_thread(td)
struct thread *td;
{
int i, rv;
vm_page_t ma[KSTACK_PAGES];
int pages;
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < KSTACK_PAGES; i++) {
for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(ksobj, &m, 1, 0);
@ -1083,7 +1135,7 @@ pmap_swapin_thread(td)
vm_page_wakeup(m);
vm_page_unlock_queues();
}
pmap_qenter(ks, ma, KSTACK_PAGES);
pmap_qenter(ks, ma, pages);
}
/***************************************************

View File

@ -342,7 +342,7 @@ linux_clone(struct thread *td, struct linux_clone_args *args)
ff |= RFFDG;
mtx_lock(&Giant);
error = fork1(td, ff, &p2);
error = fork1(td, ff, 0, &p2);
if (error == 0) {
td->td_retval[0] = p2->p_pid;
td->td_retval[1] = 0;

View File

@ -685,7 +685,7 @@ create_init(const void *udata __unused)
struct ucred *newcred, *oldcred;
int error;
error = fork1(&thread0, RFFDG | RFPROC | RFSTOPPED, &initproc);
error = fork1(&thread0, RFFDG | RFPROC | RFSTOPPED, 0, &initproc);
if (error)
panic("cannot fork init: %d\n", error);
/* divorce init's credentials from the kernel's */

View File

@ -118,7 +118,7 @@ fork(td, uap)
struct proc *p2;
mtx_lock(&Giant);
error = fork1(td, RFFDG | RFPROC, &p2);
error = fork1(td, RFFDG | RFPROC, 0, &p2);
if (error == 0) {
td->td_retval[0] = p2->p_pid;
td->td_retval[1] = 0;
@ -140,7 +140,7 @@ vfork(td, uap)
struct proc *p2;
mtx_lock(&Giant);
error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, &p2);
error = fork1(td, RFFDG | RFPROC | RFPPWAIT | RFMEM, 0, &p2);
if (error == 0) {
td->td_retval[0] = p2->p_pid;
td->td_retval[1] = 0;
@ -164,7 +164,7 @@ rfork(td, uap)
if ((uap->flags & RFKERNELONLY) != 0)
return (EINVAL);
mtx_lock(&Giant);
error = fork1(td, uap->flags, &p2);
error = fork1(td, uap->flags, 0, &p2);
if (error == 0) {
td->td_retval[0] = p2 ? p2->p_pid : 0;
td->td_retval[1] = 0;
@ -215,9 +215,10 @@ SYSCTL_PROC(_kern, OID_AUTO, randompid, CTLTYPE_INT|CTLFLAG_RW,
0, 0, sysctl_kern_randompid, "I", "Random PID modulus");
int
fork1(td, flags, procp)
fork1(td, flags, pages, procp)
struct thread *td; /* parent proc */
int flags;
int pages;
struct proc **procp; /* child proc */
{
struct proc *p2, *pptr;
@ -471,6 +472,10 @@ fork1(td, flags, procp)
kg2 = FIRST_KSEGRP_IN_PROC(p2);
ke2 = FIRST_KSE_IN_KSEGRP(kg2);
/* Allocate and switch to an alternate kstack if specified */
if (pages != 0)
pmap_new_altkstack(td2, pages);
#define RANGEOF(type, start, end) (offsetof(type, end) - offsetof(type, start))
bzero(&p2->p_startzero,

View File

@ -46,7 +46,7 @@ idle_setup(void *dummy)
#ifdef SMP
SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
error = kthread_create(idle_proc, NULL, &p,
RFSTOPPED | RFHIGHPID, "idle: cpu%d", pc->pc_cpuid);
RFSTOPPED | RFHIGHPID, 0, "idle: cpu%d", pc->pc_cpuid);
pc->pc_idlethread = FIRST_THREAD_IN_PROC(p);
if (pc->pc_curthread == NULL) {
pc->pc_curthread = pc->pc_idlethread;
@ -54,7 +54,7 @@ idle_setup(void *dummy)
}
#else
error = kthread_create(idle_proc, NULL, &p,
RFSTOPPED | RFHIGHPID, "idle");
RFSTOPPED | RFHIGHPID, 0, "idle");
PCPU_SET(idlethread, FIRST_THREAD_IN_PROC(p));
#endif
if (error)

View File

@ -188,7 +188,7 @@ ithread_create(struct ithd **ithread, int vector, int flags,
va_end(ap);
error = kthread_create(ithread_loop, ithd, &p, RFSTOPPED | RFHIGHPID,
"%s", ithd->it_name);
0, "%s", ithd->it_name);
if (error) {
mtx_destroy(&ithd->it_lock);
free(ithd, M_ITHREAD);

View File

@ -142,7 +142,7 @@ thread_init(void *mem, int size)
td = (struct thread *)mem;
mtx_lock(&Giant);
pmap_new_thread(td);
pmap_new_thread(td, 0);
mtx_unlock(&Giant);
cpu_thread_setup(td);
}

View File

@ -55,7 +55,7 @@ kproc_start(udata)
int error;
error = kthread_create((void (*)(void *))kp->func, NULL,
kp->global_procpp, 0, "%s", kp->arg0);
kp->global_procpp, 0, 0, "%s", kp->arg0);
if (error)
panic("kproc_start: %s: error %d", kp->arg0, error);
}
@ -72,7 +72,7 @@ kproc_start(udata)
*/
int
kthread_create(void (*func)(void *), void *arg,
struct proc **newpp, int flags, const char *fmt, ...)
struct proc **newpp, int flags, int pages, const char *fmt, ...)
{
int error;
va_list ap;
@ -83,7 +83,7 @@ kthread_create(void (*func)(void *), void *arg,
panic("kthread_create called too soon");
error = fork1(&thread0, RFMEM | RFFDG | RFPROC | RFSTOPPED | flags,
&p2);
pages, &p2);
if (error)
return error;

View File

@ -134,7 +134,7 @@ ktrace_init(void *dummy)
req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
}
kthread_create(ktr_loop, NULL, NULL, RFHIGHPID, "ktrace");
kthread_create(ktr_loop, NULL, NULL, RFHIGHPID, 0, "ktrace");
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

View File

@ -164,6 +164,15 @@ proc_dtor(void *mem, int size, void *arg)
KASSERT((kg != NULL), ("proc_dtor: bad kg pointer"));
ke = FIRST_KSE_IN_KSEGRP(kg);
KASSERT((ke != NULL), ("proc_dtor: bad ke pointer"));
/* Dispose of an alternate kstack, if it exists.
* XXX What if there are more than one thread in the proc?
* The first thread in the proc is special and not
* freed, so you gotta do this here.
*/
if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
pmap_dispose_altkstack(td);
/*
* We want to make sure we know the initial linkages.
* so for now tear them down and remake them.

View File

@ -142,7 +142,7 @@ thread_init(void *mem, int size)
td = (struct thread *)mem;
mtx_lock(&Giant);
pmap_new_thread(td);
pmap_new_thread(td, 0);
mtx_unlock(&Giant);
cpu_thread_setup(td);
}

View File

@ -1001,7 +1001,7 @@ aio_newproc()
int error;
struct proc *p;
error = kthread_create(aio_daemon, curproc, &p, RFNOWAIT, "aiod%d",
error = kthread_create(aio_daemon, curproc, &p, RFNOWAIT, 0, "aiod%d",
num_aio_procs);
if (error)
return error;

View File

@ -676,7 +676,7 @@ smb_iod_create(struct smb_vc *vcp)
smb_sl_init(&iod->iod_evlock, "90evl");
STAILQ_INIT(&iod->iod_evlist);
error = kthread_create(smb_iod_thread, iod, &iod->iod_p,
RFNOWAIT, "smbiod%d", iod->iod_id);
RFNOWAIT, 0, "smbiod%d", iod->iod_id);
if (error) {
SMBERROR("can't start smbiod: %d", error);
free(iod, M_SMBIOD);

View File

@ -170,7 +170,7 @@ nfs_nfsiodnew(void)
if (newiod == -1)
return (-1);
error = kthread_create(nfssvc_iod, nfs_asyncdaemon + i, NULL, RFHIGHPID,
"nfsiod %d", newiod);
0, "nfsiod %d", newiod);
if (error)
return (-1);
nfs_numasync++;

View File

@ -1532,13 +1532,17 @@ pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
pmap_remove(pm, sva, eva);
}
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
/*
* Create the kernel stack and pcb for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
pmap_new_thread(struct thread *td)
pmap_new_thread(struct thread *td, int pages)
{
vm_object_t ksobj;
vm_offset_t ks;
@ -1548,21 +1552,27 @@ pmap_new_thread(struct thread *td)
/*
* Allocate object for the kstack.
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/*
* Get a kernel virtual address for the kstack for this thread.
*/
ks = kmem_alloc_nofault(kernel_map,
(KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
TLBIE(ks);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
td->td_kstack = ks;
for (i = 0; i < KSTACK_PAGES; i++) {
/*
* Knowing the number of pages allocated is useful when you
* want to deallocate them.
*/
td->td_kstack_pages = pages;
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
*/
@ -1586,6 +1596,18 @@ pmap_dispose_thread(struct thread *td)
TODO;
}
void
pmap_new_altkstack(struct thread *td, int pages)
{
TODO;
}
void
pmap_dispose_altkstack(struct thread *td)
{
TODO;
}
void
pmap_swapin_thread(struct thread *td)
{

View File

@ -1532,13 +1532,17 @@ pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
pmap_remove(pm, sva, eva);
}
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
/*
* Create the kernel stack and pcb for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
pmap_new_thread(struct thread *td)
pmap_new_thread(struct thread *td, int pages)
{
vm_object_t ksobj;
vm_offset_t ks;
@ -1548,21 +1552,27 @@ pmap_new_thread(struct thread *td)
/*
* Allocate object for the kstack.
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/*
* Get a kernel virtual address for the kstack for this thread.
*/
ks = kmem_alloc_nofault(kernel_map,
(KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
TLBIE(ks);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
td->td_kstack = ks;
for (i = 0; i < KSTACK_PAGES; i++) {
/*
* Knowing the number of pages allocated is useful when you
* want to deallocate them.
*/
td->td_kstack_pages = pages;
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
*/
@ -1586,6 +1596,18 @@ pmap_dispose_thread(struct thread *td)
TODO;
}
void
pmap_new_altkstack(struct thread *td, int pages)
{
TODO;
}
void
pmap_dispose_altkstack(struct thread *td)
{
TODO;
}
void
pmap_swapin_thread(struct thread *td)
{

View File

@ -1532,13 +1532,17 @@ pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
pmap_remove(pm, sva, eva);
}
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
/*
* Create the kernel stack and pcb for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
pmap_new_thread(struct thread *td)
pmap_new_thread(struct thread *td, int pages)
{
vm_object_t ksobj;
vm_offset_t ks;
@ -1548,21 +1552,27 @@ pmap_new_thread(struct thread *td)
/*
* Allocate object for the kstack.
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/*
* Get a kernel virtual address for the kstack for this thread.
*/
ks = kmem_alloc_nofault(kernel_map,
(KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
TLBIE(ks);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
td->td_kstack = ks;
for (i = 0; i < KSTACK_PAGES; i++) {
/*
* Knowing the number of pages allocated is useful when you
* want to deallocate them.
*/
td->td_kstack_pages = pages;
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
*/
@ -1586,6 +1596,18 @@ pmap_dispose_thread(struct thread *td)
TODO;
}
void
pmap_new_altkstack(struct thread *td, int pages)
{
TODO;
}
void
pmap_dispose_altkstack(struct thread *td)
{
TODO;
}
void
pmap_swapin_thread(struct thread *td)
{

View File

@ -951,31 +951,41 @@ pmap_qremove(vm_offset_t sva, int count)
tlb_range_demap(kernel_pmap, sva, sva + (count * PAGE_SIZE) - 1);
}
#ifndef KSTACK_MAX_PAGES
#define KSTACK_MAX_PAGES 32
#endif
/*
* Create the kernel stack and pcb for a new thread.
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
pmap_new_thread(struct thread *td)
pmap_new_thread(struct thread *td, int pages)
{
vm_page_t ma[KSTACK_PAGES];
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
u_int i;
/* Bounds check */
if (pages <= 1)
pages = KSTACK_PAGES;
else if (pages > KSTACK_MAX_PAGES)
pages = KSTACK_MAX_PAGES;
/*
* Allocate object for the kstack,
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/*
* Get a kernel virtual address for the kstack for this thread.
*/
ks = kmem_alloc_nofault(kernel_map,
(KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
if (ks == 0)
panic("pmap_new_thread: kstack allocation failed");
if (KSTACK_GUARD_PAGES != 0) {
@ -984,7 +994,13 @@ pmap_new_thread(struct thread *td)
}
td->td_kstack = ks;
for (i = 0; i < KSTACK_PAGES; i++) {
/*
* Knowing the number of pages allocated is useful when you
* want to deallocate them.
*/
td->td_kstack_pages = pages;
for (i = 0; i < pages; i++) {
/*
* Get a kernel stack page.
*/
@ -1000,7 +1016,7 @@ pmap_new_thread(struct thread *td)
/*
* Enter the page into the kernel address space.
*/
pmap_qenter(ks, ma, KSTACK_PAGES);
pmap_qenter(ks, ma, pages);
}
/*
@ -1014,10 +1030,12 @@ pmap_dispose_thread(struct thread *td)
vm_offset_t ks;
vm_page_t m;
int i;
int pages;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < KSTACK_PAGES; i++) {
for (i = 0; i < pages ; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_dispose_thread: kstack already missing?");
@ -1027,12 +1045,40 @@ pmap_dispose_thread(struct thread *td)
vm_page_free(m);
vm_page_unlock_queues();
}
pmap_qremove(ks, KSTACK_PAGES);
pmap_qremove(ks, pages);
kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
(KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
vm_object_deallocate(ksobj);
}
/*
* Set up a variable sized alternate kstack.
*/
void
pmap_new_altkstack(struct thread *td, int pages)
{
/* shuffle the original stack */
td->td_altkstack_obj = td->td_kstack_obj;
td->td_altkstack = td->td_kstack;
td->td_altkstack_pages = td->td_kstack_pages;
pmap_new_thread(td, pages);
}
void
pmap_dispose_altkstack(struct thread *td)
{
pmap_dispose_thread(td);
/* restore the original kstack */
td->td_kstack = td->td_altkstack;
td->td_kstack_obj = td->td_altkstack_obj;
td->td_kstack_pages = td->td_altkstack_pages;
td->td_altkstack = 0;
td->td_altkstack_obj = NULL;
td->td_altkstack_pages = 0;
}
/*
* Allow the kernel stack for a thread to be prejudicially paged out.
*/
@ -1043,10 +1089,12 @@ pmap_swapout_thread(struct thread *td)
vm_offset_t ks;
vm_page_t m;
int i;
int pages;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = (vm_offset_t)td->td_kstack;
for (i = 0; i < KSTACK_PAGES; i++) {
for (i = 0; i < pages; i++) {
m = vm_page_lookup(ksobj, i);
if (m == NULL)
panic("pmap_swapout_thread: kstack already missing?");
@ -1055,7 +1103,7 @@ pmap_swapout_thread(struct thread *td)
vm_page_unwire(m, 0);
vm_page_unlock_queues();
}
pmap_qremove(ks, KSTACK_PAGES);
pmap_qremove(ks, pages);
}
/*
@ -1064,16 +1112,18 @@ pmap_swapout_thread(struct thread *td)
void
pmap_swapin_thread(struct thread *td)
{
vm_page_t ma[KSTACK_PAGES];
vm_page_t ma[KSTACK_MAX_PAGES];
vm_object_t ksobj;
vm_offset_t ks;
vm_page_t m;
int rv;
int i;
int pages;
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
ks = td->td_kstack;
for (i = 0; i < KSTACK_PAGES; i++) {
for (i = 0; i < pages; i++) {
m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
if (m->valid != VM_PAGE_BITS_ALL) {
rv = vm_pager_get_pages(ksobj, &m, 1, 0);
@ -1088,7 +1138,7 @@ pmap_swapin_thread(struct thread *td)
vm_page_wakeup(m);
vm_page_unlock_queues();
}
pmap_qenter(ks, ma, KSTACK_PAGES);
pmap_qenter(ks, ma, pages);
}
/*

View File

@ -45,7 +45,7 @@ struct kproc_desc {
void kproc_shutdown(void *, int);
void kproc_start(const void *);
int kthread_create(void (*)(void *), void *, struct proc **,
int flags, const char *, ...) __printflike(5, 6);
int flags, int pages, const char *, ...) __printflike(6, 7);
void kthread_exit(int) __dead2;
int kthread_resume(struct proc *); /* XXXKSE */
int kthread_suspend(struct proc *, int); /* XXXKSE */

View File

@ -314,6 +314,10 @@ struct thread {
struct trapframe *td_frame; /* (k) */
struct vm_object *td_kstack_obj;/* (a) Kstack object. */
vm_offset_t td_kstack; /* Kernel VA of kstack. */
int td_kstack_pages; /* Size of the kstack */
struct vm_object *td_altkstack_obj;/* (a) Alternate kstack object. */
vm_offset_t td_altkstack; /* Kernel VA of alternate kstack. */
int td_altkstack_pages; /* Size of the alternate kstack */
};
/* flags kept in td_flags */
#define TDF_UNBOUND 0x000001 /* May give away the kse, uses the kg runq. */
@ -844,7 +848,7 @@ int enterpgrp(struct proc *p, pid_t pgid, struct pgrp *pgrp, struct session *ses
int enterthispgrp(struct proc *p, struct pgrp *pgrp);
void faultin(struct proc *p);
void fixjobc(struct proc *p, struct pgrp *pgrp, int entering);
int fork1(struct thread *, int, struct proc **);
int fork1(struct thread *, int, int, struct proc **);
void fork_exit(void (*)(void *, struct trapframe *), void *,
struct trapframe *);
void fork_return(struct thread *, struct trapframe *);

View File

@ -128,8 +128,10 @@ void pmap_zero_page_area(vm_page_t, int off, int size);
void pmap_zero_page_idle(vm_page_t);
void pmap_prefault(pmap_t, vm_offset_t, vm_map_entry_t);
int pmap_mincore(pmap_t pmap, vm_offset_t addr);
void pmap_new_thread(struct thread *td);
void pmap_new_thread(struct thread *td, int pages);
void pmap_dispose_thread(struct thread *td);
void pmap_new_altkstack(struct thread *td, int pages);
void pmap_dispose_altkstack(struct thread *td);
void pmap_swapout_thread(struct thread *td);
void pmap_swapin_thread(struct thread *td);
void pmap_activate(struct thread *td);