Fix for the panic("vm_thread_new: kstack allocation failed") and

silent NULL pointer dereference in the i386 and sparc64 pmap_pinit()
when the kmem_alloc_nofault() failed to allocate address space. Both
functions now return an error instead of panicking or dereferencing NULL.

As a consequence, vmspace_exec() and vmspace_unshare() now return an
errno int. A struct vmspace arg was added to vm_forkproc() to avoid dealing
with a failed allocation when most of the fork1() job is already done.

The kernel stack for the thread is now set up in the thread_alloc(),
that itself may return NULL. Also, allocation of the first process
thread is performed in the fork1() to properly deal with stack
allocation failure. proc_linkup() is separated into proc_linkup()
called from fork1(), and proc_linkup0(), that is used to set up the
kernel process (was known as swapper).

In collaboration with:	Peter Holm
Reviewed by:	jhb
This commit is contained in:
Konstantin Belousov 2007-11-05 11:36:16 +00:00
parent 762d6411cf
commit 89b57fcf01
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=173361
41 changed files with 222 additions and 93 deletions

View File

@ -1137,7 +1137,7 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
* This may be done better later if it gets more high level
* components in it. If so just link td->td_proc here.
*/
proc_linkup(&proc0, &thread0);
proc_linkup0(&proc0, &thread0);
preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
preload_bootstrap_relocate(KERNBASE);

View File

@ -1186,7 +1186,7 @@ pmap_pinit0(pmap_t pmap)
* Initialize a preallocated and zeroed pmap structure,
* such as one in a vmspace structure.
*/
void
int
pmap_pinit(pmap_t pmap)
{
vm_page_t pml4pg;
@ -1216,6 +1216,8 @@ pmap_pinit(pmap_t pmap)
pmap->pm_active = 0;
TAILQ_INIT(&pmap->pm_pvchunk);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
return (1);
}
/*

View File

@ -3813,7 +3813,7 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
* such as one in a vmspace structure.
*/
void
int
pmap_pinit(pmap_t pmap)
{
PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap));
@ -3832,6 +3832,7 @@ pmap_pinit(pmap_t pmap)
pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa),
VM_PROT_READ, 1);
}
return (1);
}

View File

@ -449,7 +449,7 @@ initarm(void *arg, void *arg2)
undefined_handler_address = (u_int)undefinedinstruction_bounce;
undefined_init();
proc_linkup(&proc0, &thread0);
proc_linkup0(&proc0, &thread0);
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

View File

@ -422,7 +422,7 @@ initarm(void *arg, void *arg2)
/* Set stack for exception handlers */
proc_linkup(&proc0, &thread0);
proc_linkup0(&proc0, &thread0);
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

View File

@ -426,7 +426,7 @@ initarm(void *arg, void *arg2)
undefined_handler_address = (u_int)undefinedinstruction_bounce;
undefined_init();
proc_linkup(&proc0, &thread0);
proc_linkup0(&proc0, &thread0);
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

View File

@ -424,7 +424,7 @@ initarm(void *arg, void *arg2)
undefined_handler_address = (u_int)undefinedinstruction_bounce;
undefined_init();
proc_linkup(&proc0, &thread0);
proc_linkup0(&proc0, &thread0);
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

View File

@ -409,7 +409,7 @@ initarm(void *arg, void *arg2)
#ifdef KSE
proc_linkup(&proc0, &ksegrp0, &thread0);
#else
proc_linkup(&proc0, &thread0);
proc_linkup0(&proc0, &thread0);
#endif
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)

View File

@ -490,7 +490,7 @@ initarm(void *arg, void *arg2)
undefined_handler_address = (u_int)undefinedinstruction_bounce;
undefined_init();
proc_linkup(&proc0, &thread0);
proc_linkup0(&proc0, &thread0);
thread0.td_kstack = kernelstack.pv_va;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

View File

@ -416,7 +416,11 @@ exec_pecoff_coff_prep_zmagic(struct image_params * imgp,
wp = (void *) ((char *) ap + sizeof(struct coff_aouthdr));
error = pecoff_read_from(FIRST_THREAD_IN_PROC(imgp->proc), imgp->vp,
peofs + PECOFF_HDR_SIZE, (caddr_t) sh, scnsiz);
exec_new_vmspace(imgp, &pecoff_sysvec);
if (error)
return (error);
error = exec_new_vmspace(imgp, &pecoff_sysvec);
if (error)
return (error);
vmspace = imgp->proc->p_vmspace;
for (i = 0; i < fp->f_nscns; i++) {
prot = VM_PROT_WRITE; /* XXX for relocation? */

View File

@ -120,7 +120,9 @@ exec_svr4_imgact(imgp)
/*
* Destroy old process VM and create a new one (with a new stack)
*/
exec_new_vmspace(imgp, &svr4_sysvec);
error = exec_new_vmspace(imgp, &svr4_sysvec);
if (error)
goto fail;
vmspace = imgp->proc->p_vmspace;
/*

View File

@ -2088,7 +2088,7 @@ init386(first)
* This may be done better later if it gets more high level
* components in it. If so just link td->td_proc here.
*/
proc_linkup(&proc0, &thread0);
proc_linkup0(&proc0, &thread0);
metadata_missing = 0;
if (bootinfo.bi_modulep) {

View File

@ -1227,7 +1227,7 @@ pmap_pinit0(pmap_t pmap)
* Initialize a preallocated and zeroed pmap structure,
* such as one in a vmspace structure.
*/
void
int
pmap_pinit(pmap_t pmap)
{
vm_page_t m, ptdpg[NPGPTD];
@ -1244,6 +1244,11 @@ pmap_pinit(pmap_t pmap)
if (pmap->pm_pdir == NULL) {
pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
NBPTD);
if (pmap->pm_pdir == NULL) {
PMAP_LOCK_DESTROY(pmap);
return (0);
}
#ifdef PAE
pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
KASSERT(((vm_offset_t)pmap->pm_pdpt &
@ -1297,6 +1302,8 @@ pmap_pinit(pmap_t pmap)
pmap->pm_active = 0;
TAILQ_INIT(&pmap->pm_pvchunk);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
return (1);
}
/*

View File

@ -337,7 +337,9 @@ exec_coff_imgact(imgp)
VOP_UNLOCK(imgp->vp, 0, td);
exec_new_vmspace(imgp, &ibcs2_svr3_sysvec);
error = exec_new_vmspace(imgp, &ibcs2_svr3_sysvec);
if (error)
goto fail;
vmspace = imgp->proc->p_vmspace;
for (i = 0; i < nscns; i++) {

View File

@ -119,7 +119,9 @@ exec_linux_imgact(struct image_params *imgp)
/*
* Destroy old process VM and create a new one (with a new stack)
*/
exec_new_vmspace(imgp, &linux_sysvec);
error = exec_new_vmspace(imgp, &linux_sysvec);
if (error)
goto fail;
vmspace = imgp->proc->p_vmspace;
/*

View File

@ -789,7 +789,7 @@ ia64_init(void)
msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
msgbufinit(msgbufp, MSGBUF_SIZE);
proc_linkup(&proc0, &thread0);
proc_linkup0(&proc0, &thread0);
/*
* Init mapping for kernel stack for proc 0
*/

View File

@ -710,7 +710,7 @@ pmap_pinit0(struct pmap *pmap)
* Initialize a preallocated and zeroed pmap structure,
* such as one in a vmspace structure.
*/
void
int
pmap_pinit(struct pmap *pmap)
{
int i;
@ -721,6 +721,7 @@ pmap_pinit(struct pmap *pmap)
pmap->pm_active = 0;
TAILQ_INIT(&pmap->pm_pvlist);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
return (1);
}
/***************************************************

View File

@ -198,9 +198,11 @@ exec_aout_imgact(imgp)
/*
* Destroy old process VM and create a new one (with a new stack)
*/
exec_new_vmspace(imgp, &aout_sysvec);
error = exec_new_vmspace(imgp, &aout_sysvec);
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
if (error)
return (error);
/*
* The vm space can be changed by exec_new_vmspace

View File

@ -666,10 +666,12 @@ __CONCAT(exec_, __elfN(imgact))(struct image_params *imgp)
*/
VOP_UNLOCK(imgp->vp, 0, td);
exec_new_vmspace(imgp, sv);
error = exec_new_vmspace(imgp, sv);
imgp->proc->p_sysent = sv;
vn_lock(imgp->vp, LK_EXCLUSIVE | LK_RETRY, td);
if (error)
return (error);
vmspace = imgp->proc->p_vmspace;

View File

@ -239,9 +239,13 @@ do_aout_hdr(struct imgact_gzip * gz)
/*
* Destroy old process VM and create a new one (with a new stack)
*/
exec_new_vmspace(gz->ip, &aout_sysvec);
error = exec_new_vmspace(gz->ip, &aout_sysvec);
vn_lock(gz->ip->vp, LK_EXCLUSIVE | LK_RETRY, td);
if (error) {
gz->where = __LINE__;
return (error);
}
vmspace = gz->ip->proc->p_vmspace;

View File

@ -914,7 +914,9 @@ exec_new_vmspace(imgp, sv)
pmap_remove_pages(vmspace_pmap(vmspace));
vm_map_remove(map, vm_map_min(map), vm_map_max(map));
} else {
vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
error = vmspace_exec(p, sv->sv_minuser, sv->sv_maxuser);
if (error)
return (error);
vmspace = p->p_vmspace;
map = &vmspace->vm_map;
}

View File

@ -195,6 +195,7 @@ fork1(td, flags, pages, procp)
struct filedesc_to_leader *fdtol;
struct thread *td2;
struct sigacts *newsigacts;
struct vmspace *vm2;
int error;
/* Can't copy and clear. */
@ -218,7 +219,9 @@ fork1(td, flags, pages, procp)
PROC_UNLOCK(p1);
}
vm_forkproc(td, NULL, NULL, flags);
error = vm_forkproc(td, NULL, NULL, NULL, flags);
if (error)
goto norfproc_fail;
/*
* Close all file descriptors.
@ -236,6 +239,7 @@ fork1(td, flags, pages, procp)
if (flags & RFFDG)
fdunshare(p1, td);
norfproc_fail:
if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
(flags & (RFCFDG | RFFDG))) {
PROC_LOCK(p1);
@ -243,7 +247,7 @@ fork1(td, flags, pages, procp)
PROC_UNLOCK(p1);
}
*procp = NULL;
return (0);
return (error);
}
/*
@ -254,6 +258,32 @@ fork1(td, flags, pages, procp)
/* Allocate new proc. */
newproc = uma_zalloc(proc_zone, M_WAITOK);
if (TAILQ_EMPTY(&newproc->p_threads)) {
td2 = thread_alloc();
if (td2 == NULL) {
error = ENOMEM;
goto fail1;
}
proc_linkup(newproc, td2);
sched_newproc(newproc, td2);
} else
td2 = FIRST_THREAD_IN_PROC(newproc);
/* Allocate and switch to an alternate kstack if specified. */
if (pages != 0) {
if (!vm_thread_new_altkstack(td2, pages)) {
error = ENOMEM;
goto fail1;
}
}
if ((flags & RFMEM) == 0) {
vm2 = vmspace_fork(p1->p_vmspace);
if (vm2 == NULL) {
error = ENOMEM;
goto fail1;
}
} else
vm2 = NULL;
#ifdef MAC
mac_proc_init(newproc);
#endif
@ -380,7 +410,6 @@ fork1(td, flags, pages, procp)
lastpid = trypid;
p2 = newproc;
td2 = FIRST_THREAD_IN_PROC(newproc);
p2->p_state = PRS_NEW; /* protect against others */
p2->p_pid = trypid;
/*
@ -456,9 +485,6 @@ fork1(td, flags, pages, procp)
* Start by zeroing the section of proc that is zero-initialized,
* then copy the section that is copied directly from the parent.
*/
/* Allocate and switch to an alternate kstack if specified. */
if (pages != 0)
vm_thread_new_altkstack(td2, pages);
PROC_LOCK(p2);
PROC_LOCK(p1);
@ -630,7 +656,7 @@ fork1(td, flags, pages, procp)
* Finish creating the child process. It will return via a different
* execution path later. (ie: directly into user mode)
*/
vm_forkproc(td, p2, td2, flags);
vm_forkproc(td, p2, td2, vm2, flags);
if (flags == (RFFDG | RFPROC)) {
PCPU_INC(cnt.v_forks);
@ -713,6 +739,7 @@ fork1(td, flags, pages, procp)
#ifdef MAC
mac_proc_destroy(newproc);
#endif
fail1:
uma_zfree(proc_zone, newproc);
pause("fork", hz / 2);
return (error);

View File

@ -43,6 +43,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sleepqueue.h>
#include <sys/syslog.h>
#include <sys/kse.h>
#include <sys/ktr.h>
#include <vm/uma.h>
@ -64,7 +65,7 @@ TAILQ_HEAD(, kse_upcall) zombie_upcalls =
TAILQ_HEAD_INITIALIZER(zombie_upcalls);
static int thread_update_usr_ticks(struct thread *td);
static void thread_alloc_spare(struct thread *td);
static int thread_alloc_spare(struct thread *td);
static struct thread *thread_schedule_upcall(struct thread *td, struct kse_upcall *ku);
static struct kse_upcall *upcall_alloc(void);
@ -648,6 +649,16 @@ kse_create(struct thread *td, struct kse_create_args *uap)
PROC_UNLOCK(p);
}
/*
* For the first call this may not have been set.
* Of course nor may it actually be needed.
* thread_schedule_upcall() will look for it.
*/
if (td->td_standin == NULL) {
if (!thread_alloc_spare(td))
return (ENOMEM);
}
/*
* Even bound LWPs get a mailbox and an upcall to hold it.
* XXX This should change.
@ -657,13 +668,6 @@ kse_create(struct thread *td, struct kse_create_args *uap)
newku->ku_func = mbx.km_func;
bcopy(&mbx.km_stack, &newku->ku_stack, sizeof(stack_t));
/*
* For the first call this may not have been set.
* Of course nor may it actually be needed.
* thread_schedule_upcall() will look for it.
*/
if (td->td_standin == NULL)
thread_alloc_spare(td);
PROC_LOCK(p);
PROC_SLOCK(p);
/*
@ -989,20 +993,23 @@ thread_update_usr_ticks(struct thread *td)
* XXX BUG.. we need to get the cr ref after the thread has
* checked and chenged its own, not 6 months before...
*/
void
int
thread_alloc_spare(struct thread *td)
{
struct thread *spare;
if (td->td_standin)
return;
return (1);
spare = thread_alloc();
if (spare == NULL)
return (0);
td->td_standin = spare;
bzero(&spare->td_startzero,
__rangeof(struct thread, td_startzero, td_endzero));
spare->td_proc = td->td_proc;
spare->td_ucred = crhold(td->td_ucred);
spare->td_flags = TDF_INMEM;
return (1);
}
/*
@ -1170,8 +1177,18 @@ thread_user_enter(struct thread *td)
KASSERT(ku->ku_owner == td, ("wrong owner"));
KASSERT(!TD_CAN_UNBIND(td), ("can unbind"));
if (td->td_standin == NULL)
thread_alloc_spare(td);
if (td->td_standin == NULL) {
if (!thread_alloc_spare(td)) {
PROC_LOCK(p);
if (kern_logsigexit)
log(LOG_INFO,
"pid %d (%s), uid %d: thread_alloc_spare failed\n",
p->p_pid, p->p_comm,
td->td_ucred ? td->td_ucred->cr_uid : -1);
sigexit(td, SIGSEGV); /* XXX ? */
/* panic("thread_user_enter: thread_alloc_spare failed"); */
}
}
ku->ku_mflags = fuword32((void *)&ku->ku_mailbox->km_flags);
tmbx = (void *)fuword((void *)&ku->ku_mailbox->km_curthread);
if ((tmbx == NULL) || (tmbx == (void *)-1L) ||
@ -1385,7 +1402,7 @@ thread_userret(struct thread *td, struct trapframe *frame)
* for when we re-enter the kernel.
*/
if (td->td_standin == NULL)
thread_alloc_spare(td);
thread_alloc_spare(td); /* XXX care of failure ? */
}
ku->ku_mflags = 0;

View File

@ -145,20 +145,21 @@ proc_dtor(void *mem, int size, void *arg)
/* INVARIANTS checks go here */
p = (struct proc *)mem;
td = FIRST_THREAD_IN_PROC(p);
if (td != NULL) {
#ifdef INVARIANTS
KASSERT((p->p_numthreads == 1),
("bad number of threads in exiting process"));
KASSERT((td != NULL), ("proc_dtor: bad thread pointer"));
KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr"));
KASSERT((p->p_numthreads == 1),
("bad number of threads in exiting process"));
KASSERT(STAILQ_EMPTY(&p->p_ktr), ("proc_dtor: non-empty p_ktr"));
#endif
/* Dispose of an alternate kstack, if it exists.
* XXX What if there are more than one thread in the proc?
* The first thread in the proc is special and not
* freed, so you gotta do this here.
*/
if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
vm_thread_dispose_altkstack(td);
/* Dispose of an alternate kstack, if it exists.
* XXX What if there are more than one thread in the proc?
* The first thread in the proc is special and not
* freed, so you gotta do this here.
*/
if (((p->p_flag & P_KTHREAD) != 0) && (td->td_altkstack != 0))
vm_thread_dispose_altkstack(td);
}
if (p->p_ksi != NULL)
KASSERT(! KSI_ONQ(p->p_ksi), ("SIGCHLD queue"));
}
@ -170,17 +171,14 @@ static int
proc_init(void *mem, int size, int flags)
{
struct proc *p;
struct thread *td;
p = (struct proc *)mem;
p->p_sched = (struct p_sched *)&p[1];
td = thread_alloc();
bzero(&p->p_mtx, sizeof(struct mtx));
mtx_init(&p->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
mtx_init(&p->p_slock, "process slock", NULL, MTX_SPIN | MTX_RECURSE);
TAILQ_INIT(&p->p_threads); /* all threads in proc */
p->p_stats = pstats_alloc();
proc_linkup(p, td);
sched_newproc(p, td);
return (0);
}

View File

@ -102,7 +102,7 @@ static uma_zone_t ksiginfo_zone = NULL;
struct filterops sig_filtops =
{ 0, filt_sigattach, filt_sigdetach, filt_signal };
static int kern_logsigexit = 1;
int kern_logsigexit = 1;
SYSCTL_INT(_kern, KERN_LOGSIGEXIT, logsigexit, CTLFLAG_RW,
&kern_logsigexit, 0,
"Log processes quitting on abnormal signals to syslog(3)");

View File

@ -175,6 +175,8 @@ create_thread(struct thread *td, mcontext_t *ctx,
/* Initialize our td */
newtd = thread_alloc();
if (newtd == NULL)
return (ENOMEM);
/*
* Try the copyout as soon as we allocate the td so we don't

View File

@ -181,13 +181,12 @@ thread_init(void *mem, int size, int flags)
td = (struct thread *)mem;
vm_thread_new(td, 0);
cpu_thread_setup(td);
td->td_sleepqueue = sleepq_alloc();
td->td_turnstile = turnstile_alloc();
td->td_sched = (struct td_sched *)&td[1];
sched_newthread(td);
umtx_thread_init(td);
td->td_kstack = 0;
return (0);
}
@ -203,7 +202,6 @@ thread_fini(void *mem, int size)
turnstile_free(td->td_turnstile);
sleepq_free(td->td_sleepqueue);
umtx_thread_fini(td);
vm_thread_dispose(td);
}
/*
@ -214,11 +212,17 @@ thread_fini(void *mem, int size)
* proc_dtor() (should go away)
* proc_init()
*/
void
proc_linkup0(struct proc *p, struct thread *td)
{
TAILQ_INIT(&p->p_threads); /* all threads in proc */
proc_linkup(p, td);
}
void
proc_linkup(struct proc *p, struct thread *td)
{
TAILQ_INIT(&p->p_threads); /* all threads in proc */
#ifdef KSE
TAILQ_INIT(&p->p_upcalls); /* upcall list */
#endif
@ -310,9 +314,18 @@ thread_reap(void)
struct thread *
thread_alloc(void)
{
struct thread *td;
thread_reap(); /* check if any zombies to get */
return (uma_zalloc(thread_zone, M_WAITOK));
td = (struct thread *)uma_zalloc(thread_zone, M_WAITOK);
KASSERT(td->td_kstack == 0, ("thread_alloc got thread with kstack"));
if (!vm_thread_new(td, 0)) {
uma_zfree(thread_zone, td);
return (NULL);
}
cpu_thread_setup(td);
return (td);
}
@ -324,6 +337,10 @@ thread_free(struct thread *td)
{
cpu_thread_clean(td);
if (td->td_altkstack != 0)
vm_thread_dispose_altkstack(td);
if (td->td_kstack != 0)
vm_thread_dispose(td);
uma_zfree(thread_zone, td);
}

View File

@ -1917,7 +1917,7 @@ init386(first)
* This may be done better later if it gets more high level
* components in it. If so just link td->td_proc here.
*/
proc_linkup(&proc0, &thread0);
proc_linkup0(&proc0, &thread0);
/*
* Initialize DMAC

View File

@ -287,7 +287,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
/*
* Start initializing proc0 and thread0.
*/
proc_linkup(&proc0, &thread0);
proc_linkup0(&proc0, &thread0);
thread0.td_frame = &frame0;
/*

View File

@ -287,7 +287,7 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
/*
* Start initializing proc0 and thread0.
*/
proc_linkup(&proc0, &thread0);
proc_linkup0(&proc0, &thread0);
thread0.td_frame = &frame0;
/*

View File

@ -193,10 +193,11 @@ pmap_page_init(vm_page_t m)
MMU_PAGE_INIT(mmu_obj, m);
}
void
int
pmap_pinit(pmap_t pmap)
{
MMU_PINIT(mmu_obj, pmap);
return (1);
}
void

View File

@ -399,7 +399,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
/*
* Initialize proc0 stuff (p_contested needs to be done early).
*/
proc_linkup(&proc0, &thread0);
proc_linkup0(&proc0, &thread0);
proc0.p_md.md_sigtramp = NULL;
proc0.p_md.md_utrap = NULL;
thread0.td_kstack = kstack0;

View File

@ -1006,7 +1006,7 @@ pmap_pinit0(pmap_t pm)
* Initialize a preallocated and zeroed pmap structure, such as one in a
* vmspace structure.
*/
void
int
pmap_pinit(pmap_t pm)
{
vm_page_t ma[TSB_PAGES];
@ -1021,6 +1021,10 @@ pmap_pinit(pmap_t pm)
if (pm->pm_tsb == NULL) {
pm->pm_tsb = (struct tte *)kmem_alloc_nofault(kernel_map,
TSB_BSIZE);
if (pm->pm_tsb == NULL) {
PMAP_LOCK_DESTROY(pm);
return (0);
}
}
/*
@ -1044,6 +1048,7 @@ pmap_pinit(pmap_t pm)
pm->pm_context[i] = -1;
pm->pm_active = 0;
bzero(&pm->pm_stats, sizeof(pm->pm_stats));
return (1);
}
/*

View File

@ -364,7 +364,7 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_long o3, ofw_vec_t *vec)
* Initialize proc0 stuff (p_contested needs to be done early).
*/
proc_linkup(&proc0, &thread0);
proc_linkup0(&proc0, &thread0);
proc0.p_md.md_sigtramp = NULL;
proc0.p_md.md_utrap = NULL;
frame0.tf_tstate = TSTATE_IE | TSTATE_PEF | TSTATE_PRIV;

View File

@ -1703,7 +1703,7 @@ pmap_pinit0(pmap_t pmap)
* Initialize a preallocated and zeroed pmap structure, such as one in a
* vmspace structure.
*/
void
int
pmap_pinit(pmap_t pmap)
{
int i;
@ -1723,6 +1723,7 @@ pmap_pinit(pmap_t pmap)
TAILQ_INIT(&pmap->pm_pvlist);
PMAP_LOCK_INIT(pmap);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
return (1);
}
/*

View File

@ -839,6 +839,7 @@ void pargs_drop(struct pargs *pa);
void pargs_free(struct pargs *pa);
void pargs_hold(struct pargs *pa);
void procinit(void);
void proc_linkup0(struct proc *p, struct thread *td);
void proc_linkup(struct proc *p, struct thread *td);
void proc_reparent(struct proc *child, struct proc *newparent);
struct pstats *pstats_alloc(void);

View File

@ -299,6 +299,7 @@ struct mtx;
extern int sugid_coredump; /* Sysctl variable kern.sugid_coredump */
extern struct mtx sigio_lock;
extern int kern_logsigexit; /* Sysctl variable kern.logsigexit */
/*
* Lock the pointers for a sigio object in the underlying objects of

View File

@ -114,7 +114,7 @@ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size);
boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
void pmap_page_init(vm_page_t m);
void pmap_pinit(pmap_t);
int pmap_pinit(pmap_t);
void pmap_pinit0(pmap_t);
void pmap_protect(pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void pmap_qenter(vm_offset_t, vm_page_t *, int);

View File

@ -70,14 +70,14 @@ int vm_fault(vm_map_t, vm_offset_t, vm_prot_t, int);
void vm_fault_copy_entry(vm_map_t, vm_map_t, vm_map_entry_t, vm_map_entry_t);
void vm_fault_unwire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t);
int vm_fault_wire(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
void vm_forkproc(struct thread *, struct proc *, struct thread *, int);
int vm_forkproc(struct thread *, struct proc *, struct thread *, struct vmspace *, int);
void vm_waitproc(struct proc *);
int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
void vm_set_page_size(void);
struct vmspace *vmspace_alloc(vm_offset_t, vm_offset_t);
struct vmspace *vmspace_fork(struct vmspace *);
void vmspace_exec(struct proc *, vm_offset_t, vm_offset_t);
void vmspace_unshare(struct proc *);
int vmspace_exec(struct proc *, vm_offset_t, vm_offset_t);
int vmspace_unshare(struct proc *);
void vmspace_exit(struct thread *);
struct vmspace *vmspace_acquire_ref(struct proc *);
void vmspace_free(struct vmspace *);
@ -92,8 +92,8 @@ struct sf_buf *vm_imgact_map_page(vm_object_t object, vm_ooffset_t offset);
void vm_imgact_unmap_page(struct sf_buf *sf);
void vm_thread_dispose(struct thread *td);
void vm_thread_dispose_altkstack(struct thread *td);
void vm_thread_new(struct thread *td, int pages);
void vm_thread_new_altkstack(struct thread *td, int pages);
int vm_thread_new(struct thread *td, int pages);
int vm_thread_new_altkstack(struct thread *td, int pages);
void vm_thread_swapin(struct thread *td);
void vm_thread_swapout(struct thread *td);
#endif /* _KERNEL */

View File

@ -321,7 +321,7 @@ vm_imgact_unmap_page(struct sf_buf *sf)
* This routine directly affects the fork perf for a process and
* create performance for a thread.
*/
void
int
vm_thread_new(struct thread *td, int pages)
{
vm_object_t ksobj;
@ -338,18 +338,22 @@ vm_thread_new(struct thread *td, int pages)
* Allocate an object for the kstack.
*/
ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
td->td_kstack_obj = ksobj;
/*
* Get a kernel virtual address for this thread's kstack.
*/
ks = kmem_alloc_nofault(kernel_map,
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
if (ks == 0)
panic("vm_thread_new: kstack allocation failed");
if (ks == 0) {
printf("vm_thread_new: kstack allocation failed\n");
vm_object_deallocate(ksobj);
return (0);
}
if (KSTACK_GUARD_PAGES != 0) {
pmap_qremove(ks, KSTACK_GUARD_PAGES);
ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
}
td->td_kstack_obj = ksobj;
td->td_kstack = ks;
/*
* Knowing the number of pages allocated is useful when you
@ -372,6 +376,7 @@ vm_thread_new(struct thread *td, int pages)
}
VM_OBJECT_UNLOCK(ksobj);
pmap_qenter(ks, ma, pages);
return (1);
}
/*
@ -403,6 +408,7 @@ vm_thread_dispose(struct thread *td)
vm_object_deallocate(ksobj);
kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
(pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
td->td_kstack = 0;
}
/*
@ -468,7 +474,7 @@ vm_thread_swapin(struct thread *td)
/*
* Set up a variable-sized alternate kstack.
*/
void
int
vm_thread_new_altkstack(struct thread *td, int pages)
{
@ -476,7 +482,7 @@ vm_thread_new_altkstack(struct thread *td, int pages)
td->td_altkstack_obj = td->td_kstack_obj;
td->td_altkstack_pages = td->td_kstack_pages;
vm_thread_new(td, pages);
return (vm_thread_new(td, pages));
}
/*
@ -504,14 +510,16 @@ vm_thread_dispose_altkstack(struct thread *td)
* ready to run. The new process is set up so that it returns directly
* to user mode to avoid stack copying and relocation problems.
*/
void
vm_forkproc(td, p2, td2, flags)
int
vm_forkproc(td, p2, td2, vm2, flags)
struct thread *td;
struct proc *p2;
struct thread *td2;
struct vmspace *vm2;
int flags;
{
struct proc *p1 = td->td_proc;
int error;
if ((flags & RFPROC) == 0) {
/*
@ -521,11 +529,13 @@ vm_forkproc(td, p2, td2, flags)
*/
if ((flags & RFMEM) == 0) {
if (p1->p_vmspace->vm_refcnt > 1) {
vmspace_unshare(p1);
error = vmspace_unshare(p1);
if (error)
return (error);
}
}
cpu_fork(td, p2, td2, flags);
return;
return (0);
}
if (flags & RFMEM) {
@ -538,7 +548,7 @@ vm_forkproc(td, p2, td2, flags)
}
if ((flags & RFMEM) == 0) {
p2->p_vmspace = vmspace_fork(p1->p_vmspace);
p2->p_vmspace = vm2;
if (p1->p_vmspace->vm_shm)
shmfork(p1, p2);
}
@ -548,6 +558,7 @@ vm_forkproc(td, p2, td2, flags)
* and make the child ready to run.
*/
cpu_fork(td, p2, td2, flags);
return (0);
}
/*

View File

@ -197,7 +197,6 @@ vmspace_zfini(void *mem, int size)
struct vmspace *vm;
vm = (struct vmspace *)mem;
pmap_release(vmspace_pmap(vm));
vm_map_zfini(&vm->vm_map, sizeof(vm->vm_map));
}
@ -208,8 +207,8 @@ vmspace_zinit(void *mem, int size, int flags)
vm = (struct vmspace *)mem;
vm->vm_map.pmap = NULL;
(void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
pmap_pinit(vmspace_pmap(vm));
return (0);
}
@ -272,6 +271,10 @@ vmspace_alloc(min, max)
struct vmspace *vm;
vm = uma_zalloc(vmspace_zone, M_WAITOK);
if (vm->vm_map.pmap == NULL && !pmap_pinit(vmspace_pmap(vm))) {
uma_zfree(vmspace_zone, vm);
return (NULL);
}
CTR1(KTR_VM, "vmspace_alloc: %p", vm);
_vm_map_init(&vm->vm_map, min, max);
vm->vm_map.pmap = vmspace_pmap(vm); /* XXX */
@ -321,6 +324,12 @@ vmspace_dofree(struct vmspace *vm)
(void)vm_map_remove(&vm->vm_map, vm->vm_map.min_offset,
vm->vm_map.max_offset);
/*
* XXX Comment out the pmap_release call for now. The
* vmspace_zone is marked as UMA_ZONE_NOFREE, and bugs cause
* pmap.resident_count to be != 0 on exit sometimes.
*/
/* pmap_release(vmspace_pmap(vm)); */
uma_zfree(vmspace_zone, vm);
}
@ -2584,6 +2593,8 @@ vmspace_fork(struct vmspace *vm1)
vm_map_lock(old_map);
vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
if (vm2 == NULL)
goto unlock_and_return;
vm2->vm_taddr = vm1->vm_taddr;
vm2->vm_daddr = vm1->vm_daddr;
vm2->vm_maxsaddr = vm1->vm_maxsaddr;
@ -2675,7 +2686,7 @@ vmspace_fork(struct vmspace *vm1)
}
old_entry = old_entry->next;
}
unlock_and_return:
vm_map_unlock(old_map);
return (vm2);
@ -3003,13 +3014,15 @@ vm_map_growstack(struct proc *p, vm_offset_t addr)
* Unshare the specified VM space for exec. If other processes are
* mapped to it, then create a new one. The new vmspace is null.
*/
void
int
vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
{
struct vmspace *oldvmspace = p->p_vmspace;
struct vmspace *newvmspace;
newvmspace = vmspace_alloc(minuser, maxuser);
if (newvmspace == NULL)
return (ENOMEM);
newvmspace->vm_swrss = oldvmspace->vm_swrss;
/*
* This code is written like this for prototype purposes. The
@ -3024,27 +3037,31 @@ vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
if (p == curthread->td_proc) /* XXXKSE ? */
pmap_activate(curthread);
vmspace_free(oldvmspace);
return (0);
}
/*
* Unshare the specified VM space for forcing COW. This
* is called by rfork, for the (RFMEM|RFPROC) == 0 case.
*/
void
int
vmspace_unshare(struct proc *p)
{
struct vmspace *oldvmspace = p->p_vmspace;
struct vmspace *newvmspace;
if (oldvmspace->vm_refcnt == 1)
return;
return (0);
newvmspace = vmspace_fork(oldvmspace);
if (newvmspace == NULL)
return (ENOMEM);
PROC_VMSPACE_LOCK(p);
p->p_vmspace = newvmspace;
PROC_VMSPACE_UNLOCK(p);
if (p == curthread->td_proc) /* XXXKSE ? */
pmap_activate(curthread);
vmspace_free(oldvmspace);
return (0);
}
/*