/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_exit.c	8.7 (Berkeley) 2/12/94
 * $FreeBSD$
 */

#include "opt_compat.h"
#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/ptrace.h>
#include <sys/acct.h>		/* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/mac.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/jail.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <sys/user.h>

/* Required to be non-static for SysVR4 emulator */
MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");

static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");

static int wait1(struct thread *, struct wait_args *, int);

/*
 * callout list for things to do at exit time
 */
struct exitlist {
	exitlist_fn function;
	TAILQ_ENTRY(exitlist) next;
};

TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);

/*
 * exit --
 *	Death of process.
 *
 * MPSAFE
 */
void
sys_exit(struct thread *td, struct sys_exit_args *uap)
{

	mtx_lock(&Giant);
	exit1(td, W_EXITCODE(uap->rval, 0));
	/* NOTREACHED */
}

/*
 * Exit: deallocate address space and other resources, change proc state
 * to zombie, and unlink proc from allproc and parent's lists.  Save exit
 * status and rusage for wait().  Check for child processes and orphan them.
 */
void
exit1(struct thread *td, int rv)
{
	struct exitlist *ep;
	struct proc *p, *nq, *q;
	struct tty *tp;
	struct vnode *ttyvp;
	struct vmspace *vm;
	struct vnode *vtmp;
#ifdef KTRACE
	struct vnode *tracevp;
	struct ucred *tracecred;
#endif

	GIANT_REQUIRED;

	p = td->td_proc;
	if (p == initproc) {
		printf("init died (signal %d, exit %d)\n",
		    WTERMSIG(rv), WEXITSTATUS(rv));
		panic("Going nowhere without my init!");
	}

	/*
	 * XXXKSE: MUST abort all other threads before proceeding past here.
	 */
	PROC_LOCK(p);
	if (p->p_flag & P_THREADED) {
		/*
		 * First check if some other thread got here before us.
		 * If so, act appropriately (exit or suspend).
		 */
		thread_suspend_check(0);

		/*
		 * Kill off the other threads.  This requires some
		 * co-operation from other parts of the kernel so it
		 * may not be instant.
		 * With this state set:
		 * Any thread entering the kernel from userspace will
		 * thread_exit() in trap().  Any thread attempting to
		 * sleep will return immediately with EINTR or EWOULDBLOCK,
		 * which will hopefully force them to back out to userland,
		 * freeing resources as they go, and anything attempting
		 * to return to userland will thread_exit() from userret().
		 * thread_exit() will unsuspend us when the last other
		 * thread exits.
		 */
		if (thread_single(SINGLE_EXIT)) {
			panic("Exit: Single threading fouled up");
		}
		/*
		 * All other activity in this process is now stopped.
		 * Remove excess KSEs and KSEGRPS. XXXKSE (when we have them)
		 * ...
		 * Turn off threading support.
		 */
		p->p_flag &= ~P_THREADED;
		thread_single_end();	/* Don't need this any more. */
	}
	/*
	 * With this state set:
	 * Any thread entering the kernel from userspace will thread_exit()
	 * in trap().  Any thread attempting to sleep will return immediately
	 * with EINTR or EWOULDBLOCK, which will hopefully force them
	 * to back out to userland, freeing resources as they go, and
	 * anything attempting to return to userland will thread_exit()
	 * from userret(). thread_exit() will do a wakeup on p->p_numthreads
	 * if it transitions to 1.
	 */

	p->p_flag |= P_WEXIT;
	PROC_UNLOCK(p);

	/* Are we a task leader? */
	if (p == p->p_leader) {
		mtx_lock(&ppeers_lock);
		q = p->p_peers;
		while (q != NULL) {
			PROC_LOCK(q);
			psignal(q, SIGKILL);
			PROC_UNLOCK(q);
			q = q->p_peers;
		}
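		/*
		 * Wait until all of our peers have exited; each one wakes
		 * us from its own exit1() after removing itself from the
		 * p_peers list (see the wakeup(p->p_leader) further below).
		 */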
		while (p->p_peers != NULL)
			msleep(p, &ppeers_lock, PWAIT, "exit1", 0);
		mtx_unlock(&ppeers_lock);
	}

#ifdef PGINPROF
	vmsizmon();
#endif
	STOPEVENT(p, S_EXIT, rv);
	wakeup(&p->p_stype);	/* Wakeup anyone in procfs' PIOCWAIT */

	/*
	 * Check if any loadable modules need anything done at process exit.
	 * e.g. SYSV IPC stuff
	 * XXX what if one of these generates an error?
	 */
	TAILQ_FOREACH(ep, &exit_list, next)
		(*ep->function)(p);

	MALLOC(p->p_ru, struct rusage *, sizeof(struct rusage),
		M_ZOMBIE, M_WAITOK);
	/*
	 * If parent is waiting for us to exit or exec,
	 * P_PPWAIT is set; we will wakeup the parent below.
	 */
	PROC_LOCK(p);
	stopprofclock(p);
	p->p_flag &= ~(P_TRACED | P_PPWAIT);
	SIGEMPTYSET(p->p_siglist);
	if (timevalisset(&p->p_realtimer.it_value))
		callout_stop(&p->p_itcallout);
	PROC_UNLOCK(p);

	/*
	 * Reset any sigio structures pointing to us as a result of
	 * F_SETOWN with our pid.
	 */
	funsetownlst(&p->p_sigiolst);

	/*
	 * Close open files and release open-file table.
	 * This may block!
	 */
	fdfree(td);

	/*
	 * Remove ourself from our leader's peer list and wake our leader.
	 */
	mtx_lock(&ppeers_lock);
	if (p->p_leader->p_peers) {
		q = p->p_leader;
		while (q->p_peers != p)
			q = q->p_peers;
		q->p_peers = p->p_peers;
		wakeup(p->p_leader);
	}
	mtx_unlock(&ppeers_lock);

	/* The next two chunks should probably be moved to vmspace_exit. */
	vm = p->p_vmspace;
	/*
	 * Release user portion of address space.
	 * This releases references to vnodes,
	 * which could cause I/O if the file has been unlinked.
	 * Need to do this early enough that we can still sleep.
	 * Can't free the entire vmspace as the kernel stack
	 * may be mapped within that space also.
	 *
	 * Processes sharing the same vmspace may exit in one order, and
	 * get cleaned up by vmspace_exit() in a different order.  The
	 * last exiting process to reach this point releases as much of
	 * the environment as it can, and the last process cleaned up
	 * by vmspace_exit() (which decrements exitingcnt) cleans up the
	 * remainder.
	 */
	++vm->vm_exitingcnt;
	if (--vm->vm_refcnt == 0) {
		shmexit(vm);
		vm_page_lock_queues();
		pmap_remove_pages(vmspace_pmap(vm), vm_map_min(&vm->vm_map),
		    vm_map_max(&vm->vm_map));
		vm_page_unlock_queues();
		(void) vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
		    vm_map_max(&vm->vm_map));
	}

	sx_xlock(&proctree_lock);
	if (SESS_LEADER(p)) {
		struct session *sp;

		sp = p->p_session;
		if (sp->s_ttyvp) {
			/*
			 * Controlling process.
			 * Signal foreground pgrp,
			 * drain controlling terminal
			 * and revoke access to controlling terminal.
			 */
			if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
				tp = sp->s_ttyp;
				if (sp->s_ttyp->t_pgrp) {
					PGRP_LOCK(sp->s_ttyp->t_pgrp);
					pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
					PGRP_UNLOCK(sp->s_ttyp->t_pgrp);
				}
				/* XXX tp should be locked. */
				sx_xunlock(&proctree_lock);
				(void) ttywait(tp);
				sx_xlock(&proctree_lock);
				/*
				 * The tty could have been revoked
				 * if we blocked.
				 */
				if (sp->s_ttyvp) {
					ttyvp = sp->s_ttyvp;
					SESS_LOCK(p->p_session);
					sp->s_ttyvp = NULL;
					SESS_UNLOCK(p->p_session);
					sx_xunlock(&proctree_lock);
					VOP_REVOKE(ttyvp, REVOKEALL);
					vrele(ttyvp);
					sx_xlock(&proctree_lock);
				}
			}
			if (sp->s_ttyvp) {
				ttyvp = sp->s_ttyvp;
				SESS_LOCK(p->p_session);
				sp->s_ttyvp = NULL;
				SESS_UNLOCK(p->p_session);
				vrele(ttyvp);
			}
			/*
			 * s_ttyp is not zero'd; we use this to indicate
			 * that the session once had a controlling terminal.
			 * (for logging and informational purposes)
			 */
		}
		SESS_LOCK(p->p_session);
		sp->s_leader = NULL;
		SESS_UNLOCK(p->p_session);
	}
	fixjobc(p, p->p_pgrp, 0);
	sx_xunlock(&proctree_lock);
	(void)acct_process(td);
#ifdef KTRACE
	/*
	 * release trace file
	 */
	PROC_LOCK(p);
	mtx_lock(&ktrace_mtx);
	p->p_traceflag = 0;	/* don't trace the vrele() */
	tracevp = p->p_tracevp;
	p->p_tracevp = NULL;
	tracecred = p->p_tracecred;
	p->p_tracecred = NULL;
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (tracevp != NULL)
		vrele(tracevp);
	if (tracecred != NULL)
		crfree(tracecred);
#endif
	/*
	 * Release reference to text vnode
	 */
	if ((vtmp = p->p_textvp) != NULL) {
		p->p_textvp = NULL;
		vrele(vtmp);
	}

	/*
	 * Release our limits structure.
	 */
	mtx_assert(&Giant, MA_OWNED);
	if (--p->p_limit->p_refcnt == 0) {
		FREE(p->p_limit, M_SUBPROC);
		p->p_limit = NULL;
	}

	/*
	 * Release this thread's reference to the ucred.  The actual proc
	 * reference will stay around until the proc is harvested by
	 * wait().  At this point the ucred is immutable (no other threads
	 * from this proc are around that can change it) so we leave the
	 * per-thread ucred pointer intact in case it is needed although
	 * in theory nothing should be using it at this point.
	 */
	crfree(td->td_ucred);

	/*
	 * Remove proc from allproc queue and pidhash chain.
	 * Place onto zombproc.  Unlink from parent's child list.
	 */
	sx_xlock(&allproc_lock);
	LIST_REMOVE(p, p_list);
	LIST_INSERT_HEAD(&zombproc, p, p_list);
	LIST_REMOVE(p, p_hash);
	sx_xunlock(&allproc_lock);

	sx_xlock(&proctree_lock);
	q = LIST_FIRST(&p->p_children);
	if (q != NULL)		/* only need this if any child is S_ZOMB */
		wakeup(initproc);
	for (; q != NULL; q = nq) {
		nq = LIST_NEXT(q, p_sibling);
		PROC_LOCK(q);
		proc_reparent(q, initproc);
		q->p_sigparent = SIGCHLD;
		/*
		 * Traced processes are killed
		 * since their existence means someone is screwing up.
		 */
		if (q->p_flag & P_TRACED) {
			q->p_flag &= ~P_TRACED;
			psignal(q, SIGKILL);
		}
		PROC_UNLOCK(q);
	}

	/*
	 * Save exit status and final rusage info, adding in child rusage
	 * info and self times.
	 */
	PROC_LOCK(p);
	p->p_xstat = rv;
	*p->p_ru = p->p_stats->p_ru;
	mtx_lock_spin(&sched_lock);
	calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
	mtx_unlock_spin(&sched_lock);
	ruadd(p->p_ru, &p->p_stats->p_cru);

	/*
	 * Notify interested parties of our demise.
	 */
	KNOTE(&p->p_klist, NOTE_EXIT);

	/*
	 * Notify parent that we're gone.  If parent has the PS_NOCLDWAIT
	 * flag set, or if the handler is set to SIG_IGN, notify process
	 * 1 instead (and hope it will handle this situation).
	 */
	PROC_LOCK(p->p_pptr);
	if (p->p_pptr->p_procsig->ps_flag & (PS_NOCLDWAIT | PS_CLDSIGIGN)) {
		struct proc *pp;

		pp = p->p_pptr;
		PROC_UNLOCK(pp);
		proc_reparent(p, initproc);
		PROC_LOCK(p->p_pptr);
		/*
		 * If this was the last child of our parent, notify
		 * parent, so in case he was wait(2)ing, he will
		 * continue.
		 */
		if (LIST_EMPTY(&pp->p_children))
			wakeup(pp);
	}

	if (p->p_sigparent && p->p_pptr != initproc)
		psignal(p->p_pptr, p->p_sigparent);
	else
		psignal(p->p_pptr, SIGCHLD);
	PROC_UNLOCK(p->p_pptr);

	/*
	 * If this is a kthread, then wakeup anyone waiting for it to exit.
	 */
	if (p->p_flag & P_KTHREAD)
		wakeup(p);
	PROC_UNLOCK(p);

	/*
	 * Finally, call machine-dependent code to release the remaining
	 * resources including address space.
	 * The address space is released by "vmspace_exitfree(p)" in
	 * vm_waitproc().
	 */
	cpu_exit(td);

	PROC_LOCK(p);
	PROC_LOCK(p->p_pptr);
	sx_xunlock(&proctree_lock);
	mtx_lock_spin(&sched_lock);

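	/*
	 * Drop any remaining (possibly recursed) references to Giant
	 * before we switch away for the last time.
	 */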
	while (mtx_owned(&Giant))
		mtx_unlock(&Giant);

	/*
	 * We have to wait until after releasing all locks before
	 * changing p_state.  If we block on a mutex then we will be
	 * back at SRUN when we resume and our parent will never
	 * harvest us.
	 */
	p->p_state = PRS_ZOMBIE;

	wakeup(p->p_pptr);
	PROC_UNLOCK(p->p_pptr);
	cnt.v_swtch++;
	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_sched_exit(td); /* XXXKSE check if this should be in thread_exit */
	/*
	 * Make sure the scheduler takes this thread out of its tables etc.
	 * This will also release this thread's reference to the ucred.
	 * Other thread parts to release include pcb bits and such.
	 */
	thread_exit();
}

#ifdef COMPAT_43
/*
 * MPSAFE.  The dirty work is handled by wait1().
 */
int
owait(struct thread *td, struct owait_args *uap __unused)
{
	struct wait_args w;

	w.options = 0;
	w.rusage = NULL;
	w.pid = WAIT_ANY;
	w.status = NULL;
	return (wait1(td, &w, 1));
}
#endif /* COMPAT_43 */

/*
 * MPSAFE.  The dirty work is handled by wait1().
 */
int
wait4(struct thread *td, struct wait_args *uap)
{

	return (wait1(td, uap, 0));
}

/*
 * MPSAFE
 */
static int
wait1(struct thread *td, struct wait_args *uap, int compat)
{
	struct rusage ru;
	int nfound;
	struct proc *p, *q, *t;
	int status, error;

	q = td->td_proc;
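	/*
	 * A pid of 0 means "any child in the caller's process group";
	 * encode that as the usual negative process-group id.
	 */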
	if (uap->pid == 0) {
		PROC_LOCK(q);
		uap->pid = -q->p_pgid;
		PROC_UNLOCK(q);
	}
	if (uap->options &~ (WUNTRACED|WNOHANG|WCONTINUED|WLINUXCLONE))
		return (EINVAL);
	mtx_lock(&Giant);
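	/*
	 * Scan our children under proctree_lock.  If none is ready to be
	 * reported we either return (WNOHANG) or sleep on our own proc
	 * and rescan; exiting children wake us up.
	 */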
loop:
	nfound = 0;
	sx_xlock(&proctree_lock);
	LIST_FOREACH(p, &q->p_children, p_sibling) {
		PROC_LOCK(p);
		if (uap->pid != WAIT_ANY &&
		    p->p_pid != uap->pid && p->p_pgid != -uap->pid) {
			PROC_UNLOCK(p);
			continue;
		}

		/*
		 * This special case handles a kthread spawned by linux_clone
		 * (see linux_misc.c).  The linux_wait4 and linux_waitpid
		 * functions need to be able to distinguish between waiting
		 * on a process and waiting on a thread.  It is a thread if
		 * p_sigparent is not SIGCHLD, and the WLINUXCLONE option
		 * signifies we want to wait for threads and not processes.
		 */
		if ((p->p_sigparent != SIGCHLD) ^
		    ((uap->options & WLINUXCLONE) != 0)) {
			PROC_UNLOCK(p);
			continue;
		}

		nfound++;
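		/*
		 * A zombie child: copy out its status and rusage, undo any
		 * ptrace reparenting, and release everything it still holds.
		 */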
		if (p->p_state == PRS_ZOMBIE) {
			/*
			 * Allow the scheduler to adjust the priority of the
			 * parent when a kseg is exiting.
			 */
			if (curthread->td_proc->p_pid != 1) {
				mtx_lock_spin(&sched_lock);
				sched_exit(curthread->td_ksegrp,
				    FIRST_KSEGRP_IN_PROC(p));
				mtx_unlock_spin(&sched_lock);
			}

			td->td_retval[0] = p->p_pid;
#ifdef COMPAT_43
			if (compat)
				td->td_retval[1] = p->p_xstat;
			else
#endif
			if (uap->status) {
				status = p->p_xstat;	/* convert to int */
				PROC_UNLOCK(p);
				if ((error = copyout(&status,
				    uap->status, sizeof(status)))) {
					sx_xunlock(&proctree_lock);
					mtx_unlock(&Giant);
					return (error);
				}
				PROC_LOCK(p);
			}
			if (uap->rusage) {
				bcopy(p->p_ru, &ru, sizeof(ru));
				PROC_UNLOCK(p);
				if ((error = copyout(&ru,
				    uap->rusage, sizeof (struct rusage)))) {
					sx_xunlock(&proctree_lock);
					mtx_unlock(&Giant);
					return (error);
				}
			} else
				PROC_UNLOCK(p);
			/*
			 * If we got the child via a ptrace 'attach',
			 * we need to give it back to the old parent.
			 */
			if (p->p_oppid && (t = pfind(p->p_oppid)) != NULL) {
				PROC_LOCK(p);
				p->p_oppid = 0;
				proc_reparent(p, t);
				PROC_UNLOCK(p);
				psignal(t, SIGCHLD);
				wakeup(t);
				PROC_UNLOCK(t);
				sx_xunlock(&proctree_lock);
				mtx_unlock(&Giant);
				return (0);
			}

			/*
			 * Remove other references to this process to ensure
			 * we have an exclusive reference.
			 */
			sx_xlock(&allproc_lock);
			LIST_REMOVE(p, p_list);	/* off zombproc */
			sx_xunlock(&allproc_lock);
			LIST_REMOVE(p, p_sibling);
			leavepgrp(p);
			sx_xunlock(&proctree_lock);

			/*
			 * As a side effect of this lock, we know that
			 * all other writes to this proc are visible now, so
			 * no more locking is needed for p.
			 */
			PROC_LOCK(p);
			p->p_xstat = 0;		/* XXX: why? */
			PROC_UNLOCK(p);
			PROC_LOCK(q);
			ruadd(&q->p_stats->p_cru, p->p_ru);
			PROC_UNLOCK(q);
			FREE(p->p_ru, M_ZOMBIE);
			p->p_ru = NULL;

			/*
			 * Decrement the count of procs running with this uid.
			 */
			(void)chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);

			/*
			 * Free up credentials.
			 */
			crfree(p->p_ucred);
			p->p_ucred = NULL;	/* XXX: why? */

			/*
			 * Remove unused arguments
			 */
			pargs_drop(p->p_args);
			p->p_args = NULL;

			if (--p->p_procsig->ps_refcnt == 0) {
				if (p->p_sigacts != &p->p_uarea->u_sigacts)
					FREE(p->p_sigacts, M_SUBPROC);
				FREE(p->p_procsig, M_SUBPROC);
				p->p_procsig = NULL;
			}

			/*
			 * do any thread-system specific cleanups
			 */
			thread_wait(p);

			/*
			 * Give vm and machine-dependent layer a chance
			 * to free anything that cpu_exit couldn't
			 * release while still running in process context.
			 */
			vm_waitproc(p);
			mtx_destroy(&p->p_mtx);
#ifdef MAC
			mac_destroy_proc(p);
#endif
			KASSERT(FIRST_THREAD_IN_PROC(p),
			    ("wait1: no residual thread!"));
			uma_zfree(proc_zone, p);
			sx_xlock(&allproc_lock);
			nprocs--;
			sx_xunlock(&allproc_lock);
			mtx_unlock(&Giant);
			return (0);
		}
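		/*
		 * Report a stopped child (job control stop or ptrace) at
		 * most once, using P_WAITED to remember that it has already
		 * been reported.
		 */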
		if (P_SHOULDSTOP(p) && (p->p_suspcount == p->p_numthreads) &&
		    ((p->p_flag & P_WAITED) == 0) &&
		    (p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
			p->p_flag |= P_WAITED;
			sx_xunlock(&proctree_lock);
			td->td_retval[0] = p->p_pid;
#ifdef COMPAT_43
			if (compat) {
				td->td_retval[1] = W_STOPCODE(p->p_xstat);
				PROC_UNLOCK(p);
				error = 0;
			} else
#endif
			if (uap->status) {
				status = W_STOPCODE(p->p_xstat);
				PROC_UNLOCK(p);
				error = copyout(&status,
					uap->status, sizeof(status));
			} else {
				PROC_UNLOCK(p);
				error = 0;
			}
			mtx_unlock(&Giant);
			return (error);
		}
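		/*
		 * Likewise, report a continued child only when WCONTINUED
		 * was requested, and clear P_CONTINUED so it is reported
		 * just once.
		 */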
		if (uap->options & WCONTINUED && (p->p_flag & P_CONTINUED)) {
			sx_xunlock(&proctree_lock);
			td->td_retval[0] = p->p_pid;
			p->p_flag &= ~P_CONTINUED;
			PROC_UNLOCK(p);

			if (uap->status) {
				status = SIGCONT;
				error = copyout(&status,
					uap->status, sizeof(status));
			} else
				error = 0;

			mtx_unlock(&Giant);
			return (error);
		}
		PROC_UNLOCK(p);
	}
	if (nfound == 0) {
		sx_xunlock(&proctree_lock);
		mtx_unlock(&Giant);
		return (ECHILD);
	}
	if (uap->options & WNOHANG) {
		sx_xunlock(&proctree_lock);
		td->td_retval[0] = 0;
		mtx_unlock(&Giant);
		return (0);
	}
	PROC_LOCK(q);
	sx_xunlock(&proctree_lock);
	error = msleep(q, &q->p_mtx, PWAIT | PCATCH, "wait", 0);
	PROC_UNLOCK(q);
	if (error) {
		mtx_unlock(&Giant);
		return (error);
	}
	goto loop;
}

/*
 * Make process 'parent' the new parent of process 'child'.
 * Must be called with an exclusive hold of proctree lock.
 */
void
proc_reparent(struct proc *child, struct proc *parent)
{

	sx_assert(&proctree_lock, SX_XLOCKED);
	PROC_LOCK_ASSERT(child, MA_OWNED);
	if (child->p_pptr == parent)
		return;

	LIST_REMOVE(child, p_sibling);
	LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
	child->p_pptr = parent;
}

/*
 * The next two functions are to handle adding/deleting items on the
 * exit callout list.
 *
 * at_exit():
 * Take the arguments given and put them onto the exit callout list.
 * However, first make sure that it's not already there.
 * Returns 0 on success.
 */

int
at_exit(exitlist_fn function)
{
	struct exitlist *ep;

#ifdef INVARIANTS
	/* Be noisy if the programmer has lost track of things */
	if (rm_at_exit(function))
		printf("WARNING: exit callout entry (%p) already present\n",
		    function);
#endif
	ep = malloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
	if (ep == NULL)
		return (ENOMEM);
	ep->function = function;
	TAILQ_INSERT_TAIL(&exit_list, ep, next);
	return (0);
}

/*
 * Scan the exit callout list for the given item and remove it.
 * Returns the number of items removed (0 or 1)
 */
int
rm_at_exit(exitlist_fn function)
{
	struct exitlist *ep;

	TAILQ_FOREACH(ep, &exit_list, next) {
		if (ep->function == function) {
			TAILQ_REMOVE(&exit_list, ep, next);
			free(ep, M_ATEXIT);
			return (1);
		}
	}
	return (0);
}