freebsd-dev/sys/kern/kern_exit.c


/*
* Copyright (c) 1982, 1986, 1989, 1991, 1993
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
* to the University of California by American Telephone and Telegraph
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the University of
* California, Berkeley and its contributors.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)kern_exit.c 8.7 (Berkeley) 2/12/94
* $FreeBSD$
*/
#include "opt_compat.h"
#include "opt_ktrace.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/tty.h>
#include <sys/wait.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sx.h>
#include <sys/ptrace.h>
#include <sys/acct.h> /* for acct_process() function prototype */
#include <sys/filedesc.h>
#include <sys/shm.h>
#include <sys/sem.h>
#include <sys/jail.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_zone.h>
#include <sys/user.h>
/* Required to be non-static for SysVR4 emulator */
MALLOC_DEFINE(M_ZOMBIE, "zombie", "zombie proc status");
static MALLOC_DEFINE(M_ATEXIT, "atexit", "atexit callback");
static int wait1 __P((struct thread *, struct wait_args *, int));
/*
* callout list for things to do at exit time
*/
struct exitlist {
exitlist_fn function;
TAILQ_ENTRY(exitlist) next;
};
TAILQ_HEAD(exit_list_head, exitlist);
static struct exit_list_head exit_list = TAILQ_HEAD_INITIALIZER(exit_list);
/*
* exit --
* Death of process.
*
* MPSAFE
*/
void
sys_exit(td, uap)
struct thread *td;
struct sys_exit_args /* {
int rval;
} */ *uap;
{
mtx_lock(&Giant);
exit1(td, W_EXITCODE(uap->rval, 0));
/* NOTREACHED */
}
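/*
* Added note -- a sketch of the status-word packing per the classic
* BSD <sys/wait.h> macros: W_EXITCODE(ret, sig) packs the word as
* (ret << 8 | sig), so a normal exit keeps the return value in the
* high byte with signal 0.  For example, W_EXITCODE(2, 0) == 0x0200,
* WEXITSTATUS(0x0200) == 2, and WTERMSIG() reads back the low bits.
*/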
/*
* Exit: deallocate address space and other resources, change proc state
* to zombie, and unlink proc from allproc and parent's lists. Save exit
* status and rusage for wait(). Check for child processes and orphan them.
*/
void
exit1(td, rv)
register struct thread *td;
int rv;
{
struct proc *p = td->td_proc;
register struct proc *q, *nq;
register struct vmspace *vm;
struct vnode *vtmp;
struct exitlist *ep;
GIANT_REQUIRED;
if (p->p_pid == 1) {
printf("init died (signal %d, exit %d)\n",
WTERMSIG(rv), WEXITSTATUS(rv));
panic("Going nowhere without my init!");
}
/* XXXKSE */
/* MUST abort all other threads before proceeding past this point */
/* are we a task leader? */
PROC_LOCK(p);
if (p == p->p_leader) {
q = p->p_peers;
while (q != NULL) {
PROC_LOCK(q);
psignal(q, SIGKILL);
PROC_UNLOCK(q);
q = q->p_peers;
}
while (p->p_peers)
msleep((caddr_t)p, &p->p_mtx, PWAIT, "exit1", 0);
}
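/*
* Added note: each peer signalled above enters exit1() itself,
* unlinks itself from the leader's p_peers chain (see the peer-list
* removal further below) and wakes the leader, so the msleep() loop
* here ends once the last peer has exited.
*/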
PROC_UNLOCK(p);
#ifdef PGINPROF
vmsizmon();
#endif
STOPEVENT(p, S_EXIT, rv);
wakeup(&p->p_stype); /* Wakeup anyone in procfs' PIOCWAIT */
/*
* Check if any loadable modules need anything done at process exit.
* e.g. SYSV IPC stuff
* XXX what if one of these generates an error?
*/
TAILQ_FOREACH(ep, &exit_list, next)
(*ep->function)(p);
stopprofclock(p);
MALLOC(p->p_ru, struct rusage *, sizeof(struct rusage),
M_ZOMBIE, M_WAITOK);
/*
* If parent is waiting for us to exit or exec,
* P_PPWAIT is set; we will wakeup the parent below.
*/
PROC_LOCK(p);
p->p_flag &= ~(P_TRACED | P_PPWAIT);
p->p_flag |= P_WEXIT;
SIGEMPTYSET(p->p_siglist);
PROC_UNLOCK(p);
if (timevalisset(&p->p_realtimer.it_value))
callout_stop(&p->p_itcallout);
/*
* Reset any sigio structures pointing to us as a result of
* F_SETOWN with our pid.
*/
funsetownlst(&p->p_sigiolst);
/*
* Close open files and release open-file table.
* This may block!
*/
fdfree(td); /* XXXKSE: may not be the one in proc */
/*
* Remove ourself from our leader's peer list and wake our leader.
*/
PROC_LOCK(p->p_leader);
if (p->p_leader->p_peers) {
q = p->p_leader;
while (q->p_peers != p)
q = q->p_peers;
q->p_peers = p->p_peers;
wakeup((caddr_t)p->p_leader);
}
PROC_UNLOCK(p->p_leader);
/* The next two chunks should probably be moved to vmspace_exit. */
vm = p->p_vmspace;
/*
* Release user portion of address space.
* This releases references to vnodes,
* which could cause I/O if the file has been unlinked.
* Need to do this early enough that we can still sleep.
* Can't free the entire vmspace as the kernel stack
* may be mapped within that space also.
*/
if (--vm->vm_refcnt == 0) {
if (vm->vm_shm)
shmexit(p);
pmap_remove_pages(vmspace_pmap(vm), VM_MIN_ADDRESS,
VM_MAXUSER_ADDRESS);
(void) vm_map_remove(&vm->vm_map, VM_MIN_ADDRESS,
VM_MAXUSER_ADDRESS);
vm->vm_freer = p;
}
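/*
* Added note: vm_refcnt reaching zero here means no rfork(RFMEM)
* peer still shares this vmspace, so it is now safe to tear down
* the user portion of the address range.
*/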
PROC_LOCK(p);
if (SESS_LEADER(p)) {
register struct session *sp = p->p_session;
PROC_UNLOCK(p);
if (sp->s_ttyvp) {
/*
* Controlling process.
* Signal foreground pgrp,
* drain controlling terminal
* and revoke access to controlling terminal.
*/
if (sp->s_ttyp && (sp->s_ttyp->t_session == sp)) {
if (sp->s_ttyp->t_pgrp)
pgsignal(sp->s_ttyp->t_pgrp, SIGHUP, 1);
(void) ttywait(sp->s_ttyp);
/*
* The tty could have been revoked
* if we blocked.
*/
if (sp->s_ttyvp)
VOP_REVOKE(sp->s_ttyvp, REVOKEALL);
}
if (sp->s_ttyvp)
vrele(sp->s_ttyvp);
sp->s_ttyvp = NULL;
/*
* s_ttyp is not zero'd; we use this to indicate
* that the session once had a controlling terminal.
* (for logging and informational purposes)
*/
}
sp->s_leader = NULL;
} else
PROC_UNLOCK(p);
fixjobc(p, p->p_pgrp, 0);
(void)acct_process(td);
#ifdef KTRACE
/*
* release trace file
*/
p->p_traceflag = 0; /* don't trace the vrele() */
if ((vtmp = p->p_tracep) != NULL) {
p->p_tracep = NULL;
vrele(vtmp);
}
#endif
/*
* Release reference to text vnode
*/
if ((vtmp = p->p_textvp) != NULL) {
p->p_textvp = NULL;
vrele(vtmp);
}
/*
* Remove proc from allproc queue and pidhash chain.
* Place onto zombproc. Unlink from parent's child list.
*/
sx_xlock(&allproc_lock);
LIST_REMOVE(p, p_list);
LIST_INSERT_HEAD(&zombproc, p, p_list);
LIST_REMOVE(p, p_hash);
sx_xunlock(&allproc_lock);
sx_xlock(&proctree_lock);
q = LIST_FIRST(&p->p_children);
if (q != NULL) /* only need this if any child is S_ZOMB */
wakeup((caddr_t) initproc);
for (; q != NULL; q = nq) {
nq = LIST_NEXT(q, p_sibling);
PROC_LOCK(q);
proc_reparent(q, initproc);
q->p_sigparent = SIGCHLD;
/*
* Traced processes are killed
* since their existence means someone is screwing up.
*/
if (q->p_flag & P_TRACED) {
q->p_flag &= ~P_TRACED;
psignal(q, SIGKILL);
}
PROC_UNLOCK(q);
}
/*
* Save exit status and final rusage info, adding in child rusage
* info and self times.
*/
p->p_xstat = rv;
*p->p_ru = p->p_stats->p_ru;
mtx_lock_spin(&sched_lock);
calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL);
mtx_unlock_spin(&sched_lock);
ruadd(p->p_ru, &p->p_stats->p_cru);
/*
* Pretend that an mi_switch() to the next process occurs now. We
* must set `switchtime' directly since we will call cpu_switch()
* directly. Set it now so that the rest of the exit time gets
* counted somewhere if possible.
*/
mtx_lock_spin(&sched_lock);
binuptime(PCPU_PTR(switchtime));
PCPU_SET(switchticks, ticks);
mtx_unlock_spin(&sched_lock);
/*
* notify interested parties of our demise.
*/
PROC_LOCK(p);
KNOTE(&p->p_klist, NOTE_EXIT);
/*
* Notify parent that we're gone. If parent has the PS_NOCLDWAIT
* flag set, notify process 1 instead (and hope it will handle
* this situation).
*/
if (p->p_pptr->p_procsig->ps_flag & PS_NOCLDWAIT) {
struct proc *pp = p->p_pptr;
proc_reparent(p, initproc);
/*
* If this was the last child of our parent, notify
* the parent so that, if it was in wait(2), it will
* continue.
*/
if (LIST_EMPTY(&pp->p_children))
wakeup((caddr_t)pp);
}
PROC_LOCK(p->p_pptr);
if (p->p_sigparent && p->p_pptr != initproc)
psignal(p->p_pptr, p->p_sigparent);
else
psignal(p->p_pptr, SIGCHLD);
PROC_UNLOCK(p->p_pptr);
/*
* If this is a kthread, then wakeup anyone waiting for it to exit.
*/
if (p->p_flag & P_KTHREAD)
wakeup((caddr_t)p);
PROC_UNLOCK(p);
sx_xunlock(&proctree_lock);
/*
* Clear curproc after we've done all operations
* that could block, and before tearing down the rest
* of the process state that might be used from clock, etc.
* Also, can't clear curproc while we're still runnable,
* as we're not on a run queue (we are current, just not
* a proper proc any longer!).
*
* Other substructures are freed from wait().
*/
mtx_assert(&Giant, MA_OWNED);
if (--p->p_limit->p_refcnt == 0) {
FREE(p->p_limit, M_SUBPROC);
p->p_limit = NULL;
}
/*
* Release this thread's reference to the ucred. The actual proc
* reference will stay around until the proc is harvested by
* wait(). At this point the ucred is immutable (no other threads
* from this proc are around that can change it) so we leave the
* per-thread ucred pointer intact in case it is needed although
* in theory nothing should be using it at this point.
*/
crfree(td->td_ucred);
/*
* Finally, call machine-dependent code to release the remaining
* resources including address space, the kernel stack and pcb.
* The address space is released by "vmspace_exitfree(p)" in
* vm_waitproc().
*/
cpu_exit(td);
PROC_LOCK(p);
mtx_lock_spin(&sched_lock);
while (mtx_owned(&Giant))
mtx_unlock(&Giant);
/*
* We have to wait until after releasing all locks before
* changing p_stat. If we block on a mutex then we will be
* back at SRUN when we resume and our parent will never
* harvest us.
*/
p->p_stat = SZOMB;
wakeup(p->p_pptr);
PROC_UNLOCK(p);
cnt.v_swtch++;
cpu_throw();
panic("exit1");
}
#ifdef COMPAT_43
/*
* MPSAFE, the dirty work is handled by wait1().
*/
int
owait(td, uap)
struct thread *td;
register struct owait_args /* {
int dummy;
} */ *uap;
{
struct wait_args w;
w.options = 0;
w.rusage = NULL;
w.pid = WAIT_ANY;
w.status = NULL;
return (wait1(td, &w, 1));
}
#endif /* COMPAT_43 */
/*
* MPSAFE, the dirty work is handled by wait1().
*/
int
wait4(td, uap)
struct thread *td;
struct wait_args *uap;
{
return (wait1(td, uap, 0));
}
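/*
* Userland view -- a hedged sketch of standard wait4(2) usage, not
* part of this file (needs <sys/types.h>, <sys/wait.h>,
* <sys/resource.h>, and <stdio.h> for printf):
*
*	int status;
*	struct rusage ru;
*	pid_t pid = wait4(WAIT_ANY, &status, WNOHANG, &ru);
*
*	if (pid > 0 && WIFEXITED(status))
*		printf("child %d exited with %d\n", (int)pid,
*		    WEXITSTATUS(status));
*/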
/*
* MPSAFE
*/
static int
wait1(td, uap, compat)
register struct thread *td;
register struct wait_args /* {
int pid;
int *status;
int options;
struct rusage *rusage;
} */ *uap;
int compat;
{
register int nfound;
register struct proc *q, *p, *t;
int status, error;
mtx_lock(&Giant);
q = td->td_proc;
if (uap->pid == 0)
uap->pid = -q->p_pgid;
if (uap->options &~ (WUNTRACED|WNOHANG|WLINUXCLONE)) {
error = EINVAL;
goto done2;
}
loop:
nfound = 0;
sx_slock(&proctree_lock);
LIST_FOREACH(p, &q->p_children, p_sibling) {
if (uap->pid != WAIT_ANY &&
p->p_pid != uap->pid && p->p_pgid != -uap->pid)
continue;
/*
* This special case handles a kthread spawned by linux_clone
* (see linux_misc.c). The linux_wait4 and linux_waitpid
* functions need to be able to distinguish between waiting
* on a process and waiting on a thread. It is a thread if
* p_sigparent is not SIGCHLD, and the WLINUXCLONE option
* signifies we want to wait for threads and not processes.
*/
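/*
* Restated (added): the XOR below accepts a child only when both
* sides agree -- p_sigparent == SIGCHLD without WLINUXCLONE (a wait
* for processes), or p_sigparent != SIGCHLD with WLINUXCLONE (a wait
* for threads); the mixed cases are skipped.
*/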
PROC_LOCK(p);
if ((p->p_sigparent != SIGCHLD) ^
((uap->options & WLINUXCLONE) != 0)) {
PROC_UNLOCK(p);
continue;
}
nfound++;
mtx_lock_spin(&sched_lock);
if (p->p_stat == SZOMB) {
/*
* charge the child's scheduling CPU usage to the parent
* XXXKSE assume only one thread & kse & ksegrp
* keep estcpu in each ksegrp
* so charge it to the ksegrp that did the wait
* since process estcpu is sum of all ksegrps,
* this is strictly as expected.
* Assume that the child process aggregated all
* the estcpu into the 'built-in' ksegrp.
* XXXKSE
*/
if (curthread->td_proc->p_pid != 1) {
curthread->td_ksegrp->kg_estcpu =
ESTCPULIM(curthread->td_ksegrp->kg_estcpu +
p->p_ksegrp.kg_estcpu);
}
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
sx_sunlock(&proctree_lock);
td->td_retval[0] = p->p_pid;
#ifdef COMPAT_43
if (compat)
td->td_retval[1] = p->p_xstat;
else
#endif
if (uap->status) {
status = p->p_xstat; /* convert to int */
if ((error = copyout((caddr_t)&status,
(caddr_t)uap->status, sizeof(status)))) {
goto done2;
}
}
if (uap->rusage && (error = copyout((caddr_t)p->p_ru,
(caddr_t)uap->rusage, sizeof (struct rusage)))) {
goto done2;
}
/*
* If we got the child via a ptrace 'attach',
* we need to give it back to the old parent.
*/
sx_xlock(&proctree_lock);
if (p->p_oppid) {
if ((t = pfind(p->p_oppid)) != NULL) {
PROC_LOCK(p);
p->p_oppid = 0;
proc_reparent(p, t);
PROC_UNLOCK(p);
psignal(t, SIGCHLD);
wakeup((caddr_t)t);
PROC_UNLOCK(t);
sx_xunlock(&proctree_lock);
error = 0;
goto done2;
}
}
sx_xunlock(&proctree_lock);
PROC_LOCK(p);
p->p_xstat = 0;
PROC_UNLOCK(p);
ruadd(&q->p_stats->p_cru, p->p_ru);
FREE(p->p_ru, M_ZOMBIE);
p->p_ru = NULL;
/*
* Decrement the count of procs running with this uid.
*/
(void)chgproccnt(p->p_ucred->cr_ruidinfo, -1, 0);
/*
* Finally finished with old proc entry.
* Unlink it from its process group and free it.
*/
leavepgrp(p);
sx_xlock(&allproc_lock);
LIST_REMOVE(p, p_list); /* off zombproc */
sx_xunlock(&allproc_lock);
sx_xlock(&proctree_lock);
LIST_REMOVE(p, p_sibling);
sx_xunlock(&proctree_lock);
/*
* Free up credentials.
*/
crfree(p->p_ucred);
p->p_ucred = NULL;
/*
* Remove unused arguments
*/
if (p->p_args && --p->p_args->ar_ref == 0)
FREE(p->p_args, M_PARGS);
if (--p->p_procsig->ps_refcnt == 0) {
if (p->p_sigacts != &p->p_uarea->u_sigacts)
FREE(p->p_sigacts, M_SUBPROC);
FREE(p->p_procsig, M_SUBPROC);
p->p_procsig = NULL;
}
/*
* Give vm and machine-dependent layer a chance
* to free anything that cpu_exit couldn't
* release while still running in process context.
*/
vm_waitproc(p);
mtx_destroy(&p->p_mtx);
zfree(proc_zone, p);
nprocs--;
error = 0;
goto done2;
}
if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 &&
(p->p_flag & P_TRACED || uap->options & WUNTRACED)) {
mtx_unlock_spin(&sched_lock);
p->p_flag |= P_WAITED;
PROC_UNLOCK(p);
sx_sunlock(&proctree_lock);
td->td_retval[0] = p->p_pid;
#ifdef COMPAT_43
if (compat) {
td->td_retval[1] = W_STOPCODE(p->p_xstat);
error = 0;
} else
#endif
if (uap->status) {
status = W_STOPCODE(p->p_xstat);
error = copyout((caddr_t)&status,
(caddr_t)uap->status, sizeof(status));
} else
error = 0;
goto done2;
}
mtx_unlock_spin(&sched_lock);
PROC_UNLOCK(p);
}
sx_sunlock(&proctree_lock);
if (nfound == 0) {
error = ECHILD;
goto done2;
}
if (uap->options & WNOHANG) {
td->td_retval[0] = 0;
error = 0;
goto done2;
}
if ((error = tsleep((caddr_t)q, PWAIT | PCATCH, "wait", 0)) != 0)
goto done2;
goto loop;
done2:
mtx_unlock(&Giant);
return (error);
}
/*
* Make process 'parent' the new parent of process 'child'.
* Must be called with an exclusive hold of proctree lock.
*/
void
proc_reparent(child, parent)
register struct proc *child;
register struct proc *parent;
{
sx_assert(&proctree_lock, SX_XLOCKED);
PROC_LOCK_ASSERT(child, MA_OWNED);
if (child->p_pptr == parent)
return;
LIST_REMOVE(child, p_sibling);
LIST_INSERT_HEAD(&parent->p_children, child, p_sibling);
child->p_pptr = parent;
}
/*
* The next two functions are to handle adding/deleting items on the
* exit callout list
*
* at_exit():
* Take the arguments given and put them onto the exit callout list,
* but first make sure it is not already there.
* Returns 0 on success.
*/
int
at_exit(function)
exitlist_fn function;
{
struct exitlist *ep;
#ifdef INVARIANTS
/* Be noisy if the programmer has lost track of things */
if (rm_at_exit(function))
printf("WARNING: exit callout entry (%p) already present\n",
function);
#endif
ep = malloc(sizeof(*ep), M_ATEXIT, M_NOWAIT);
if (ep == NULL)
return (ENOMEM);
ep->function = function;
TAILQ_INSERT_TAIL(&exit_list, ep, next);
return (0);
}
/*
* Scan the exit callout list for the given item and remove it.
* Returns the number of items removed (0 or 1)
*/
int
rm_at_exit(function)
exitlist_fn function;
{
struct exitlist *ep;
TAILQ_FOREACH(ep, &exit_list, next) {
if (ep->function == function) {
TAILQ_REMOVE(&exit_list, ep, next);
free(ep, M_ATEXIT);
return (1);
}
}
return (0);
}
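/*
* Usage sketch (added; "mymod_exithook" is a hypothetical name, not
* part of this file).  A subsystem or loadable module might hook
* process exit like so:
*
*	static void
*	mymod_exithook(struct proc *p)
*	{
*		(release any per-process state the module keeps for p)
*	}
*
*	if (at_exit(mymod_exithook) != 0)	(at attach/load time)
*		return (ENOMEM);
*	...
*	(void)rm_at_exit(mymod_exithook);	(at detach/unload time)
*/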