/*
 * Copyright (c) 1995 Terrence R. Lambert
 * All rights reserved.
 *
 * Copyright (c) 1982, 1986, 1989, 1991, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)init_main.c	8.9 (Berkeley) 1/21/94
 * $FreeBSD$
 */

#include "opt_init_path.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/exec.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mac.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/systm.h>
#include <sys/signalvar.h>
#include <sys/vnode.h>
#include <sys/sysent.h>
#include <sys/reboot.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/sysproto.h>
#include <sys/vmmeter.h>
#include <sys/unistd.h>
#include <sys/malloc.h>
#include <sys/conf.h>

#include <machine/cpu.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/user.h>
#include <sys/copyright.h>

void mi_startup(void);				/* Should be elsewhere */

/* Components of the first process -- never freed. */
static struct session session0;
static struct pgrp pgrp0;
struct proc proc0;
struct thread thread0;
struct kse kse0;
struct ksegrp ksegrp0;
static struct procsig procsig0;
static struct filedesc0 filedesc0;
static struct plimit limit0;
static struct vmspace vmspace0;
struct proc *initproc;

int cmask = CMASK;

struct vnode *rootvp;
int boothowto = 0;		/* initialized so that it can be patched */
SYSCTL_INT(_debug, OID_AUTO, boothowto, CTLFLAG_RD, &boothowto, 0, "");
int bootverbose;
SYSCTL_INT(_debug, OID_AUTO, bootverbose, CTLFLAG_RW, &bootverbose, 0, "");

/*
 * This ensures that there is at least one entry so that the sysinit_set
 * symbol is not undefined.  A SYSINIT with a subsystem ID of SI_SUB_DUMMY
 * is never executed.
 */
SYSINIT(placeholder, SI_SUB_DUMMY, SI_ORDER_ANY, NULL, NULL)

/*
 * The sysinit table itself.  Items are checked off as they are run.
 * If we want to register new sysinit types, add them to newsysinit.
 */
SET_DECLARE(sysinit_set, struct sysinit);
struct sysinit **sysinit, **sysinit_end;
struct sysinit **newsysinit, **newsysinit_end;
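
/*
 * Subsystems announce themselves with the SYSINIT() macro (as the
 * placeholder entry above does); each use drops a struct sysinit into the
 * sysinit_set linker set, keyed by subsystem and order.  For example, a
 * (hypothetical) foo subsystem would typically register a boot-time hook
 * with something like:
 *
 *	SYSINIT(foo, SI_SUB_FOO, SI_ORDER_FIRST, foo_init, NULL)
 *
 * mi_startup() below sorts and runs these entries; sysinit_add() merges in
 * sets that appear later (e.g. from loaded modules).
 */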

/*
 * Merge a new sysinit set into the current set, reallocating it if
 * necessary.  This can only be called after malloc is running.
 */
void
sysinit_add(struct sysinit **set, struct sysinit **set_end)
{
        struct sysinit **newset;
        struct sysinit **sipp;
        struct sysinit **xipp;
        int count;

        count = set_end - set;
        if (newsysinit)
                count += newsysinit_end - newsysinit;
        else
                count += sysinit_end - sysinit;
        newset = malloc(count * sizeof(*sipp), M_TEMP, M_NOWAIT);
        if (newset == NULL)
                panic("cannot malloc for sysinit");
        xipp = newset;
        if (newsysinit)
                for (sipp = newsysinit; sipp < newsysinit_end; sipp++)
                        *xipp++ = *sipp;
        else
                for (sipp = sysinit; sipp < sysinit_end; sipp++)
                        *xipp++ = *sipp;
        for (sipp = set; sipp < set_end; sipp++)
                *xipp++ = *sipp;
        if (newsysinit)
                free(newsysinit, M_TEMP);
        newsysinit = newset;
        newsysinit_end = newset + count;
}

/*
 * System startup; initialize the world, create process 0, mount root
 * filesystem, and fork to create init and pagedaemon.  Most of the
 * hard work is done in the lower-level initialization routines including
 * startup(), which does memory initialization and autoconfiguration.
 *
 * This allows simple addition of new kernel subsystems that require
 * boot time initialization.  It also allows substitution of a subsystem
 * (for instance, a scheduler, kernel profiler, or VM system) by an object
 * module.  Finally, it allows for optional "kernel threads".
 */
void
mi_startup(void)
{

        register struct sysinit **sipp;		/* system initialization*/
        register struct sysinit **xipp;		/* interior loop of sort*/
        register struct sysinit *save;		/* bubble*/

        if (sysinit == NULL) {
                sysinit = SET_BEGIN(sysinit_set);
                sysinit_end = SET_LIMIT(sysinit_set);
        }

restart:
        /*
         * Perform a bubble sort of the system initialization objects by
         * their subsystem (primary key) and order (secondary key).
         */
        for (sipp = sysinit; sipp < sysinit_end; sipp++) {
                for (xipp = sipp + 1; xipp < sysinit_end; xipp++) {
                        if ((*sipp)->subsystem < (*xipp)->subsystem ||
                             ((*sipp)->subsystem == (*xipp)->subsystem &&
                              (*sipp)->order <= (*xipp)->order))
                                continue;	/* skip*/
                        save = *sipp;
                        *sipp = *xipp;
                        *xipp = save;
                }
        }

        /*
         * Traverse the (now) ordered list of system initialization tasks.
         * Perform each task, and continue on to the next task.
         *
         * The last item on the list is expected to be the scheduler,
         * which will not return.
         */
        for (sipp = sysinit; sipp < sysinit_end; sipp++) {

                if ((*sipp)->subsystem == SI_SUB_DUMMY)
                        continue;	/* skip dummy task(s)*/

                if ((*sipp)->subsystem == SI_SUB_DONE)
                        continue;

                /* Call function */
                (*((*sipp)->func))((*sipp)->udata);

                /* Check off the one we've just done */
                (*sipp)->subsystem = SI_SUB_DONE;

                /* Check if we've installed more sysinit items via KLD */
                if (newsysinit != NULL) {
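                        /*
                         * A freshly registered set (see sysinit_add())
                         * replaces the working table; restart the scan so
                         * the new entries get sorted in.  Entries already
                         * run are marked SI_SUB_DONE and will be skipped.
                         */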
                        if (sysinit != SET_BEGIN(sysinit_set))
                                free(sysinit, M_TEMP);
                        sysinit = newsysinit;
                        sysinit_end = newsysinit_end;
                        newsysinit = NULL;
                        newsysinit_end = NULL;
                        goto restart;
                }
        }

        panic("Shouldn't get here!");
        /* NOTREACHED*/
}


/*
 ***************************************************************************
 ****
 **** The following SYSINIT's belong elsewhere, but have not yet
 **** been moved.
 ****
 ***************************************************************************
 */
static void
print_caddr_t(void *data __unused)
{
        printf("%s", (char *)data);
}
SYSINIT(announce, SI_SUB_COPYRIGHT, SI_ORDER_FIRST, print_caddr_t, copyright)
SYSINIT(version, SI_SUB_COPYRIGHT, SI_ORDER_SECOND, print_caddr_t, version)

static void
set_boot_verbose(void *data __unused)
{

        if (boothowto & RB_VERBOSE)
                bootverbose++;
}
SYSINIT(boot_verbose, SI_SUB_TUNABLES, SI_ORDER_ANY, set_boot_verbose, NULL)

struct sysentvec null_sysvec = {
        0,
        NULL,
        0,
        0,
        NULL,
        0,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        NULL,
        "null",
        NULL,
        NULL,
        0,
        PAGE_SIZE,
        VM_MIN_ADDRESS,
        VM_MAXUSER_ADDRESS,
        USRSTACK,
        PS_STRINGS,
        VM_PROT_ALL,
        NULL,
        NULL
};
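
/*
 * proc0 points p_sysent at this minimal "null" ABI description (set in
 * proc0_init() below).  The positional initializers above follow
 * struct sysentvec (see <sys/sysent.h>); the user address-space fields
 * near the end are the ones proc0_init() and start_init() actually use,
 * via sv_minuser, sv_maxuser, and sv_usrstack.
 */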


/*
 ***************************************************************************
 ****
 **** The two following SYSINIT's are proc0 specific glue code.  I am not
 **** convinced that they cannot be safely combined, but their order of
 **** operation has been kept the same as in the original init_main.c
 **** for right now.
 ****
 **** These probably belong in init_proc.c or kern_proc.c, since they
 **** deal with proc0 (the fork template process).
 ****
 ***************************************************************************
 */
/* ARGSUSED*/
static void
proc0_init(void *dummy __unused)
{
        register struct proc *p;
        register struct filedesc0 *fdp;
        register unsigned i;
        struct thread *td;
        struct ksegrp *kg;
        struct kse *ke;

        GIANT_REQUIRED;
        p = &proc0;
        td = &thread0;
        ke = &kse0;
        kg = &ksegrp0;

        ke->ke_sched = kse0_sched;
        kg->kg_sched = ksegrp0_sched;
        p->p_sched = proc0_sched;
        td->td_sched = thread0_sched;

        /*
         * Initialize magic number.
         */
        p->p_magic = P_MAGIC;

        /*
         * Initialize thread, process and pgrp structures.
         */
        procinit();
        threadinit();

        /*
         * Initialize sleep queue hash table
         */
        sleepinit();

        /*
         * additional VM structures
         */
        vm_init2();

        /*
         * Create process 0 (the swapper).
         */
        LIST_INSERT_HEAD(&allproc, p, p_list);
        LIST_INSERT_HEAD(PIDHASH(0), p, p_hash);
        mtx_init(&pgrp0.pg_mtx, "process group", NULL, MTX_DEF | MTX_DUPOK);
        p->p_pgrp = &pgrp0;
        LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash);
        LIST_INIT(&pgrp0.pg_members);
        LIST_INSERT_HEAD(&pgrp0.pg_members, p, p_pglist);

        pgrp0.pg_session = &session0;
        mtx_init(&session0.s_mtx, "session", NULL, MTX_DEF);
        session0.s_count = 1;
        session0.s_leader = p;

        p->p_sysent = &null_sysvec;

        /*
         * proc_linkup was already done in init_i386() or alphainit() etc.
         * because the earlier code needed to follow td->td_proc.  Otherwise
         * it would have been done here; maybe this means it should be done
         * earlier too.
         */
        p->p_flag = P_SYSTEM;
        p->p_sflag = PS_INMEM;
        p->p_state = PRS_NORMAL;
        td->td_state = TDS_RUNNING;
        kg->kg_nice = NZERO;
        kg->kg_pri_class = PRI_TIMESHARE;
        kg->kg_user_pri = PUSER;
        td->td_priority = PVM;
        td->td_base_pri = PUSER;
        td->td_kse = ke;	/* XXXKSE */
        ke->ke_oncpu = 0;
        ke->ke_state = KES_THREAD;
        ke->ke_thread = td;
        p->p_peers = 0;
        p->p_leader = p;

        bcopy("swapper", p->p_comm, sizeof ("swapper"));

        callout_init(&p->p_itcallout, 0);
        callout_init(&td->td_slpcallout, 1);

        /* Create credentials. */
        p->p_ucred = crget();
        p->p_ucred->cr_ngroups = 1;	/* group 0 */
        p->p_ucred->cr_uidinfo = uifind(0);
        p->p_ucred->cr_ruidinfo = uifind(0);
        p->p_ucred->cr_prison = NULL;	/* Don't jail it. */
#ifdef MAC
        mac_create_proc0(p->p_ucred);
#endif
        td->td_ucred = crhold(p->p_ucred);

        /* Create procsig. */
        p->p_procsig = &procsig0;
        p->p_procsig->ps_refcnt = 1;

        /* Initialize signal state for process 0. */
        siginit(&proc0);

        /* Create the file descriptor table. */
        fdp = &filedesc0;
        p->p_fd = &fdp->fd_fd;
        mtx_init(&fdp->fd_fd.fd_mtx, FILEDESC_LOCK_DESC, NULL, MTX_DEF);
        fdp->fd_fd.fd_refcnt = 1;
        fdp->fd_fd.fd_cmask = cmask;
        fdp->fd_fd.fd_ofiles = fdp->fd_dfiles;
        fdp->fd_fd.fd_ofileflags = fdp->fd_dfileflags;
        fdp->fd_fd.fd_nfiles = NDFILE;

        /* Create the limits structures. */
        p->p_limit = &limit0;
        for (i = 0; i < sizeof(p->p_rlimit)/sizeof(p->p_rlimit[0]); i++)
                limit0.pl_rlimit[i].rlim_cur =
                    limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY;
        limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur =
            limit0.pl_rlimit[RLIMIT_NOFILE].rlim_max = maxfiles;
        limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur =
            limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc;
        i = ptoa(cnt.v_free_count);
        limit0.pl_rlimit[RLIMIT_RSS].rlim_max = i;
        limit0.pl_rlimit[RLIMIT_MEMLOCK].rlim_max = i;
        limit0.pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = i / 3;
        limit0.p_refcnt = 1;
        p->p_cpulimit = RLIM_INFINITY;

        /* Allocate a prototype map so we have something to fork. */
        pmap_pinit0(vmspace_pmap(&vmspace0));
        p->p_vmspace = &vmspace0;
        vmspace0.vm_refcnt = 1;
        vm_map_init(&vmspace0.vm_map, p->p_sysent->sv_minuser,
            p->p_sysent->sv_maxuser);
        vmspace0.vm_map.pmap = vmspace_pmap(&vmspace0);

        /*
         * We continue to place resource usage info and signal
         * actions in the user struct so they're pageable.
         */
        p->p_stats = &p->p_uarea->u_stats;
        p->p_sigacts = &p->p_uarea->u_sigacts;

        /*
         * Charge root for one process.
         */
        (void)chgproccnt(p->p_ucred->cr_ruidinfo, 1, 0);
}
SYSINIT(p0init, SI_SUB_INTRINSIC, SI_ORDER_FIRST, proc0_init, NULL)

/* ARGSUSED*/
static void
proc0_post(void *dummy __unused)
{
        struct timespec ts;
        struct proc *p;

        /*
         * Now we can look at the time, having had a chance to verify the
         * time from the filesystem.  Pretend that proc0 started now.
         */
        sx_slock(&allproc_lock);
        LIST_FOREACH(p, &allproc, p_list) {
                microtime(&p->p_stats->p_start);
                p->p_runtime.sec = 0;
                p->p_runtime.frac = 0;
        }
        sx_sunlock(&allproc_lock);
        binuptime(PCPU_PTR(switchtime));
        PCPU_SET(switchticks, ticks);

        /*
         * Give the ``random'' number generator a thump.
         */
        nanotime(&ts);
        srandom(ts.tv_sec ^ ts.tv_nsec);
}
SYSINIT(p0post, SI_SUB_INTRINSIC_POST, SI_ORDER_FIRST, proc0_post, NULL)

/*
 ***************************************************************************
 ****
 **** The following SYSINIT's and glue code should be moved to the
 **** respective files on a per subsystem basis.
 ****
 ***************************************************************************
 */


/*
 ***************************************************************************
 ****
 **** The following code probably belongs in another file, like
 **** kern/init_init.c.
 ****
 ***************************************************************************
 */

/*
 * List of paths to try when searching for "init".
 */
static char init_path[MAXPATHLEN] =
#ifdef INIT_PATH
    __XSTRING(INIT_PATH);
#else
    "/sbin/init:/sbin/oinit:/sbin/init.bak:/stand/sysinstall";
#endif
SYSCTL_STRING(_kern, OID_AUTO, init_path, CTLFLAG_RD, init_path, 0,
        "Path used to search for the init process");

/*
 * Start the initial user process; try exec'ing each pathname in init_path.
 * The program is invoked with one argument containing the boot flags.
 */
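/*
 * start_init() is not called directly: create_init() below forks the init
 * process and uses cpu_set_fork_handler() to have the child begin here
 * (via fork_trampoline()), so this code runs in the context of process 1,
 * not proc0.
 */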
static void
start_init(void *dummy)
{
        vm_offset_t addr;
        struct execve_args args;
        int options, error;
        char *var, *path, *next, *s;
        char *ucp, **uap, *arg0, *arg1;
        struct thread *td;
        struct proc *p;
        int init_does_devfs = 0;

        mtx_lock(&Giant);

        GIANT_REQUIRED;

        td = curthread;
        p = td->td_proc;

        vfs_mountroot();

        /* Get the vnode for '/'.  Set p->p_fd->fd_cdir to reference it. */
        if (VFS_ROOT(TAILQ_FIRST(&mountlist), &rootvnode))
                panic("cannot find root vnode");
        FILEDESC_LOCK(p->p_fd);
        p->p_fd->fd_cdir = rootvnode;
        VREF(p->p_fd->fd_cdir);
        p->p_fd->fd_rdir = rootvnode;
        VREF(p->p_fd->fd_rdir);
        FILEDESC_UNLOCK(p->p_fd);
        VOP_UNLOCK(rootvnode, 0, td);
#ifdef MAC
        mac_create_root_mount(td->td_ucred, TAILQ_FIRST(&mountlist));
#endif

        /*
         * For disk-based systems, we probably cannot do this yet
         * since the filesystem will be read-only.  But an NFS root
         * might be OK.  It is worth a shot.
         */
        error = kern_mkdir(td, "/dev", UIO_SYSSPACE, 0700);
        if (error == EEXIST)
                error = 0;
        if (error == 0)
                error = kernel_vmount(0, "fstype", "devfs",
                    "fspath", "/dev", NULL);
        if (error != 0)
                init_does_devfs = 1;

        /*
         * Need just enough stack to hold the faked-up "execve()" arguments.
         */
        addr = p->p_sysent->sv_usrstack - PAGE_SIZE;
        if (vm_map_find(&p->p_vmspace->vm_map, NULL, 0, &addr, PAGE_SIZE,
                        FALSE, VM_PROT_ALL, VM_PROT_ALL, 0) != 0)
                panic("init: couldn't allocate argument space");
        p->p_vmspace->vm_maxsaddr = (caddr_t)addr;
        p->p_vmspace->vm_ssize = 1;

        if ((var = getenv("init_path")) != NULL) {
                strlcpy(init_path, var, sizeof(init_path));
                freeenv(var);
        }
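
        /*
         * Each pass of the loop below builds a small argument image at the
         * top of the new process's user stack -- the boot-flag string
         * (e.g. "-s" or "--"), the candidate pathname itself, and an argv
         * array pointing at the two -- and then tries to execve() that
         * candidate.
         */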
        for (path = init_path; *path != '\0'; path = next) {
                while (*path == ':')
                        path++;
                if (*path == '\0')
                        break;
                for (next = path; *next != '\0' && *next != ':'; next++)
                        /* nothing */ ;
                if (bootverbose)
                        printf("start_init: trying %.*s\n", (int)(next - path),
                            path);

                /*
                 * Move out the boot flag argument.
                 */
                options = 0;
                ucp = (char *)p->p_sysent->sv_usrstack;
                (void)subyte(--ucp, 0);		/* trailing zero */
                if (boothowto & RB_SINGLE) {
                        (void)subyte(--ucp, 's');
                        options = 1;
                }
#ifdef notyet
                if (boothowto & RB_FASTBOOT) {
                        (void)subyte(--ucp, 'f');
                        options = 1;
                }
#endif

#ifdef BOOTCDROM
                (void)subyte(--ucp, 'C');
                options = 1;
#endif
                if (init_does_devfs) {
                        (void)subyte(--ucp, 'd');
                        options = 1;
                }

                if (options == 0)
                        (void)subyte(--ucp, '-');
                (void)subyte(--ucp, '-');		/* leading hyphen */
                arg1 = ucp;

                /*
                 * Move out the file name (also arg 0).
                 */
                (void)subyte(--ucp, 0);
                for (s = next - 1; s >= path; s--)
                        (void)subyte(--ucp, *s);
                arg0 = ucp;

                /*
                 * Move out the arg pointers.
                 */
                uap = (char **)((intptr_t)ucp & ~(sizeof(intptr_t)-1));
                (void)suword((caddr_t)--uap, (long)0);	/* terminator */
                (void)suword((caddr_t)--uap, (long)(intptr_t)arg1);
                (void)suword((caddr_t)--uap, (long)(intptr_t)arg0);

                /*
                 * Point at the arguments.
                 */
                args.fname = arg0;
                args.argv = uap;
                args.envv = NULL;

                /*
                 * Now try to exec the program.  If we can't for any reason
                 * other than it doesn't exist, complain.
                 *
                 * Otherwise, return via fork_trampoline() all the way
                 * to user mode as init!
                 */
                if ((error = execve(td, &args)) == 0) {
                        mtx_unlock(&Giant);
                        return;
                }
                if (error != ENOENT)
                        printf("exec %.*s: error %d\n", (int)(next - path),
                            path, error);
        }
        printf("init: not found in path %s\n", init_path);
        panic("no init");
}

/*
 * Like kthread_create(), but runs in its own address space.
 * We do this early to reserve pid 1.
 *
 * Note special case - do not make it runnable yet.  Other work
 * in progress will change this more.
 */
static void
create_init(const void *udata __unused)
{
        struct ucred *newcred, *oldcred;
        int error;

        error = fork1(&thread0, RFFDG | RFPROC | RFSTOPPED, 0, &initproc);
        if (error)
                panic("cannot fork init: %d\n", error);
        /* divorce init's credentials from the kernel's */
        newcred = crget();
        PROC_LOCK(initproc);
        initproc->p_flag |= P_SYSTEM;
        oldcred = initproc->p_ucred;
        crcopy(newcred, oldcred);
#ifdef MAC
        mac_create_proc1(newcred);
#endif
        initproc->p_ucred = newcred;
        PROC_UNLOCK(initproc);
        crfree(oldcred);
        cred_update_thread(FIRST_THREAD_IN_PROC(initproc));
        mtx_lock_spin(&sched_lock);
        initproc->p_sflag |= PS_INMEM;
        mtx_unlock_spin(&sched_lock);
        cpu_set_fork_handler(FIRST_THREAD_IN_PROC(initproc), start_init, NULL);
}
SYSINIT(init, SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL)
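
/*
 * Creating init and making it runnable are deliberately two SYSINITs:
 * the process is forked with RFSTOPPED at SI_SUB_CREATE_INIT (above) so
 * pid 1 is reserved early, and kick_init() only puts it on the run queue
 * at SI_SUB_KTHREAD_INIT, once the intervening initialization has run.
 */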

/*
 * Make it runnable now.
 */
static void
kick_init(const void *udata __unused)
{
        struct thread *td;

        td = FIRST_THREAD_IN_PROC(initproc);
        mtx_lock_spin(&sched_lock);
        TD_SET_CAN_RUN(td);
        setrunqueue(td);	/* XXXKSE */
        mtx_unlock_spin(&sched_lock);
}
SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL)