/*-
 * Copyright (c) 1982, 1986 The Regents of the University of California.
 * Copyright (c) 1989, 1990 William Jolitz
 * Copyright (c) 1994 John Dyson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_machdep.c	7.3 (Berkeley) 5/13/91
 *	Utah $Hdr: vm_machdep.c 1.16.1.1 89/06/23$
 *	$FreeBSD$
 */
/*-
 * Copyright (c) 1994, 1995, 1996 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Chris G. Demetriou
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/sf_buf.h>
#include <sys/sysctl.h>
#include <sys/unistd.h>

#include <machine/cpu.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

/*
 * On systems without a direct mapped region (e.g. PPC64),
 * we use the same code as the Book E implementation.  Since
 * we need to have runtime detection of this, define some machinery
 * for sf_bufs in this case, and ignore it on systems with direct maps.
 */

#ifndef NSFBUFS
#define	NSFBUFS		(512 + maxusers * 16)
#endif

static void sf_buf_init(void *arg);
SYSINIT(sock_sf, SI_SUB_MBUF, SI_ORDER_ANY, sf_buf_init, NULL);

LIST_HEAD(sf_head, sf_buf);

/* A hash table of active sendfile(2) buffers */
static struct sf_head *sf_buf_active;
static u_long sf_buf_hashmask;

#define	SF_BUF_HASH(m)	(((m) - vm_page_array) & sf_buf_hashmask)

static TAILQ_HEAD(, sf_buf) sf_buf_freelist;
static u_int sf_buf_alloc_want;

/*
 * A lock used to synchronize access to the hash table and free list
 */
static struct mtx sf_buf_lock;
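
/*
 * Illustrative note, not driver logic: hashinit(9) sizes the table to a
 * power of two and stores that size minus one in sf_buf_hashmask, so
 * SF_BUF_HASH() reduces a page's index within vm_page_array modulo the
 * bucket count.  Finding the bucket for a page is then simply:
 *
 *	struct sf_head *bucket = &sf_buf_active[SF_BUF_HASH(m)];
 */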

/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child is
 * ready to run and return to user mode.
 */
void
cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
{
	struct proc *p1;
	struct trapframe *tf;
	struct callframe *cf;
	struct pcb *pcb;

	KASSERT(td1 == curthread || td1 == &thread0,
	    ("cpu_fork: p1 not curproc and not proc0"));
	CTR3(KTR_PROC, "cpu_fork: called td1=%08x p2=%08x flags=%x",
	    (u_int)td1, (u_int)p2, flags);

	if ((flags & RFPROC) == 0)
		return;

	p1 = td1->td_proc;

	pcb = (struct pcb *)((td2->td_kstack +
	    td2->td_kstack_pages * PAGE_SIZE - sizeof(struct pcb)) & ~0x2fU);
	td2->td_pcb = pcb;

	/* Copy the pcb */
	bcopy(td1->td_pcb, pcb, sizeof(struct pcb));

	/*
	 * Create a fresh stack for the new process.
	 * Copy the trap frame for the return to user mode as if from a
	 * syscall. This copies most of the user mode register values.
	 */
	tf = (struct trapframe *)pcb - 1;
	bcopy(td1->td_frame, tf, sizeof(*tf));

	/* Set up trap frame: child returns 0 with CR0[SO] (error) clear. */
	tf->fixreg[FIRSTARG] = 0;
	tf->fixreg[FIRSTARG + 1] = 0;
	tf->cr &= ~0x10000000;

	td2->td_frame = tf;

	cf = (struct callframe *)tf - 1;
	memset(cf, 0, sizeof(struct callframe));
	cf->cf_func = (register_t)fork_return;
	cf->cf_arg0 = (register_t)td2;
	cf->cf_arg1 = (register_t)tf;

	pcb->pcb_sp = (register_t)cf;
	pcb->pcb_lr = (register_t)fork_trampoline;
	pcb->pcb_cpu.aim.usr = kernel_pmap->pm_sr[USER_SR];

	/* Setup to release spin count in fork_exit(). */
	td2->td_md.md_spinlock_count = 1;
	td2->td_md.md_saved_msr = PSL_KERNSET;

	/*
	 * Now cpu_switch() can schedule the new process.
	 */
}
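
/*
 * Sketch of the child's kernel stack as laid out above (addresses fall
 * from the top of td2's kstack; the offsets are implied by the pointer
 * arithmetic in cpu_fork()):
 *
 *	struct pcb		(top, aligned down by the ~0x2f mask)
 *	struct trapframe	(td2->td_frame == (struct trapframe *)pcb - 1)
 *	struct callframe	(pcb->pcb_sp == (struct callframe *)tf - 1)
 *
 * fork_trampoline passes cf_func/cf_arg0/cf_arg1 through fork_exit(),
 * which ends up calling fork_return(td2, tf) to finish the return to
 * user mode.
 */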

/*
 * Intercept the return address from a freshly forked process that has NOT
 * been scheduled yet.
 *
 * This is needed to make kernel threads stay in kernel mode.
 */
void
cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
{
	struct callframe *cf;

	CTR4(KTR_PROC, "%s called with td=%08x func=%08x arg=%08x",
	    __func__, (u_int)td, (u_int)func, (u_int)arg);

	cf = (struct callframe *)td->td_pcb->pcb_sp;

	cf->cf_func = (register_t)func;
	cf->cf_arg0 = (register_t)arg;
}

void
cpu_exit(struct thread *td)
{
}

/*
 * Reset back to firmware.
 */
void
cpu_reset(void)
{

	OF_reboot();
}

/*
 * Allocate a pool of sf_bufs (sendfile(2) or "super-fast" if you prefer. :-))
 */
static void
sf_buf_init(void *arg)
{
	struct sf_buf *sf_bufs;
	vm_offset_t sf_base;
	int i;

	/* Don't bother on systems with a direct map */
	if (hw_direct_map)
		return;

	nsfbufs = NSFBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nsfbufs", &nsfbufs);

	sf_buf_active = hashinit(nsfbufs, M_TEMP, &sf_buf_hashmask);
	TAILQ_INIT(&sf_buf_freelist);
	sf_base = kmem_alloc_nofault(kernel_map, nsfbufs * PAGE_SIZE);
	sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP,
	    M_NOWAIT | M_ZERO);

	for (i = 0; i < nsfbufs; i++) {
		sf_bufs[i].kva = sf_base + i * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&sf_buf_freelist, &sf_bufs[i], free_entry);
	}
	sf_buf_alloc_want = 0;
	mtx_init(&sf_buf_lock, "sf_buf", NULL, MTX_DEF);
}
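
/*
 * Sizing example (illustrative only): with maxusers = 64 and the
 * kern.ipc.nsfbufs tunable unset, NSFBUFS evaluates to
 * 512 + 64 * 16 = 1536 sf_bufs, and the loop above backs each of them
 * with one page of the reserved KVA.
 */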

/*
 * Get an sf_buf from the freelist. Will block if none are available.
 */
struct sf_buf *
sf_buf_alloc(struct vm_page *m, int flags)
{
	struct sf_head *hash_list;
	struct sf_buf *sf;
	int error;

	if (hw_direct_map) {
		/* Shortcut the direct mapped case */
		return ((struct sf_buf *)m);
	}

	hash_list = &sf_buf_active[SF_BUF_HASH(m)];
	mtx_lock(&sf_buf_lock);
	LIST_FOREACH(sf, hash_list, list_entry) {
		if (sf->m == m) {
			sf->ref_count++;
			if (sf->ref_count == 1) {
				TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
				nsfbufsused++;
				nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
			}
			goto done;
		}
	}

	while ((sf = TAILQ_FIRST(&sf_buf_freelist)) == NULL) {
		if (flags & SFB_NOWAIT)
			goto done;

		sf_buf_alloc_want++;
		mbstat.sf_allocwait++;
		error = msleep(&sf_buf_freelist, &sf_buf_lock,
		    (flags & SFB_CATCH) ? PCATCH | PVM : PVM, "sfbufa", 0);
		sf_buf_alloc_want--;

		/*
		 * If we got a signal, don't risk going back to sleep.
		 */
		if (error)
			goto done;
	}

	TAILQ_REMOVE(&sf_buf_freelist, sf, free_entry);
	if (sf->m != NULL)
		LIST_REMOVE(sf, list_entry);

	LIST_INSERT_HEAD(hash_list, sf, list_entry);
	sf->ref_count = 1;
	sf->m = m;
	nsfbufsused++;
	nsfbufspeak = imax(nsfbufspeak, nsfbufsused);
	pmap_qenter(sf->kva, &sf->m, 1);
done:
	mtx_unlock(&sf_buf_lock);
	return (sf);
}
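
/*
 * Typical caller pattern, sketched under the assumption of a
 * sendfile(2)-style consumer (hypothetical code, not compiled here):
 *
 *	struct sf_buf *sf;
 *
 *	sf = sf_buf_alloc(m, SFB_CATCH);
 *	if (sf == NULL)
 *		return (EINTR);		(interrupted while waiting)
 *	(... access the page through sf_buf_kva(sf) ...)
 *	sf_buf_free(sf);
 */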

/*
 * Detach mapped page and release resources back to the system.
 *
 * Remove a reference from the given sf_buf, adding it to the free
 * list when its reference count reaches zero. A freed sf_buf still,
 * however, retains its virtual-to-physical mapping until it is
 * recycled or reactivated by sf_buf_alloc(9).
 */
void
sf_buf_free(struct sf_buf *sf)
{

	if (hw_direct_map)
		return;

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;

		if (sf_buf_alloc_want > 0)
			wakeup_one(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}

/*
 * Software interrupt handler for queued VM system processing.
 */
void
swi_vm(void *dummy)
{
#if 0	/* XXX: Don't have busdma stuff yet */
	if (busdma_swi_pending != 0)
		busdma_swi();
#endif
}

/*
 * Tell whether this address is in some physical memory region.
 * Currently used by the kernel coredump code in order to avoid
 * dumping the ``ISA memory hole'' which could cause indefinite hangs,
 * or other unpredictable behaviour.
 */
int
is_physical_memory(vm_offset_t addr)
{

	/*
	 * stuff other tests for known memory-mapped devices (PCI?)
	 * here
	 */
	return (1);
}

/*
 * Threading functions
 */
void
cpu_thread_exit(struct thread *td)
{
}

void
cpu_thread_clean(struct thread *td)
{
}

void
cpu_thread_alloc(struct thread *td)
{
	struct pcb *pcb;

	pcb = (struct pcb *)((td->td_kstack + td->td_kstack_pages * PAGE_SIZE -
	    sizeof(struct pcb)) & ~0x2fU);
	td->td_pcb = pcb;
	td->td_frame = (struct trapframe *)pcb - 1;
}

void
cpu_thread_free(struct thread *td)
{
}

void
cpu_thread_swapin(struct thread *td)
{
}

void
cpu_thread_swapout(struct thread *td)
{
}

void
cpu_set_upcall(struct thread *td, struct thread *td0)
{
	struct pcb *pcb2;
	struct trapframe *tf;
	struct callframe *cf;

	pcb2 = td->td_pcb;

	/* Copy the upcall pcb */
	bcopy(td0->td_pcb, pcb2, sizeof(*pcb2));

	/* Create a stack for the new thread */
	tf = td->td_frame;
	bcopy(td0->td_frame, tf, sizeof(struct trapframe));
	tf->fixreg[FIRSTARG] = 0;
	tf->fixreg[FIRSTARG + 1] = 0;
	tf->cr &= ~0x10000000;

	/* Set registers for trampoline to user mode. */
	cf = (struct callframe *)tf - 1;
	memset(cf, 0, sizeof(struct callframe));
	cf->cf_func = (register_t)fork_return;
	cf->cf_arg0 = (register_t)td;
	cf->cf_arg1 = (register_t)tf;

	pcb2->pcb_sp = (register_t)cf;
	pcb2->pcb_lr = (register_t)fork_trampoline;
	pcb2->pcb_cpu.aim.usr = kernel_pmap->pm_sr[USER_SR];

	/* Setup to release spin count in fork_exit(). */
	td->td_md.md_spinlock_count = 1;
	td->td_md.md_saved_msr = PSL_KERNSET;
}

void
cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
    stack_t *stack)
{
	struct trapframe *tf;
	uint32_t sp;

	tf = td->td_frame;
	/* align stack and alloc space for frame ptr and saved LR */
	sp = ((uint32_t)stack->ss_sp + stack->ss_size - sizeof(uint64_t)) &
	    ~0x1f;
	bzero(tf, sizeof(struct trapframe));

	tf->fixreg[1] = (register_t)sp;
	tf->fixreg[3] = (register_t)arg;
	tf->srr0 = (register_t)entry;
	tf->srr1 = PSL_MBO | PSL_USERSET | PSL_FE_DFLT;
	td->td_pcb->pcb_flags = 0;

	td->td_retval[0] = (register_t)entry;
	td->td_retval[1] = 0;
}
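
/*
 * Worked example of the alignment above (hypothetical values): with
 * ss_sp = 0x0f001000 and ss_size = 0x10000, the top of the stack is
 * 0x0f011000; subtracting sizeof(uint64_t) gives 0x0f010ff8, and
 * masking with ~0x1f rounds down to 0x0f010fe0, a 32-byte-aligned
 * stack pointer with room left for the frame pointer and saved LR.
 */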

int
cpu_set_user_tls(struct thread *td, void *tls_base)
{

	/*
	 * Bias r2 as the 32-bit powerpc TLS ABI expects: the thread
	 * pointer sits 0x7008 bytes above tls_base (a 0x7000 offset
	 * plus, presumably, the 8-byte TCB).
	 */
	td->td_frame->fixreg[2] = (register_t)tls_base + 0x7008;
	return (0);
}
|