Add support for starting secondary cpus in the kernel, as opposed to relying
on the loader to do it.  Improve the smp startup code to be less racy and to
defer certain things until the right time.  This almost boots to single user
on my dual Ultra 60, but it is still very fragile:

SMP: AP CPU #1 Launched!
Enter full pathname of shell or RETURN for /bin/sh:
# ls
Debugger("trapsig")
Stopped at      Debugger+0x1c:  ta              %xcc, 1
db> heh
No such command
db>
jake 2002-03-04 07:12:36 +00:00
parent c87ee2427d
commit 4adfe1f199
9 changed files with 357 additions and 203 deletions
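
The new startup handshake is easiest to see end to end.  Below is a minimal
userland sketch of the boot-processor side of its first phase, pieced together
from the machine/smp.h, mp_startup and cpu_mp_start() hunks further down.  The
CPU_* state names and csa_* fields come from the diff; the struct layout,
firmware_start_cpu() and read_tick() are invented stand-ins for the firmware
"SUNW,start-cpu" call (sun4u_startcpu()) and the %tick register, so treat this
as an illustration of the protocol, not the kernel code.

#include <stdint.h>

/* State values, as defined in the new machine/smp.h hunk below. */
#define	CPU_CLKSYNC	1
#define	CPU_INIT	2
#define	CPU_BOOTSTRAP	3

/* Simplified stand-in for struct cpu_start_args from the diff. */
struct cpu_start_args {
	unsigned int	csa_mid;
	unsigned int	csa_state;
	uint64_t	csa_tick;
	uint64_t	csa_ver;
};

/* Invented helpers: the real code calls the firmware's "SUNW,start-cpu"
 * service and reads the %tick register. */
static void	firmware_start_cpu(unsigned int mid) { (void)mid; }
static uint64_t	read_tick(void) { return (0); }

/*
 * Boot-processor side of the first phase, roughly what cpu_mp_start() does
 * for each candidate cpu: kick the AP into the trampoline, hand it a %tick
 * value, and wait until it has recorded its version in the shared area.
 */
static void
bsp_start_one_ap(volatile struct cpu_start_args *csa, unsigned int mid)
{

	csa->csa_state = 0;
	firmware_start_cpu(mid);		/* AP enters mp_tramp -> mp_startup */
	while (csa->csa_state != CPU_CLKSYNC)
		;				/* AP reached the kernel */
	csa->csa_tick = read_tick();		/* give it a %tick to load */
	while (csa->csa_state != CPU_INIT)
		;				/* tick synced; csa_ver valid */
	csa->csa_tick = 0;
	/* cpu_mp_unleash() later publishes the per-cpu TTEs and csa_mid and
	 * waits for CPU_BOOTSTRAP before releasing the next cpu. */
}

In the diff this whole exchange runs with interrupts disabled on the boot
processor, and both sides communicate only through the shared argument page,
since the AP has no stack or per-cpu data of its own yet.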


@@ -38,12 +38,16 @@
#define ALT_STACK_SIZE 128
struct vmspace;
/*
* Inside the kernel, the globally reserved register g7 is used to
* point at the globaldata structure.
*/
#define PCPU_MD_FIELDS \
struct intr_queue pc_iq; /* interrupt queue */ \
struct vmspace *pc_vmspace; \
vm_offset_t pc_addr; \
u_int pc_mid; \
u_int pc_tlb_ctx; \
u_int pc_tlb_ctx_max; \
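
The new pc_tlb_ctx and pc_tlb_ctx_max fields (together with pc_tlb_ctx_min,
visible in the genassym.c and cpu_mp_unleash() hunks) carve the TLB context
space up per cpu.  A small sketch of that partitioning, following the
"(8192 - 1) / mp_ncpus" arithmetic in cpu_mp_unleash() in the last hunk; the
8192 total and the context-0-belongs-to-the-kernel reading are my inference
from that code, not something the header states:

#define	TLB_CTX_COUNT	8192		/* 13-bit context field (assumption) */

struct ctx_range {
	unsigned int	min;		/* becomes pc_tlb_ctx / pc_tlb_ctx_min */
	unsigned int	max;		/* becomes pc_tlb_ctx_max */
};

static void
assign_tlb_contexts(struct ctx_range *r, int ncpus)
{
	unsigned int ctx_min = 1;	/* skip the kernel's context 0 */
	unsigned int ctx_inc = (TLB_CTX_COUNT - 1) / ncpus;
	int i;

	for (i = 0; i < ncpus; i++) {
		r[i].min = ctx_min;
		r[i].max = ctx_min + ctx_inc;
		ctx_min += ctx_inc;	/* equal, non-overlapping windows */
	}
}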


@@ -29,17 +29,14 @@
#ifndef _MACHINE_SMP_H_
#define _MACHINE_SMP_H_
#define CPU_INITING 1
#define CPU_INITED 2
#define CPU_REJECT 3
#define CPU_STARTING 4
#define CPU_STARTED 5
#define CPU_BOOTSTRAPING 6
#define CPU_BOOTSTRAPPED 7
#define CPU_CLKSYNC 1
#define CPU_INIT 2
#define CPU_BOOTSTRAP 3
#ifndef LOCORE
#include <machine/intr_machdep.h>
#include <machine/tte.h>
#define IDR_BUSY (1<<0)
#define IDR_NACK (1<<1)
@@ -53,8 +50,9 @@
struct cpu_start_args {
u_int csa_mid;
u_int csa_state;
u_long csa_data;
vm_offset_t csa_va;
u_long csa_tick;
u_long csa_ver;
struct tte csa_ttes[PCPU_PAGES];
};
struct ipi_level_args {
@@ -82,11 +80,21 @@ void ipi_selected(u_int cpus, u_int ipi);
void ipi_all(u_int ipi);
void ipi_all_but_self(u_int ipi);
vm_offset_t mp_tramp_alloc(void);
extern struct ipi_level_args ipi_level_args;
extern struct ipi_tlb_args ipi_tlb_args;
extern int mp_ncpus;
extern vm_offset_t mp_tramp;
extern char *mp_tramp_code;
extern u_long mp_tramp_code_len;
extern u_long mp_tramp_tlb_slots;
extern u_long mp_tramp_func;
extern void mp_startup(void);
extern char tl_ipi_level[];
extern char tl_ipi_test[];
extern char tl_ipi_tlb_context_demap[];
@@ -152,7 +160,7 @@ ipi_wait(void *cookie)
if ((count = cookie) != NULL) {
atomic_subtract_int(count, 1);
while (*count != 0)
membar(LoadStore);
;
}
}
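
The ipi_wait() hunk above appears to replace the membar(LoadStore) in the spin
body with a bare semicolon; the surrounding pattern is a plain counter
rendezvous.  A userspace analogue using C11 atomics rather than the kernel's
atomic_subtract_int(), for illustration only:

#include <stdatomic.h>
#include <stddef.h>

/*
 * Each participant decrements the shared count and then busy-waits until the
 * count drains to zero, i.e. until everyone has checked in.
 */
static void
rendezvous_wait(atomic_int *count)
{

	if (count != NULL) {
		atomic_fetch_sub(count, 1);
		while (atomic_load(count) != 0)
			;	/* spin; the diff drops the barrier here */
	}
}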


@@ -33,8 +33,18 @@
#define UPA_CR_MID_SHIFT (17)
#define UPA_CR_MID_SIZE (5)
#define UPA_CR_MID_MASK (((1 << UPA_CR_MID_SIZE) - 1) << UPA_CR_MID_SHIFT)
#define UPA_CR_MID_MASK \
(((1 << UPA_CR_MID_SIZE) - 1) << UPA_CR_MID_SHIFT)
#define UPA_CR_GET_MID(cr) ((cr & UPA_CR_MID_MASK) >> UPA_CR_MID_SHIFT)
#ifdef LOCORE
#define UPA_GET_MID(r1) \
ldxa [%g0] ASI_UPA_CONFIG_REG, r1 ; \
srlx r1, UPA_CR_MID_SHIFT, r1 ; \
and r1, (1 << UPA_CR_MID_SIZE) - 1, r1
#endif
#endif /* _MACHINE_UPA_H_ */
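
UPA_GET_MID() is the assembler twin of UPA_CR_GET_MID(): read the UPA config
register and pull the 5-bit module id out of the field starting at bit 17.
A C restatement of just the bit manipulation (the privileged ldxa itself is
not shown), equivalent to the macros above:

#define	UPA_CR_MID_SHIFT	17
#define	UPA_CR_MID_SIZE		5

/* Extract the module id field, exactly as UPA_CR_GET_MID() does. */
static inline unsigned int
upa_config_to_mid(unsigned long cr)
{

	return ((cr >> UPA_CR_MID_SHIFT) & ((1UL << UPA_CR_MID_SIZE) - 1));
}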


@@ -65,6 +65,7 @@
#include <machine/tlb.h>
#include <machine/tsb.h>
#include <machine/tstate.h>
#include <machine/upa.h>
#include <machine/utrap.h>
ASSYM(KERNBASE, KERNBASE);
@@ -106,13 +107,14 @@ ASSYM(PTR_SHIFT, PTR_SHIFT);
ASSYM(PAGE_SHIFT, PAGE_SHIFT);
ASSYM(PAGE_MASK, PAGE_MASK);
ASSYM(CPU_INITED, CPU_INITED);
ASSYM(CPU_STARTED, CPU_STARTED);
ASSYM(CPU_CLKSYNC, CPU_CLKSYNC);
ASSYM(CPU_INIT, CPU_INIT);
ASSYM(CSA_MID, offsetof(struct cpu_start_args, csa_mid));
ASSYM(CSA_STATE, offsetof(struct cpu_start_args, csa_state));
ASSYM(CSA_DATA, offsetof(struct cpu_start_args, csa_data));
ASSYM(CSA_VA, offsetof(struct cpu_start_args, csa_va));
ASSYM(CSA_TICK, offsetof(struct cpu_start_args, csa_tick));
ASSYM(CSA_VER, offsetof(struct cpu_start_args, csa_ver));
ASSYM(CSA_TTES, offsetof(struct cpu_start_args, csa_ttes));
ASSYM(KTR_PROC, KTR_PROC);
ASSYM(KTR_TRAP, KTR_TRAP);
@@ -158,6 +160,7 @@ ASSYM(PC_MID, offsetof(struct pcpu, pc_mid));
ASSYM(PC_TLB_CTX, offsetof(struct pcpu, pc_tlb_ctx));
ASSYM(PC_TLB_CTX_MAX, offsetof(struct pcpu, pc_tlb_ctx_max));
ASSYM(PC_TLB_CTX_MIN, offsetof(struct pcpu, pc_tlb_ctx_min));
ASSYM(PC_VMSPACE, offsetof(struct pcpu, pc_vmspace));
ASSYM(PC_SIZEOF, sizeof(struct pcpu));
ASSYM(IH_SHIFT, IH_SHIFT);
@@ -273,4 +276,7 @@ ASSYM(TF_PIL, offsetof(struct trapframe, tf_pil));
ASSYM(TF_WSTATE, offsetof(struct trapframe, tf_wstate));
ASSYM(TF_SIZEOF, sizeof(struct trapframe));
ASSYM(UPA_CR_MID_SHIFT, UPA_CR_MID_SHIFT);
ASSYM(UPA_CR_MID_SIZE, UPA_CR_MID_SIZE);
ASSYM(UT_MAX, UT_MAX);
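
For context on the ASSYM() additions: genassym turns C structure offsets into
constants the assembler sources can use, so that mp_locore can write things
like "ldx [%l0 + CSA_TICK], %l1".  A stand-alone sketch of the idea; this is
not FreeBSD's actual genassym machinery, and the field layout is made up:

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for struct cpu_start_args; real layout may differ. */
struct csa_sketch {
	unsigned int	csa_mid;
	unsigned int	csa_state;
	unsigned long	csa_tick;
	unsigned long	csa_ver;
};

int
main(void)
{

	/* Emit assembler-consumable constants, one per ASSYM()-style entry. */
	printf("#define\tCSA_STATE\t%#lx\n",
	    (unsigned long)offsetof(struct csa_sketch, csa_state));
	printf("#define\tCSA_TICK\t%#lx\n",
	    (unsigned long)offsetof(struct csa_sketch, csa_tick));
	printf("#define\tCSA_VER\t%#lx\n",
	    (unsigned long)offsetof(struct csa_sketch, csa_ver));
	return (0);
}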


@@ -41,7 +41,7 @@
.set kernbase,KERNBASE
/*
* void _start(caddr_t metadata, u_int *state, u_int mid, u_int bootmid,
* void _start(caddr_t metadata, u_long o1, u_long o2, u_long o3,
* u_long ofw_vec)
*
* XXX: in am smp system the other cpus are started in the loader, but since
@@ -60,19 +60,6 @@ ENTRY(_start)
wrpr %g0, 0, %pil
wr %g0, 0, %fprs
#ifdef SMP
/*
* If we're not the boot processor, go do other stuff.
*/
cmp %o2, %o3
be %xcc, 1f
nop
call _mp_start
nop
sir
1:
#endif
/*
* Get onto our per-cpu panic stack, which precedes the struct pcpu in
* the per-cpu page.
@@ -109,8 +96,7 @@ END(_start)
* void cpu_setregs(struct pcpu *pc)
*/
ENTRY(cpu_setregs)
ldx [%o0 + PC_CURTHREAD], %o1
ldx [%o1 + TD_PCB], %o1
ldx [%o0 + PC_CURPCB], %o1
/*
* Disable interrupts, normal globals.
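
The cpu_setregs() change above replaces the two-load chain through curthread
with a single load of the new pc_curpcb cache, which cpu_mp_unleash() fills in
before the AP ever runs (pc->pc_curpcb = pc->pc_curthread->td_pcb in the last
hunk).  A plain-C restatement of the before/after, with simplified stand-in
types:

struct pcb;
struct thread {
	struct pcb	*td_pcb;
};
struct pcpu_sketch {
	struct thread	*pc_curthread;
	struct pcb	*pc_curpcb;	/* cached pc_curthread->td_pcb */
};

/* Old: ldx [%o0 + PC_CURTHREAD] followed by ldx [%o1 + TD_PCB]. */
static struct pcb *
curpcb_old(struct pcpu_sketch *pc)
{

	return (pc->pc_curthread->td_pcb);
}

/* New: a single ldx [%o0 + PC_CURPCB]. */
static struct pcb *
curpcb_new(struct pcpu_sketch *pc)
{

	return (pc->pc_curpcb);
}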


@@ -41,7 +41,7 @@
.set kernbase,KERNBASE
/*
* void _start(caddr_t metadata, u_int *state, u_int mid, u_int bootmid,
* void _start(caddr_t metadata, u_long o1, u_long o2, u_long o3,
* u_long ofw_vec)
*
* XXX: in am smp system the other cpus are started in the loader, but since
@@ -60,19 +60,6 @@ ENTRY(_start)
wrpr %g0, 0, %pil
wr %g0, 0, %fprs
#ifdef SMP
/*
* If we're not the boot processor, go do other stuff.
*/
cmp %o2, %o3
be %xcc, 1f
nop
call _mp_start
nop
sir
1:
#endif
/*
* Get onto our per-cpu panic stack, which precedes the struct pcpu in
* the per-cpu page.
@@ -109,8 +96,7 @@ END(_start)
* void cpu_setregs(struct pcpu *pc)
*/
ENTRY(cpu_setregs)
ldx [%o0 + PC_CURTHREAD], %o1
ldx [%o1 + TD_PCB], %o1
ldx [%o0 + PC_CURPCB], %o1
/*
* Disable interrupts, normal globals.


@@ -30,83 +30,130 @@
#include <machine/asmacros.h>
#include <machine/ktr.h>
#include <machine/pstate.h>
#include <machine/upa.h>
#include "assym.s"
.register %g2, #ignore
.register %g3, #ignore
.text
.align 16
1: rd %pc, %l0
ldx [%l0 + (4f-1b)], %l1
add %l0, (6f-1b), %l2
clr %l3
2: cmp %l3, %l1
be %xcc, 3f
nop
ldx [%l2 + TTE_VPN], %l4
ldx [%l2 + TTE_DATA], %l5
sllx %l4, PAGE_SHIFT, %l4
wr %g0, ASI_DMMU, %asi
stxa %l4, [%g0 + AA_DMMU_TAR] %asi
stxa %l5, [%g0] ASI_DTLB_DATA_IN_REG
wr %g0, ASI_IMMU, %asi
stxa %l4, [%g0 + AA_IMMU_TAR] %asi
stxa %l5, [%g0] ASI_ITLB_DATA_IN_REG
membar #Sync
flush %l4
add %l2, 1 << TTE_SHIFT, %l2
add %l3, 1, %l3
ba %xcc, 2b
nop
3: ldx [%l0 + (5f-1b)], %l1
jmpl %l1, %g0
nop
.align 16
4: .xword 0x0
5: .xword 0x0
6:
DATA(mp_tramp_code)
.xword 1b
DATA(mp_tramp_code_len)
.xword 6b-1b
DATA(mp_tramp_tlb_slots)
.xword 4b-1b
DATA(mp_tramp_func)
.xword 5b-1b
/*
* void _mp_start(u_long o0, u_int *state, u_int mid, u_long o3, u_long o4)
* void mp_startup(void)
*/
ENTRY(_mp_start)
/*
* Give away our stack to another processor that may be starting in the
* loader.
*/
clr %sp
/*
* Inform the boot processor which is waiting in the loader that we
* made it.
*/
mov CPU_INITED, %l0
stw %l0, [%o1]
membar #StoreLoad
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "_mp_start: cpu %d entered kernel"
, %g1, %g2, %g3, 7, 8, 9)
stx %o2, [%g1 + KTR_PARM1]
9:
#endif
ENTRY(mp_startup)
wrpr %g0, PSTATE_NORMAL, %pstate
wrpr %g0, 0, %cleanwin
wrpr %g0, 0, %pil
wr %g0, 0, %fprs
SET(cpu_start_args, %l1, %l0)
mov CPU_CLKSYNC, %l1
membar #StoreLoad
stw %l1, [%l0 + CSA_STATE]
1: ldx [%l0 + CSA_TICK], %l1
brz %l1, 1b
nop
wrpr %l1, 0, %tick
UPA_GET_MID(%o0)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "mp_start: cpu %d entered kernel"
, %g1, %g2, %g3, 7, 8, 9)
stx %o0, [%g1 + KTR_PARM1]
9:
#endif
rdpr %ver, %l1
stx %l1, [%l0 + CSA_VER]
/*
* Wait till its our turn to start.
* Inform the boot processor we have inited.
*/
1: membar #StoreLoad
lduw [%l0 + CSA_MID], %l1
cmp %l1, %o2
mov CPU_INIT, %l1
membar #LoadStore
stw %l1, [%l0 + CSA_STATE]
/*
* Wait till its our turn to bootstrap.
*/
1: lduw [%l0 + CSA_MID], %l1
cmp %l1, %o0
bne %xcc, 1b
nop
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "_mp_start: cpu %d got start signal"
, %g1, %g2, %g3, 7, 8, 9)
stx %o2, [%g1 + KTR_PARM1]
stx %o0, [%g1 + KTR_PARM1]
9:
#endif
/*
* Find our per-cpu page and the tte data that we will use to map it.
*/
ldx [%l0 + CSA_DATA], %l1
ldx [%l0 + CSA_VA], %l2
ldx [%l0 + CSA_TTES + TTE_VPN], %l1
ldx [%l0 + CSA_TTES + TTE_DATA], %l2
/*
* Map the per-cpu page. It uses a locked tlb entry.
*/
wr %g0, ASI_DMMU, %asi
stxa %l2, [%g0 + AA_DMMU_TAR] %asi
stxa %l1, [%g0] ASI_DTLB_DATA_IN_REG
sllx %l1, PAGE_SHIFT, %l1
stxa %l1, [%g0 + AA_DMMU_TAR] %asi
stxa %l2, [%g0] ASI_DTLB_DATA_IN_REG
membar #Sync
/*
* Get onto our per-cpu panic stack, which precedes the struct pcpu
* in the per-cpu page.
*/
set PAGE_SIZE - PC_SIZEOF, %l3
add %l2, %l3, %l2
sub %l2, SPOFF + CCFSZ, %sp
/*
* Inform the boot processor that we're about to start.
*/
mov CPU_STARTED, %l3
stw %l3, [%l0 + CSA_STATE]
membar #StoreLoad
set PCPU_PAGES * PAGE_SIZE - PC_SIZEOF, %l2
add %l1, %l2, %l1
sub %l1, SPOFF + CCFSZ, %sp
/*
* Enable interrupts.
@@ -131,7 +178,7 @@ ENTRY(_mp_start)
* And away we go. This doesn't return.
*/
call cpu_mp_bootstrap
mov %l2, %o0
mov %l1, %o0
sir
! NOTREACHED
END(_mp_start)
END(mp_startup)
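
In C terms, the position-independent trampoline above (mp_tramp_code) does
roughly the following on a freshly started cpu.  The struct layout and helper
names here are invented for illustration; the two patch slots and the TTE
copies after the code are filled in by mp_tramp_alloc() in the cpu_mp_start()
hunk further down.

#include <stdint.h>

#define	PAGE_SHIFT	13		/* 8K pages, as on sparc64 */

struct tte_sketch {			/* the vpn/data pair the loop loads */
	uint64_t	tte_vpn;
	uint64_t	tte_data;
};

/* Stand-ins for the stxa/membar/flush sequences in the assembly. */
static void	install_dtlb(uint64_t va, uint64_t data) { (void)va; (void)data; }
static void	install_itlb(uint64_t va, uint64_t data) { (void)va; (void)data; }

struct tramp_sketch {
	uint64_t	tlb_slots;	/* patched with kernel_tlb_slots */
	void		(*func)(void);	/* patched with mp_startup */
	struct tte_sketch ttes[8];	/* copies of kernel_ttes[] follow the code */
};

static void
tramp_logic(struct tramp_sketch *t)
{
	uint64_t i, va;

	/* Lock the kernel's mappings into both TLBs so that jumping into the
	 * kernel text below cannot fault. */
	for (i = 0; i < t->tlb_slots; i++) {
		va = t->ttes[i].tte_vpn << PAGE_SHIFT;
		install_dtlb(va, t->ttes[i].tte_data);
		install_itlb(va, t->ttes[i].tte_data);
	}
	t->func();			/* tail-jump to mp_startup; no return */
}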


@@ -30,83 +30,130 @@
#include <machine/asmacros.h>
#include <machine/ktr.h>
#include <machine/pstate.h>
#include <machine/upa.h>
#include "assym.s"
.register %g2, #ignore
.register %g3, #ignore
.text
.align 16
1: rd %pc, %l0
ldx [%l0 + (4f-1b)], %l1
add %l0, (6f-1b), %l2
clr %l3
2: cmp %l3, %l1
be %xcc, 3f
nop
ldx [%l2 + TTE_VPN], %l4
ldx [%l2 + TTE_DATA], %l5
sllx %l4, PAGE_SHIFT, %l4
wr %g0, ASI_DMMU, %asi
stxa %l4, [%g0 + AA_DMMU_TAR] %asi
stxa %l5, [%g0] ASI_DTLB_DATA_IN_REG
wr %g0, ASI_IMMU, %asi
stxa %l4, [%g0 + AA_IMMU_TAR] %asi
stxa %l5, [%g0] ASI_ITLB_DATA_IN_REG
membar #Sync
flush %l4
add %l2, 1 << TTE_SHIFT, %l2
add %l3, 1, %l3
ba %xcc, 2b
nop
3: ldx [%l0 + (5f-1b)], %l1
jmpl %l1, %g0
nop
.align 16
4: .xword 0x0
5: .xword 0x0
6:
DATA(mp_tramp_code)
.xword 1b
DATA(mp_tramp_code_len)
.xword 6b-1b
DATA(mp_tramp_tlb_slots)
.xword 4b-1b
DATA(mp_tramp_func)
.xword 5b-1b
/*
* void _mp_start(u_long o0, u_int *state, u_int mid, u_long o3, u_long o4)
* void mp_startup(void)
*/
ENTRY(_mp_start)
/*
* Give away our stack to another processor that may be starting in the
* loader.
*/
clr %sp
/*
* Inform the boot processor which is waiting in the loader that we
* made it.
*/
mov CPU_INITED, %l0
stw %l0, [%o1]
membar #StoreLoad
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "_mp_start: cpu %d entered kernel"
, %g1, %g2, %g3, 7, 8, 9)
stx %o2, [%g1 + KTR_PARM1]
9:
#endif
ENTRY(mp_startup)
wrpr %g0, PSTATE_NORMAL, %pstate
wrpr %g0, 0, %cleanwin
wrpr %g0, 0, %pil
wr %g0, 0, %fprs
SET(cpu_start_args, %l1, %l0)
mov CPU_CLKSYNC, %l1
membar #StoreLoad
stw %l1, [%l0 + CSA_STATE]
1: ldx [%l0 + CSA_TICK], %l1
brz %l1, 1b
nop
wrpr %l1, 0, %tick
UPA_GET_MID(%o0)
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "mp_start: cpu %d entered kernel"
, %g1, %g2, %g3, 7, 8, 9)
stx %o0, [%g1 + KTR_PARM1]
9:
#endif
rdpr %ver, %l1
stx %l1, [%l0 + CSA_VER]
/*
* Wait till its our turn to start.
* Inform the boot processor we have inited.
*/
1: membar #StoreLoad
lduw [%l0 + CSA_MID], %l1
cmp %l1, %o2
mov CPU_INIT, %l1
membar #LoadStore
stw %l1, [%l0 + CSA_STATE]
/*
* Wait till its our turn to bootstrap.
*/
1: lduw [%l0 + CSA_MID], %l1
cmp %l1, %o0
bne %xcc, 1b
nop
#if KTR_COMPILE & KTR_SMP
CATR(KTR_SMP, "_mp_start: cpu %d got start signal"
, %g1, %g2, %g3, 7, 8, 9)
stx %o2, [%g1 + KTR_PARM1]
stx %o0, [%g1 + KTR_PARM1]
9:
#endif
/*
* Find our per-cpu page and the tte data that we will use to map it.
*/
ldx [%l0 + CSA_DATA], %l1
ldx [%l0 + CSA_VA], %l2
ldx [%l0 + CSA_TTES + TTE_VPN], %l1
ldx [%l0 + CSA_TTES + TTE_DATA], %l2
/*
* Map the per-cpu page. It uses a locked tlb entry.
*/
wr %g0, ASI_DMMU, %asi
stxa %l2, [%g0 + AA_DMMU_TAR] %asi
stxa %l1, [%g0] ASI_DTLB_DATA_IN_REG
sllx %l1, PAGE_SHIFT, %l1
stxa %l1, [%g0 + AA_DMMU_TAR] %asi
stxa %l2, [%g0] ASI_DTLB_DATA_IN_REG
membar #Sync
/*
* Get onto our per-cpu panic stack, which precedes the struct pcpu
* in the per-cpu page.
*/
set PAGE_SIZE - PC_SIZEOF, %l3
add %l2, %l3, %l2
sub %l2, SPOFF + CCFSZ, %sp
/*
* Inform the boot processor that we're about to start.
*/
mov CPU_STARTED, %l3
stw %l3, [%l0 + CSA_STATE]
membar #StoreLoad
set PCPU_PAGES * PAGE_SIZE - PC_SIZEOF, %l2
add %l1, %l2, %l1
sub %l1, SPOFF + CCFSZ, %sp
/*
* Enable interrupts.
@@ -131,7 +178,7 @@ ENTRY(_mp_start)
* And away we go. This doesn't return.
*/
call cpu_mp_bootstrap
mov %l2, %o0
mov %l1, %o0
sir
! NOTREACHED
END(_mp_start)
END(mp_startup)


@@ -77,6 +77,7 @@
#include <machine/asi.h>
#include <machine/md_var.h>
#include <machine/smp.h>
#include <machine/tlb.h>
#include <machine/tte.h>
static ih_func_t cpu_ipi_ast;
@@ -90,10 +91,36 @@ static ih_func_t cpu_ipi_stop;
*/
struct cpu_start_args cpu_start_args = { -1, -1, 0, 0 };
vm_offset_t mp_tramp;
static struct mtx ap_boot_mtx;
u_int mp_boot_mid;
void cpu_mp_unleash(void *);
SYSINIT(cpu_mp_unleash, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL);
vm_offset_t
mp_tramp_alloc(void)
{
struct tte *tp;
char *v;
int i;
v = OF_claim(NULL, PAGE_SIZE, PAGE_SIZE);
if (v == NULL)
panic("mp_tramp_alloc");
bcopy(mp_tramp_code, v, mp_tramp_code_len);
*(u_long *)(v + mp_tramp_tlb_slots) = kernel_tlb_slots;
*(u_long *)(v + mp_tramp_func) = (u_long)mp_startup;
tp = (struct tte *)(v + mp_tramp_code_len);
for (i = 0; i < kernel_tlb_slots; i++)
tp[i] = kernel_ttes[i];
for (i = 0; i < PAGE_SIZE; i += sizeof(long))
flush(v + i);
return (vm_offset_t)v;
}
/*
* Probe for other cpus.
*/
@@ -119,6 +146,31 @@ cpu_mp_probe(void)
return (cpus > 1);
}
static void
sun4u_startcpu(phandle_t cpu, void *func, u_long arg)
{
static struct {
cell_t name;
cell_t nargs;
cell_t nreturns;
cell_t cpu;
cell_t func;
cell_t arg;
} args = {
(cell_t)"SUNW,start-cpu",
3,
0,
0,
0,
0
};
args.cpu = cpu;
args.func = (cell_t)func;
args.arg = (cell_t)arg;
openfirmware(&args);
}
/*
* Fire up any non-boot processors.
*/
@@ -129,12 +181,12 @@ cpu_mp_start(void)
struct pcpu *pc;
phandle_t child;
phandle_t root;
vm_offset_t pa;
vm_offset_t va;
char buf[128];
u_long data;
u_int mid;
u_int clock;
int cpuid;
u_int mid;
u_long s;
mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN);
@@ -153,46 +205,31 @@
panic("cpu_mp_start: can't get module id");
if (mid == mp_boot_mid)
continue;
if (OF_getprop(child, "clock-frequency", &clock,
sizeof(clock)) <= 0)
panic("cpu_mp_start: can't get clock");
/*
* Found a non-boot processor. It is currently spinning in
* _mp_start, and it has no stack. Allocate a per-cpu page
* for it, which it will use as a bootstrap stack, and pass
* it through the argument area.
*/
cpuid = mp_ncpus++;
va = kmem_alloc(kernel_map, PAGE_SIZE);
pa = pmap_kextract(va);
if (pa == 0)
panic("cpu_mp_start: pmap_kextract\n");
pc = (struct pcpu *)(va + PAGE_SIZE) - 1;
pcpu_init(pc, cpuid, sizeof(*pc));
pc->pc_mid = mid;
data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
TD_L | TD_CP | TD_CV | TD_P | TD_W;
/*
* Initialize the argument area to start this cpu.
* Note, order is important here. We must set the pcpu pointer
* and the tte data before letting it loose.
*/
csa->csa_data = data;
csa->csa_va = va;
csa->csa_state = 0;
sun4u_startcpu(child, (void *)mp_tramp, 0);
s = intr_disable();
while (csa->csa_state != CPU_CLKSYNC)
;
membar(StoreLoad);
csa->csa_mid = mid;
csa->csa_state = CPU_STARTING;
while (csa->csa_state == CPU_STARTING)
membar(StoreLoad);
if (csa->csa_state != CPU_STARTED)
panic("cpu_mp_start: bad state %d for cpu %d\n",
csa->csa_state, mid);
csa->csa_state = CPU_BOOTSTRAPING;
while (csa->csa_state == CPU_BOOTSTRAPING)
membar(StoreLoad);
if (csa->csa_state != CPU_BOOTSTRAPPED)
panic("cpu_mp_start: bad state %d for cpu %d\n",
csa->csa_state, mid);
cpu_ipi_send(mid, 0, (u_long)tl_ipi_test, 0);
csa->csa_tick = rd(tick);
while (csa->csa_state != CPU_INIT)
;
csa->csa_tick = 0;
intr_restore(s);
cpuid = mp_ncpus++;
cpu_identify(csa->csa_ver, clock, cpuid);
va = kmem_alloc(kernel_map, PCPU_PAGES * PAGE_SIZE);
pc = (struct pcpu *)(va + (PCPU_PAGES * PAGE_SIZE)) - 1;
pcpu_init(pc, cpuid, sizeof(*pc));
pc->pc_addr = va;
pc->pc_mid = mid;
all_cpus |= 1 << cpuid;
}
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
@@ -201,48 +238,71 @@ cpu_mp_start(void)
void
cpu_mp_announce(void)
{
TODO;
}
void
cpu_mp_unleash(void *v)
{
volatile struct cpu_start_args *csa;
struct pcpu *pc;
vm_offset_t pa;
vm_offset_t va;
u_int ctx_min;
u_int ctx_inc;
u_long s;
int i;
ctx_min = 1;
ctx_inc = (8192 - 1) / mp_ncpus;
csa = &cpu_start_args;
SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
pc->pc_tlb_ctx = ctx_min;
pc->pc_tlb_ctx_min = ctx_min;
pc->pc_tlb_ctx_max = ctx_min + ctx_inc;
ctx_min += ctx_inc;
if (pc->pc_cpuid == PCPU_GET(cpuid))
continue;
KASSERT(pc->pc_idlethread != NULL,
("cpu_mp_unleash: idlethread"));
KASSERT(pc->pc_curthread == pc->pc_idlethread,
("cpu_mp_unleash: curthread"));
pc->pc_curpcb = pc->pc_curthread->td_pcb;
for (i = 0; i < PCPU_PAGES; i++) {
va = pc->pc_addr + i * PAGE_SIZE;
pa = pmap_kextract(va);
if (pa == 0)
panic("cpu_mp_unleash: pmap_kextract\n");
csa->csa_ttes[i].tte_vpn = TV_VPN(va);
csa->csa_ttes[i].tte_data = TD_V | TD_8K | TD_PA(pa) |
TD_L | TD_CP | TD_CV | TD_P | TD_W;
}
csa->csa_state = 0;
csa->csa_mid = pc->pc_mid;
s = intr_disable();
while (csa->csa_state != CPU_BOOTSTRAP)
;
intr_restore(s);
}
}
void
cpu_mp_bootstrap(struct pcpu *pc)
{
struct cpu_start_args *csa;
volatile struct cpu_start_args *csa;
csa = &cpu_start_args;
CTR1(KTR_SMP, "cpu_mp_bootstrap: cpuid=%d", pc->pc_cpuid);
while (csa->csa_state != CPU_BOOTSTRAPING)
membar(StoreLoad);
cpu_setregs(pc);
pmap_map_tsb();
CTR0(KTR_SMP, "cpu_mp_bootstrap: spinning");
csa->csa_state = CPU_BOOTSTRAPPED;
membar(StoreLoad);
for (;;)
;
mtx_lock_spin(&ap_boot_mtx);
CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
cpu_setregs(pc);
smp_cpus++;
/* Build our map of 'other' CPUs. */
PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
if (smp_cpus == mp_ncpus) {
smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
smp_active = 1; /* historic */
}
mtx_unlock_spin(&ap_boot_mtx);
/* wait until all the AP's are up */
while (smp_started == 0)
; /* nothing */
csa->csa_state = CPU_BOOTSTRAP;
for (;;)
;
binuptime(PCPU_PTR(switchtime));
PCPU_SET(switchticks, ticks);
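
For completeness, here is the second phase of the handshake as cpu_mp_unleash()
above drives it: the boot processor publishes the AP's locked per-cpu mappings
and its module id, then spins until the AP reports CPU_BOOTSTRAP from
cpu_mp_bootstrap().  A simplified sketch; the types are trimmed and the
PCPU_PAGES value is assumed, so this is an illustration of the ordering rather
than the real code.

#include <stdint.h>

#define	CPU_BOOTSTRAP	3
#define	PCPU_PAGES	1		/* assumed value for the sketch */

struct tte_sketch {
	uint64_t	tte_vpn;
	uint64_t	tte_data;
};

struct csa_sketch {
	unsigned int	csa_mid;
	unsigned int	csa_state;
	struct tte_sketch csa_ttes[PCPU_PAGES];
};

static void
bsp_unleash_one_ap(volatile struct csa_sketch *csa, unsigned int mid,
    const struct tte_sketch *pcpu_ttes)
{
	int i;

	/* Publish the locked 8K TTEs for the AP's per-cpu pages... */
	for (i = 0; i < PCPU_PAGES; i++) {
		csa->csa_ttes[i].tte_vpn = pcpu_ttes[i].tte_vpn;
		csa->csa_ttes[i].tte_data = pcpu_ttes[i].tte_data;
	}
	csa->csa_state = 0;
	/* ...then name the cpu that may proceed; mp_startup spins on csa_mid. */
	csa->csa_mid = mid;
	while (csa->csa_state != CPU_BOOTSTRAP)
		;		/* AP mapped its pcpu page, took its stack and
				 * ran cpu_mp_bootstrap() */
}

Note that the ordering matters here, as the comment in the diff says: the TTEs
must be visible before csa_mid names the cpu, because csa_mid is the only thing
the waiting AP is watching.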