diff --git a/sys/amd64/machine.c b/sys/amd64/machine.c
index 4bcf032..981299d 100644
--- a/sys/amd64/machine.c
+++ b/sys/amd64/machine.c
@@ -181,16 +181,23 @@ void Machine_Init()
 void
 Machine_InitAP()
 {
+    Critical_Enter();
+
+    // Setup CPU state
     Trap_InitAP();
     PMap_InitAP();
     Machine_GDTInit();
     Machine_TSSInit();
     //Machine_SyscallInit();
 
+    // Setup LAPIC
     LAPIC_Init();
 
-    kprintf("AP %d booted!\n", CPU());
+    // Boot processor
+    MP_InitAP();
+    Thread_InitAP();
+    Critical_Exit();
 
-    while (1) {}
+    Machine_IdleThread(NULL);
 }
 
diff --git a/sys/amd64/mp.c b/sys/amd64/mp.c
index ed957ff..9430f48 100644
--- a/sys/amd64/mp.c
+++ b/sys/amd64/mp.c
@@ -17,6 +17,8 @@ extern uint8_t mpstart_end[];
 
 extern AS systemAS;
 
+bool booted;
+
 void
 MP_Init()
 {
@@ -36,6 +38,7 @@ MP_Init()
 
     kprintf("CR3: %016llx RSP: %016llx\n", args[0], args[1]);
 
+    booted = 0;
     LAPIC_StartAP(1, 0x7000);
 
     uint16_t old = 0;
@@ -45,6 +48,16 @@ MP_Init()
         if (old != new)
             kprintf("OLD: %x NEW: %x\n", old, new);
         old = new;
+
+        if (booted == 1)
+            break;
     }
 }
 
+void
+MP_InitAP()
+{
+    kprintf("AP %d booted!\n", CPU());
+    booted = 1;
+}
+
diff --git a/sys/kern/ktimer.c b/sys/kern/ktimer.c
index 63c969d..deadd7c 100644
--- a/sys/kern/ktimer.c
+++ b/sys/kern/ktimer.c
@@ -7,6 +7,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
 #include 
@@ -86,7 +87,13 @@ KTimer_Cancel(KTimerEvent *evt)
 void
 KTimer_Process()
 {
-    uint64_t now = KTime_GetEpoch();
+    uint64_t now;
+
+    if (CPU() != 0) {
+        return;
+    }
+
+    now = KTime_GetEpoch();
 
     Spinlock_Lock(&timerLock);
 
diff --git a/sys/kern/thread.c b/sys/kern/thread.c
index 23f7e7c..0b960c8 100644
--- a/sys/kern/thread.c
+++ b/sys/kern/thread.c
@@ -25,7 +25,7 @@ Process *kernelProcess;
 Spinlock schedLock;
 ThreadQueue waitQueue;
 ThreadQueue runnableQueue;
-Thread *curProc;
+Thread *curProc[MAX_CPUS];
 
 // Process List
 Spinlock procLock;
@@ -60,8 +60,21 @@ Thread_Init()
 
     // Create an thread object for current context
     Process *proc = Process_Create(NULL, "init");
-    curProc = Thread_Create(proc);
-    curProc->schedState = SCHED_STATE_RUNNING;
+    curProc[0] = Thread_Create(proc);
+    curProc[0]->schedState = SCHED_STATE_RUNNING;
+}
+
+void
+Thread_InitAP()
+{
+    Thread *apthr = Thread_Create(kernelProcess);
+
+    apthr->schedState = SCHED_STATE_RUNNING;
+
+    //PAlloc_Release((void *)thr->kstack);
+    //thr->kstack = 0;
+
+    curProc[CPU()] = apthr;
 }
 
 /*
@@ -415,7 +428,7 @@ Sched_Current()
 {
     Spinlock_Lock(&schedLock);
 
-    Thread *thr = curProc;
+    Thread *thr = curProc[CPU()];
     Thread_Retain(thr);
 
     Spinlock_Unlock(&schedLock);
@@ -503,14 +516,14 @@ Sched_Scheduler()
         * in case we should assert that we never return to a zombie or waiting
         * thread.
         */
-        ASSERT(curProc->schedState == SCHED_STATE_RUNNING);
+        ASSERT(curProc[CPU()]->schedState == SCHED_STATE_RUNNING);
         Spinlock_Unlock(&schedLock);
         return;
     }
     TAILQ_REMOVE(&runnableQueue, next, schedQueue);
 
-    prev = curProc;
-    curProc = next;
+    prev = curProc[CPU()];
+    curProc[CPU()] = next;
     next->schedState = SCHED_STATE_RUNNING;
     next->ctxSwitches++;
 
@@ -529,7 +542,7 @@ extern TaskStateSegment64 TSS[MAX_CPUS];
 void
 ThreadKThreadEntry(TrapFrame *tf)
 {
-    TSS[CPU()].rsp0 = curProc->kstack + 4096;
+    TSS[CPU()].rsp0 = curProc[CPU()]->kstack + 4096;
 
     Spinlock_Unlock(&schedLock);
 
@@ -572,12 +585,14 @@ Thread_Dump(Thread *thr)
 void
 Debug_Threads(int argc, const char *argv[])
 {
+    int c = CPU();
     Thread *thr;
 
     //Spinlock_Lock(&threadLock);
 
-    kprintf("Current Thread: %d(%016llx) %d\n", curProc->tid, curProc, curProc->ctxSwitches);
-    Thread_Dump(curProc);
+    kprintf("Current Thread: %d(%016llx) %d\n",
+            curProc[c]->tid, curProc[c], curProc[c]->ctxSwitches);
+    Thread_Dump(curProc[c]);
 
     TAILQ_FOREACH(thr, &runnableQueue, schedQueue) {
         kprintf("Runnable Thread: %d(%016llx) %d\n", thr->tid, thr, thr->ctxSwitches);