metal-cos/sys/kern/thread.c
2015-01-19 17:04:14 -08:00

417 lines
8.5 KiB
C

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <sys/kassert.h>
#include <sys/kconfig.h>
#include <sys/kdebug.h>
#include <sys/kmem.h>
#include <sys/ktime.h>
#include <sys/mp.h>
#include <sys/spinlock.h>
#include <sys/thread.h>
#include <machine/trap.h>
#include <machine/pmap.h>
// Protects the scheduler queues, the process list, and the ID counters.
Spinlock threadLock;
// Next PID/TID to hand out; monotonically increasing, never reused.
uint64_t nextProcessID;
uint64_t nextThreadID;
// Thread currently running on this CPU (NOTE(review): single global —
// presumably single-core for now; verify before enabling MP).
Thread *curProc;
// Scheduler Queues (guarded by threadLock)
ThreadQueue waitQueue;
ThreadQueue runnableQueue;
// Process List (guarded by threadLock)
ProcessQueue processList;
// Allocators for Process and Thread objects.
Slab processSlab;
Slab threadSlab;
// Defined in the handle subsystem; initializes the global handle table.
void Handle_GlobalInit();
void
Thread_Init()
{
nextProcessID = 1;
nextThreadID = 1;
Slab_Init(&processSlab, "Process Objects", sizeof(Process), 16);
Slab_Init(&threadSlab, "Thread Objects", sizeof(Thread), 16);
Spinlock_Init(&threadLock, "Thread Lock");
TAILQ_INIT(&waitQueue);
TAILQ_INIT(&runnableQueue);
TAILQ_INIT(&processList);
Handle_GlobalInit();
// Create an thread object for current context
Process *proc = Thread_CreateProcess();
curProc = Thread_Create(proc);
curProc->schedState = SCHED_STATE_RUNNING;
}
/*
 * Thread_Current --
 *
 *      Return the thread currently running on this CPU (never NULL after
 *      Thread_Init).  The pointer is only stable while preemption cannot
 *      occur.
 */
Thread *
Thread_Current()
{
return curProc;
}
/*
 * Thread_Create --
 *
 *      Allocate and initialize a new thread.  If proc is non-NULL the
 *      thread joins that process (sharing its address space and carving a
 *      user stack region out of it); otherwise the thread is a kernel
 *      thread and receives a fresh address space of its own.
 *
 *      Returns the new thread (schedState == SCHED_STATE_NULL, not yet on
 *      any queue), or NULL on allocation failure.
 */
Thread *
Thread_Create(Process *proc)
{
    Thread *thr = (Thread *)Slab_Alloc(&threadSlab);

    if (!thr)
        return NULL;

    memset(thr, 0, sizeof(*thr));
    // XXX: ID allocation is not lock protected; racy once MP is enabled.
    thr->tid = nextThreadID++;
    thr->kstack = (uintptr_t)PAlloc_AllocPage();
    if (thr->kstack == 0) {
        Slab_Free(&threadSlab, thr);
        return NULL;
    }

    thr->proc = proc;
    if (proc != NULL) {
        proc->threads++;
        /*
         * Insert into the per-process thread list, mirroring
         * Thread_UThreadCreate.  Previously this was skipped, so
         * Thread_Destroy's TAILQ_REMOVE would corrupt the list for any
         * proc-owned thread created here.
         */
        TAILQ_INSERT_TAIL(&proc->threadList, thr, threadList);
        thr->ustack = proc->ustackNext;
        proc->ustackNext += MEM_USERSPACE_STKLEN;
    }

    if ((proc != NULL) && (proc->space != NULL)) {
        thr->space = proc->space;
    } else {
        // XXX: for kernel threads
        thr->space = PMap_NewAS();
        if (thr->space == NULL) {
            if (proc != NULL) {
                // Undo the bookkeeping done above.
                proc->threads--;
                TAILQ_REMOVE(&proc->threadList, thr, threadList);
            }
            PAlloc_Release((void *)thr->kstack);
            Slab_Free(&threadSlab, thr);
            return NULL;
        }
    }

    thr->schedState = SCHED_STATE_NULL;
    thr->timerEvt = NULL;

    Thread_InitArch(thr);
    // Initialize queue

    return thr;
}
Process *
Thread_CreateProcess()
{
Process *proc = (Process *)Slab_Alloc(&processSlab);
if (!proc)
return NULL;
memset(proc, 0, sizeof(*proc));
proc->pid = nextProcessID++;
proc->threads = 0;
TAILQ_INIT(&proc->threadList);
proc->space = PMap_NewAS();
if (proc->space == NULL) {
Slab_Free(&processSlab, proc);
return NULL;
}
proc->ustackNext = MEM_USERSPACE_STKBASE;
Semaphore_Init(&proc->zombieSemaphore, 0, "Zombie Semaphore");
TAILQ_INIT(&proc->zombieQueue);
Handle_Init(proc);
TAILQ_INSERT_TAIL(&processList, proc, processList);
return proc;
}
/*
 * Thread_KThreadCreate --
 *
 *      Convenience wrapper that creates a kernel thread (no owning
 *      process) and points it at entry function f with argument arg.
 *
 *      Returns the thread, or NULL if creation failed.
 */
Thread *
Thread_KThreadCreate(void (*f)(void *), void *arg)
{
    Thread *kthr = Thread_Create(NULL);

    if (kthr == NULL)
        return NULL;

    Thread_SetupKThread(kthr, f, (uintptr_t)arg, 0, 0);

    return kthr;
}
/*
 * Thread_UThreadCreate --
 *
 *      Create a new user thread in the same process as oldThr: shares
 *      oldThr's address space, gets its own kernel stack, and has a new
 *      user stack region mapped for it.
 *
 *      Returns the new thread (not yet runnable), or NULL on failure.
 *
 *      NOTE(review): largely duplicates Thread_Create; consider merging.
 */
Thread *
Thread_UThreadCreate(Thread *oldThr, uint64_t rip, uint64_t arg)
{
Process *proc = oldThr->proc;
Thread *thr = (Thread *)Slab_Alloc(&threadSlab);
if (!thr)
return NULL;
memset(thr, 0, sizeof(*thr));
// XXX: tid increment is not lock protected — racy on MP.
thr->tid = nextThreadID++;
thr->kstack = (uintptr_t)PAlloc_AllocPage();
if (thr->kstack == 0) {
Slab_Free(&threadSlab, thr);
return NULL;
}
// User threads share the process address space; no PMap_NewAS here.
thr->space = oldThr->space;
thr->schedState = SCHED_STATE_NULL;
// Carve the next user stack region out of the process stack area.
thr->ustack = proc->ustackNext;
proc->ustackNext += MEM_USERSPACE_STKLEN;
// XXX(review): PMap_AllocMap can presumably fail (cf. PMap_NewAS checks
// above) but its return value is ignored here — verify and handle.
PMap_AllocMap(thr->space, thr->ustack, MEM_USERSPACE_STKLEN, PTE_W);
Thread_InitArch(thr);
// Initialize queue
Thread_SetupUThread(thr, rip, arg);
thr->proc = proc;
proc->threads++;
TAILQ_INSERT_TAIL(&proc->threadList, thr, threadList);
return thr;
}
/*
 * Thread_SetRunnable --
 *
 *      Place a thread on the runnable queue.  If it was waiting, it is
 *      pulled off the wait queue and its accumulated wait time is
 *      charged before the transition.
 */
void
Thread_SetRunnable(Thread *thr)
{
    Spinlock_Lock(&threadLock);

    if (thr->schedState == SCHED_STATE_WAITING) {
        uint64_t now = KTime_GetEpochNS();

        thr->waitTime += now - thr->waitStart;
        thr->waitStart = 0;
        TAILQ_REMOVE(&waitQueue, thr, schedQueue);
    }

    thr->schedState = SCHED_STATE_RUNNABLE;
    TAILQ_INSERT_TAIL(&runnableQueue, thr, schedQueue);

    Spinlock_Unlock(&threadLock);
}
/*
 * Thread_SetWaiting --
 *
 *      Move a thread onto the wait queue and record when the wait began
 *      (used by Thread_SetRunnable to account wait time).
 */
void
Thread_SetWaiting(Thread *thr)
{
    Spinlock_Lock(&threadLock);

    thr->waitStart = KTime_GetEpochNS();
    thr->schedState = SCHED_STATE_WAITING;
    TAILQ_INSERT_TAIL(&waitQueue, thr, schedQueue);

    Spinlock_Unlock(&threadLock);
}
/*
 * Thread_SetZombie --
 *
 *      Mark a thread dead and queue it on its process's zombie queue so
 *      Thread_Wait can reap it.  A kernel thread with no process has no
 *      zombie queue; that case is only reported.
 */
void
Thread_SetZombie(Thread *thr)
{
    Spinlock_Lock(&threadLock);

    thr->schedState = SCHED_STATE_ZOMBIE;
    if (thr->proc == NULL) {
        kprintf("ERROR: Thread not associated with process and no zombie queue!\n");
    } else {
        TAILQ_INSERT_TAIL(&thr->proc->zombieQueue, thr, schedQueue);
    }

    Spinlock_Unlock(&threadLock);
}
/*
 * Thread_Destroy --
 *
 *      Release a thread's resources: detach it from its process's thread
 *      list, free the kernel stack page, and return the Thread to the
 *      slab.  Caller must ensure the thread is no longer on any
 *      scheduler queue.
 *
 *      XXX(review): the address space is never released — for kernel
 *      threads Thread_Create allocated it with PMap_NewAS, so it leaks
 *      here.  The user stack region is not unmapped either.
 */
void
Thread_Destroy(Thread *thr)
{
// Free userspace stack
if (thr->proc) {
thr->proc->threads--;
// NOTE(review): assumes the thread was inserted into threadList at
// creation; Thread_Create historically skipped that step.
TAILQ_REMOVE(&thr->proc->threadList, thr, threadList);
}
// Free AS
PAlloc_Release((void *)thr->kstack);
Slab_Free(&threadSlab, thr);
}
/*
 * Thread_Wait --
 *
 *      Reap a zombie thread belonging to thr's process.  tid == 0 reaps
 *      the first available zombie; otherwise only the zombie with the
 *      matching tid is reaped.
 *
 *      Returns the reaped thread's exit value, or 0 if no matching
 *      zombie exists.  (NOTE: an exit value of 0 is indistinguishable
 *      from "nothing reaped" — interface limitation kept for
 *      compatibility.)
 *
 *      XXX(review): zombieQueue is populated under threadLock in
 *      Thread_SetZombie but walked unlocked here — verify callers
 *      serialize this.
 */
uint64_t
Thread_Wait(Thread *thr, uint64_t tid)
{
    Thread *t;
    uint64_t status;

    ASSERT(thr->proc != NULL);

    /*
     * A single scan covers both cases: with tid == 0 the first zombie
     * matches (equivalent to the old TAILQ_FIRST path), otherwise we
     * search for the requested tid.
     */
    TAILQ_FOREACH(t, &thr->proc->zombieQueue, schedQueue) {
        if (tid == 0 || t->tid == tid) {
            TAILQ_REMOVE(&thr->proc->zombieQueue, t, schedQueue);
            status = t->exitValue;
            Thread_Destroy(t);
            return status;
        }
    }

    return 0;
}
/*
 * Thread_Switch --
 *
 *      Switch from oldthr to newthr: activate the new thread's address
 *      space, then perform the architecture-specific register/stack
 *      switch.  Does not return to the caller until oldthr is scheduled
 *      again.
 */
void
Thread_Switch(Thread *oldthr, Thread *newthr)
{
// Load AS
PMap_LoadAS(newthr->space);
Thread_SwitchArch(oldthr, newthr);
}
/*
 * Thread_Scheduler --
 *
 *      Pick the next runnable thread and context-switch to it.  The
 *      current thread is requeued as runnable only if it was still
 *      RUNNING (waiting/zombie threads were already queued elsewhere).
 *
 *      Locking: threadLock is deliberately held across Thread_Switch.
 *      For a brand-new thread the matching unlock happens in
 *      ThreadKThreadEntry; for an existing thread it happens at the
 *      bottom of this function once it is switched back in.  Do not
 *      reorder the lock operations here.
 */
void
Thread_Scheduler()
{
Thread *prev;
Thread *next;
Spinlock_Lock(&threadLock);
// Select next thread
next = TAILQ_FIRST(&runnableQueue);
if (!next) {
/*
 * There may be no other runnable processes on this core. This is a
 * good opportunity to migrate threads. We should never hit this case
 * once the OS is up and running because of the idle threads, but just
 * in case we should assert that we never return to a zombie or waiting
 * thread.
 */
ASSERT(curProc->schedState == SCHED_STATE_RUNNING);
Spinlock_Unlock(&threadLock);
return;
}
TAILQ_REMOVE(&runnableQueue, next, schedQueue);
prev = curProc;
curProc = next;
next->schedState = SCHED_STATE_RUNNING;
next->ctxSwitches++;
// Only a still-running previous thread goes back on the runnable queue.
if (prev->schedState == SCHED_STATE_RUNNING) {
prev->schedState = SCHED_STATE_RUNNABLE;
TAILQ_INSERT_TAIL(&runnableQueue, prev, schedQueue);
}
// Lock intentionally held across the switch; see header comment.
Thread_Switch(prev, next);
Spinlock_Unlock(&threadLock);
}
// Per-CPU hardware task state segments, defined in the arch layer.
extern TaskStateSegment64 TSS[MAX_CPUS];
/*
 * ThreadKThreadEntry --
 *
 *      First code run in a newly created thread's context.  Sets the
 *      ring-0 stack pointer for this CPU and releases threadLock, which
 *      Thread_Scheduler acquired before switching to us, then pops the
 *      prepared trap frame to begin execution.
 *
 *      NOTE(review): 4096 is presumably the kernel stack page size —
 *      confirm and replace with the page-size constant.
 */
void
ThreadKThreadEntry(TrapFrame *tf)
{
TSS[CPU()].rsp0 = curProc->kstack + 4096;
Spinlock_Unlock(&threadLock);
Trap_Pop(tf);
}
/*
 * Thread_ProcDump --
 *
 *      Print a process's identity and bookkeeping fields to the kernel
 *      console (debug helper).
 */
void
Thread_ProcDump(Process *proc)
{
    kprintf("pid %llu\n", proc->pid);
    // Cast: passing a pointer for %llx is a format/argument mismatch.
    kprintf("space %016llx\n", (unsigned long long)(uintptr_t)proc->space);
    kprintf("threads %llu\n", proc->threads);
    kprintf("nextFD %llu\n", proc->nextFD);
}
/*
 * Thread_Dump --
 *
 *      Print a thread's state and accounting counters to the kernel
 *      console, followed by its owning process (if any).
 */
void
Thread_Dump(Thread *thr)
{
    // Thread_DumpArch(thr)
    // Casts: %llx must receive an unsigned long long, not a raw pointer
    // or uintptr_t.
    kprintf("space %016llx\n", (unsigned long long)(uintptr_t)thr->space);
    kprintf("kstack %016llx\n", (unsigned long long)thr->kstack);
    kprintf("tid %llu\n", thr->tid);
    kprintf("state %d\n", thr->schedState);
    kprintf("ctxswtch %llu\n", thr->ctxSwitches);
    kprintf("utime %llu\n", thr->userTime);
    kprintf("ktime %llu\n", thr->kernTime);
    kprintf("wtime %llu\n", thr->waitTime);
    if (thr->proc) {
        Thread_ProcDump(thr->proc);
    }
}
/*
 * Debug_Threads --
 *
 *      Debugger command: dump the current thread plus every thread on
 *      the runnable and wait queues.
 *
 *      XXX(review): walks scheduler queues without threadLock (the lock
 *      calls are commented out) — only safe from the debugger with other
 *      CPUs stopped.
 */
void
Debug_Threads(int argc, const char *argv[])
{
    Thread *thr;

    //Spinlock_Lock(&threadLock);

    // tid/ctxSwitches are 64-bit and pointers are not %llx-compatible;
    // the old %d specifiers printed garbage.
    kprintf("Current Thread: %llu(%016llx) %llu\n", curProc->tid,
            (unsigned long long)(uintptr_t)curProc, curProc->ctxSwitches);
    Thread_Dump(curProc);

    TAILQ_FOREACH(thr, &runnableQueue, schedQueue)
    {
        kprintf("Runnable Thread: %llu(%016llx) %llu\n", thr->tid,
                (unsigned long long)(uintptr_t)thr, thr->ctxSwitches);
        Thread_Dump(thr);
    }

    TAILQ_FOREACH(thr, &waitQueue, schedQueue)
    {
        kprintf("Waiting Thread: %llu(%016llx) %llu\n", thr->tid,
                (unsigned long long)(uintptr_t)thr, thr->ctxSwitches);
        Thread_Dump(thr);
    }

    //Spinlock_Unlock(&threadLock);
}

REGISTER_DBGCMD(threads, "Display list of threads", Debug_Threads);
/*
 * Debug_Processes --
 *
 *      Debugger command: list every process on the global process list.
 *
 *      XXX(review): walks processList without threadLock (calls are
 *      commented out) — only safe with other CPUs stopped.
 */
void
Debug_Processes(int argc, const char *argv[])
{
    Process *proc;

    //Spinlock_Lock(&threadLock);

    TAILQ_FOREACH(proc, &processList, processList)
    {
        // pid is 64-bit and the pointer is not %llx-compatible; the old
        // %d specifier printed garbage.
        kprintf("Process: %llu(%016llx)\n", proc->pid,
                (unsigned long long)(uintptr_t)proc);
    }

    //Spinlock_Unlock(&threadLock);
}

REGISTER_DBGCMD(processes, "Display list of processes", Debug_Processes);
/*
 * Debug_ThreadInfo --
 *
 *      Debugger command stub: intended to display detailed state for a
 *      single thread.  Not yet implemented; argc/argv are ignored.
 */
void
Debug_ThreadInfo(int argc, const char *argv[])
{
}
REGISTER_DBGCMD(threadinfo, "Display thread state", Debug_ThreadInfo);