#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include <errno.h>
#include <sys/syscall.h>

#include <sys/kassert.h>
#include <sys/kconfig.h>
#include <sys/kdebug.h>
#include <sys/kmem.h>
#include <sys/ktime.h>
#include <sys/mp.h>
#include <sys/spinlock.h>
#include <sys/thread.h>

#include <machine/trap.h>
#include <machine/pmap.h>

// Special Kernel Process
Process *kernelProcess;

// Scheduler Queues
Spinlock schedLock;
ThreadQueue waitQueue;
ThreadQueue runnableQueue;
Thread *curProc[MAX_CPUS];

// Process List
Spinlock procLock;
uint64_t nextProcessID;
ProcessQueue processList;

// Memory Pools
Slab processSlab;
Slab threadSlab;

void Handle_GlobalInit();

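/*
 * Thread_Init --
 *      One-time initialization on the boot processor: sets up the memory
 *      pools, locks, scheduler queues, and process list, creates the kernel
 *      process, and wraps the currently running context in an 'init' thread.
 */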
void
Thread_Init()
{
    nextProcessID = 1;

    Slab_Init(&processSlab, "Process Objects", sizeof(Process), 16);
    Slab_Init(&threadSlab, "Thread Objects", sizeof(Thread), 16);

    Spinlock_Init(&procLock, "Process List Lock", SPINLOCK_TYPE_NORMAL);
    Spinlock_Init(&schedLock, "Scheduler Lock", SPINLOCK_TYPE_RECURSIVE);

    TAILQ_INIT(&waitQueue);
    TAILQ_INIT(&runnableQueue);
    TAILQ_INIT(&processList);

    Handle_GlobalInit();

    // Kernel Process
    kernelProcess = Process_Create(NULL, "kernel");

    // Create a thread object for the current context
    Process *proc = Process_Create(NULL, "init");
    curProc[0] = Thread_Create(proc);
    curProc[0]->schedState = SCHED_STATE_RUNNING;
}

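/*
 * Thread_InitAP --
 *      Per-CPU initialization for application processors: wraps the AP's
 *      current context in a kernel-process thread and marks it running.
 */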
void
Thread_InitAP()
{
    Thread *apthr = Thread_Create(kernelProcess);

    apthr->schedState = SCHED_STATE_RUNNING;

    //PAlloc_Release((void *)thr->kstack);
    //thr->kstack = 0;

    curProc[CPU()] = apthr;
}

/*
 * Process
 */

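/*
 * Process_Create --
 *      Allocates a new process with a fresh address space, handle table, and
 *      empty thread/zombie lists, links it to its parent (if any), and adds
 *      it to the global process list. Returns NULL on allocation failure.
 */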
Process *
Process_Create(Process *parent, const char *title)
{
    Process *proc = (Process *)Slab_Alloc(&processSlab);

    if (!proc)
        return NULL;

    memset(proc, 0, sizeof(*proc));

    proc->pid = nextProcessID++;
    proc->threads = 0;
    proc->refCount = 1;
    TAILQ_INIT(&proc->threadList);

    if (title) {
        strncpy(proc->title, title, PROCESS_TITLE_LENGTH - 1);
        proc->title[PROCESS_TITLE_LENGTH - 1] = '\0';
    } else {
        proc->title[0] = '\0';
    }

    proc->space = PMap_NewAS();
    if (proc->space == NULL) {
        Slab_Free(&processSlab, proc);
        return NULL;
    }
    proc->ustackNext = MEM_USERSPACE_STKBASE;

    Spinlock_Init(&proc->lock, "Process Lock", SPINLOCK_TYPE_NORMAL);

    Semaphore_Init(&proc->zombieSemaphore, 0, "Zombie Semaphore");
    TAILQ_INIT(&proc->zombieQueue);

    Handle_Init(proc);

    proc->parent = parent;
    if (parent) {
        Spinlock_Lock(&parent->lock);
        TAILQ_INSERT_TAIL(&parent->childrenList, proc, siblingList);
        Spinlock_Unlock(&parent->lock);
    }
    TAILQ_INIT(&proc->childrenList);
    TAILQ_INIT(&proc->zombieProc);
    Semaphore_Init(&proc->zombieProcSemaphore, 0, "Zombie Process Semaphore");

    Spinlock_Lock(&procLock);
    TAILQ_INSERT_TAIL(&processList, proc, processList);
    Spinlock_Unlock(&procLock);

    return proc;
}

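/*
 * Process_Destroy --
 *      Tears down a process once its last reference is released: destroys
 *      its handles, lock, semaphores, and address space, unlinks it from its
 *      parent's zombie list and the global process list, and frees the
 *      object.
 */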
static void
Process_Destroy(Process *proc)
{
    Handle_Destroy(proc);

    Spinlock_Destroy(&proc->lock);
    Semaphore_Destroy(&proc->zombieSemaphore);
    Semaphore_Destroy(&proc->zombieProcSemaphore);
    PMap_DestroyAS(proc->space);

    // XXX: We need to promote zombie processes to our parent
    // XXX: Release the semaphore as well

    if (proc->parent) {
        Spinlock_Lock(&proc->parent->lock);
        TAILQ_REMOVE(&proc->parent->zombieProc, proc, siblingList);
        Spinlock_Unlock(&proc->parent->lock);
    }
    Spinlock_Lock(&procLock);
    TAILQ_REMOVE(&processList, proc, processList);
    Spinlock_Unlock(&procLock);

    Slab_Free(&processSlab, proc);
}

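/*
 * Process_Lookup --
 *      Finds a process by PID. On success returns the process with an extra
 *      reference held; the caller must drop it with Process_Release. Returns
 *      NULL if no process with that PID exists.
 */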
Process *
Process_Lookup(uint64_t pid)
{
    Process *p;
    Process *proc = NULL;

    Spinlock_Lock(&procLock);
    TAILQ_FOREACH(p, &processList, processList) {
        if (p->pid == pid) {
            Process_Retain(p);
            proc = p;
            break;
        }
    }
    Spinlock_Unlock(&procLock);

    return proc;
}

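/*
 * Process_Retain --
 *      Takes an additional reference on a live process.
 */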
void
Process_Retain(Process *proc)
{
    ASSERT(proc->refCount != 0);
    __sync_fetch_and_add(&proc->refCount, 1);
}

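/*
 * Process_Release --
 *      Drops a reference; the process is destroyed when the last reference
 *      is released.
 */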
void
Process_Release(Process *proc)
{
    ASSERT(proc->refCount != 0);
    if (__sync_fetch_and_sub(&proc->refCount, 1) == 1) {
        Process_Destroy(proc);
    }
}

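/*
 * Process_Wait --
 *      Waits for a child process to exit. A pid of 0 reaps any zombie child;
 *      otherwise a non-zero pid is only compared against the head of the
 *      zombie list (see XXX below). Releases the child's zombie threads and
 *      the child itself, and returns (pid << 16) | exitCode.
 */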
uint64_t
Process_Wait(Process *proc, uint64_t pid)
{
    Thread *thr;
    Thread *thr_temp;
    Process *p = NULL;
    uint64_t status;

    // XXX: Need to verify pid exists!

    while (1) {
        Semaphore_Acquire(&proc->zombieProcSemaphore);
        // XXX: Forced exit check!

        Spinlock_Lock(&proc->lock);
        p = TAILQ_FIRST(&proc->zombieProc);
        if (pid == 0 || p->pid == pid) {
            TAILQ_REMOVE(&proc->zombieProc, p, siblingList);
            Spinlock_Unlock(&proc->lock);
            break;
        }
        Spinlock_Unlock(&proc->lock);

        Semaphore_Release(&proc->zombieProcSemaphore);
    }

    status = (p->pid << 16) | p->exitCode;

    // Release threads
    TAILQ_FOREACH_SAFE(thr, &p->zombieQueue, schedQueue, thr_temp) {
        Thread_Release(thr);
    }

    // Release process
    Process_Release(p);

    return status;
}

/*
 * Thread
 */

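/*
 * Thread_Create --
 *      Creates a new thread in proc: allocates a kernel stack, assigns a TID
 *      and user stack region, links the thread into the process, and runs
 *      the architecture-specific setup. The thread starts in
 *      SCHED_STATE_NULL and is not yet runnable.
 */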
Thread *
Thread_Create(Process *proc)
{
    Thread *thr = (Thread *)Slab_Alloc(&threadSlab);

    if (!thr)
        return NULL;

    memset(thr, 0, sizeof(*thr));

    ASSERT(proc != NULL);

    thr->tid = proc->nextThreadID++;
    thr->kstack = (uintptr_t)PAlloc_AllocPage();
    if (thr->kstack == 0) {
        Slab_Free(&threadSlab, thr);
        return NULL;
    }

    Process_Retain(proc);

    Spinlock_Lock(&proc->lock);
    thr->proc = proc;
    proc->threads++;
    TAILQ_INSERT_TAIL(&proc->threadList, thr, threadList);
    thr->space = proc->space;
    thr->ustack = proc->ustackNext;
    proc->ustackNext += MEM_USERSPACE_STKLEN;
    Spinlock_Unlock(&proc->lock);

    thr->schedState = SCHED_STATE_NULL;
    thr->timerEvt = NULL;
    thr->refCount = 1;

    Thread_InitArch(thr);
    // Initialize queue

    return thr;
}

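/*
 * Thread_KThreadCreate --
 *      Creates a kernel thread belonging to the kernel process that will
 *      start executing f(arg) once it is made runnable.
 */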
Thread *
Thread_KThreadCreate(void (*f)(void *), void *arg)
{
    Thread *thr = Thread_Create(kernelProcess);
    if (!thr)
        return NULL;

    Thread_SetupKThread(thr, f, (uintptr_t)arg, 0, 0);

    return thr;
}

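/*
 * Thread_UThreadCreate --
 *      Creates a new userspace thread in the same process and address space
 *      as oldThr, maps a fresh user stack, and sets it up to enter userspace
 *      at rip with the given argument.
 */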
Thread *
Thread_UThreadCreate(Thread *oldThr, uint64_t rip, uint64_t arg)
{
    Process *proc = oldThr->proc;
    Thread *thr = (Thread *)Slab_Alloc(&threadSlab);

    if (!thr)
        return NULL;

    memset(thr, 0, sizeof(*thr));

    thr->tid = proc->nextThreadID++;
    thr->kstack = (uintptr_t)PAlloc_AllocPage();
    if (thr->kstack == 0) {
        Slab_Free(&threadSlab, thr);
        return NULL;
    }

    thr->space = oldThr->space;
    thr->schedState = SCHED_STATE_NULL;
    thr->refCount = 1;

    Spinlock_Lock(&proc->lock);
    thr->ustack = proc->ustackNext;
    proc->ustackNext += MEM_USERSPACE_STKLEN;
    Spinlock_Unlock(&proc->lock);

    PMap_AllocMap(thr->space, thr->ustack, MEM_USERSPACE_STKLEN, PTE_W);
    // XXX: Check failure

    Thread_InitArch(thr);
    // Initialize queue

    Thread_SetupUThread(thr, rip, arg);

    Process_Retain(proc);

    Spinlock_Lock(&proc->lock);
    thr->proc = proc;
    // XXX: Process lock
    proc->threads++;
    TAILQ_INSERT_TAIL(&proc->threadList, thr, threadList);
    Spinlock_Unlock(&proc->lock);

    return thr;
}

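/*
 * Thread_Destroy --
 *      Called when the last thread reference is released: unlinks the thread
 *      from its process, frees its kernel stack and the thread object, then
 *      drops the thread's reference on the process.
 */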
static void
Thread_Destroy(Thread *thr)
{
    Process *proc = thr->proc;

    // Free userspace stack

    Spinlock_Lock(&proc->lock);
    proc->threads--;
    TAILQ_REMOVE(&proc->threadList, thr, threadList);
    Spinlock_Unlock(&proc->lock);

    // Free AS
    PAlloc_Release((void *)thr->kstack);
    Slab_Free(&threadSlab, thr);

    // Release process handle
    Process_Release(proc);
}

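/*
 * Thread_Lookup --
 *      Finds a thread by TID within a process. On success returns the thread
 *      with an extra reference held; the caller must drop it with
 *      Thread_Release. Returns NULL if not found.
 */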
Thread *
Thread_Lookup(Process *proc, uint64_t tid)
{
    Thread *t;
    Thread *thr = NULL;

    Spinlock_Lock(&proc->lock);
    TAILQ_FOREACH(t, &proc->threadList, threadList) {
        if (t->tid == tid) {
            Thread_Retain(t);
            thr = t;
            break;
        }
    }
    Spinlock_Unlock(&proc->lock);

    return thr;
}

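/*
 * Thread_Retain --
 *      Takes an additional reference on a live thread.
 */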
void
Thread_Retain(Thread *thr)
{
    ASSERT(thr->refCount != 0);
    __sync_fetch_and_add(&thr->refCount, 1);
}

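/*
 * Thread_Release --
 *      Drops a reference; the thread is destroyed when the last reference is
 *      released.
 */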
void
Thread_Release(Thread *thr)
{
    ASSERT(thr->refCount != 0);
    if (__sync_fetch_and_sub(&thr->refCount, 1) == 1) {
        Thread_Destroy(thr);
    }
}

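/*
 * Thread_Wait --
 *      Reaps a zombie thread from the calling thread's process. A tid of 0
 *      reaps any zombie (EAGAIN if none); otherwise only the matching tid is
 *      reaped. Returns the exit value packed with SYSCALL_PACK, or 0 if the
 *      requested tid was not found.
 */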
uint64_t
Thread_Wait(Thread *thr, uint64_t tid)
{
    Thread *t;
    uint64_t status;

    ASSERT(thr->proc != NULL);

    if (tid == 0) {
        t = TAILQ_FIRST(&thr->proc->zombieQueue);
        if (!t) {
            return SYSCALL_PACK(EAGAIN, 0);
        }

        TAILQ_REMOVE(&thr->proc->zombieQueue, t, schedQueue);
        status = t->exitValue;
        Thread_Release(t);
        return SYSCALL_PACK(0, status);
    }

    TAILQ_FOREACH(t, &thr->proc->zombieQueue, schedQueue) {
        if (t->tid == tid) {
            TAILQ_REMOVE(&thr->proc->zombieQueue, t, schedQueue);
            status = t->exitValue;
            Thread_Release(t);
            return SYSCALL_PACK(0, status);
        }
    }

    return 0;
}

/*
 * Scheduler Functions
 */

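/*
 * Sched_Current --
 *      Returns the thread currently running on this CPU with an extra
 *      reference held; the caller must drop it with Thread_Release.
 */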
Thread *
Sched_Current()
{
    Spinlock_Lock(&schedLock);

    Thread *thr = curProc[CPU()];
    Thread_Retain(thr);

    Spinlock_Unlock(&schedLock);

    return thr;
}

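/*
 * Sched_SetRunnable --
 *      Marks a thread runnable and places it on the runnable queue. If the
 *      thread was waiting, it is removed from the wait queue and its wait
 *      time is accounted.
 */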
void
Sched_SetRunnable(Thread *thr)
{
    Spinlock_Lock(&schedLock);

    if (thr->schedState == SCHED_STATE_WAITING) {
        thr->waitTime += KTime_GetEpochNS() - thr->waitStart;
        thr->waitStart = 0;
        TAILQ_REMOVE(&waitQueue, thr, schedQueue);
    }
    thr->schedState = SCHED_STATE_RUNNABLE;
    TAILQ_INSERT_TAIL(&runnableQueue, thr, schedQueue);

    Spinlock_Unlock(&schedLock);
}

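/*
 * Sched_SetWaiting --
 *      Marks a thread as waiting and places it on the wait queue, recording
 *      when the wait began.
 */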
void
Sched_SetWaiting(Thread *thr)
{
    Spinlock_Lock(&schedLock);

    thr->schedState = SCHED_STATE_WAITING;
    TAILQ_INSERT_TAIL(&waitQueue, thr, schedQueue);
    thr->waitStart = KTime_GetEpochNS();

    Spinlock_Unlock(&schedLock);
}

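/*
 * Sched_SetZombie --
 *      Marks an exiting thread as a zombie and queues it on its process's
 *      zombie queue. If this was the process's last thread, the process
 *      itself is moved from its parent's children list to the parent's
 *      zombie list and the parent is signalled.
 */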
void
Sched_SetZombie(Thread *thr)
{
    Process *proc = thr->proc;

    Spinlock_Lock(&schedLock);

    thr->schedState = SCHED_STATE_ZOMBIE;
    Spinlock_Lock(&proc->lock);
    TAILQ_INSERT_TAIL(&proc->zombieQueue, thr, schedQueue);
    Spinlock_Unlock(&proc->lock);

    if (proc->threads == 1) {
        // All processes have parents except 'init' and 'kernel'
        ASSERT(proc->parent != NULL);
        Spinlock_Lock(&proc->parent->lock);
        TAILQ_REMOVE(&proc->parent->childrenList, proc, siblingList);
        TAILQ_INSERT_TAIL(&proc->parent->zombieProc, proc, siblingList);
        Semaphore_Release(&proc->parent->zombieProcSemaphore);
        Spinlock_Unlock(&proc->parent->lock);
    }

    Spinlock_Unlock(&schedLock);
}

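/*
 * Sched_Switch --
 *      Switches from oldthr to newthr: loads the new address space, then
 *      performs the architecture-specific context switch.
 */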
static void
Sched_Switch(Thread *oldthr, Thread *newthr)
{
    // Load AS
    PMap_LoadAS(newthr->space);

    Thread_SwitchArch(oldthr, newthr);
}

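/*
 * Sched_Scheduler --
 *      Picks the next thread from the head of the runnable queue, requeues
 *      the previously running thread at the tail if it is still runnable,
 *      and context switches to the new thread. Does nothing if no other
 *      thread is runnable.
 */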
void
Sched_Scheduler()
{
    Thread *prev;
    Thread *next;

    Spinlock_Lock(&schedLock);

    // Select next thread
    next = TAILQ_FIRST(&runnableQueue);
    if (!next) {
        /*
         * There may be no other runnable threads on this core. This is a
         * good opportunity to migrate threads. We should never hit this case
         * once the OS is up and running because of the idle threads, but
         * just in case we should assert that we never return to a zombie or
         * waiting thread.
         */
        ASSERT(curProc[CPU()]->schedState == SCHED_STATE_RUNNING);
        Spinlock_Unlock(&schedLock);
        return;
    }
    TAILQ_REMOVE(&runnableQueue, next, schedQueue);

    prev = curProc[CPU()];
    curProc[CPU()] = next;
    next->schedState = SCHED_STATE_RUNNING;
    next->ctxSwitches++;

    if (prev->schedState == SCHED_STATE_RUNNING) {
        prev->schedState = SCHED_STATE_RUNNABLE;
        TAILQ_INSERT_TAIL(&runnableQueue, prev, schedQueue);
    }

    Sched_Switch(prev, next);

    Spinlock_Unlock(&schedLock);
}

extern TaskStateSegment64 TSS[MAX_CPUS];

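/*
 * ThreadKThreadEntry --
 *      Entry trampoline for newly created threads: updates this CPU's TSS
 *      kernel stack pointer, releases the scheduler lock held across the
 *      context switch, and pops the supplied trap frame to begin execution.
 */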
void
ThreadKThreadEntry(TrapFrame *tf)
{
    TSS[CPU()].rsp0 = curProc[CPU()]->kstack + 4096;

    Spinlock_Unlock(&schedLock);

    Trap_Pop(tf);
}

/*
 * Debugging
 */

void
Process_Dump(Process *proc)
{
    kprintf("title %s\n", proc->title);
    kprintf("pid %llu\n", proc->pid);
    kprintf("space %016llx\n", proc->space);
    kprintf("threads %llu\n", proc->threads);
    kprintf("refCount %d\n", proc->refCount);
    kprintf("nextFD %llu\n", proc->nextFD);
}

void
Thread_Dump(Thread *thr)
{
    // Thread_DumpArch(thr)
    kprintf("space %016llx\n", thr->space);
    kprintf("kstack %016llx\n", thr->kstack);
    kprintf("tid %llu\n", thr->tid);
    kprintf("refCount %d\n", thr->refCount);
    kprintf("state %d\n", thr->schedState);
    kprintf("ctxswtch %llu\n", thr->ctxSwitches);
    kprintf("utime %llu\n", thr->userTime);
    kprintf("ktime %llu\n", thr->kernTime);
    kprintf("wtime %llu\n", thr->waitTime);
    if (thr->proc) {
        Process_Dump(thr->proc);
    }
}

static void
Debug_Threads(int argc, const char *argv[])
{
    int c = CPU();
    Thread *thr;

    //Spinlock_Lock(&threadLock);

    kprintf("CPU %d\n", c);
    kprintf("Current Thread: %llu(%016llx) %llu\n",
            curProc[c]->tid, curProc[c], curProc[c]->ctxSwitches);
    Thread_Dump(curProc[c]);
    TAILQ_FOREACH(thr, &runnableQueue, schedQueue)
    {
        kprintf("Runnable Thread: %llu(%016llx) %llu\n", thr->tid, thr, thr->ctxSwitches);
        Thread_Dump(thr);
    }
    TAILQ_FOREACH(thr, &waitQueue, schedQueue)
    {
        kprintf("Waiting Thread: %llu(%016llx) %llu\n", thr->tid, thr, thr->ctxSwitches);
        Thread_Dump(thr);
    }

    //Spinlock_Unlock(&threadLock);
}

REGISTER_DBGCMD(threads, "Display list of threads", Debug_Threads);

static void
Debug_Processes(int argc, const char *argv[])
{
    Process *proc;

    //Spinlock_Lock(&threadLock);

    TAILQ_FOREACH(proc, &processList, processList)
    {
        kprintf("Process: %llu(%016llx)\n", proc->pid, proc);
        Process_Dump(proc);
    }

    //Spinlock_Unlock(&threadLock);
}

REGISTER_DBGCMD(processes, "Display list of processes", Debug_Processes);

static void
Debug_ThreadInfo(int argc, const char *argv[])
{
}

REGISTER_DBGCMD(threadinfo, "Display thread state", Debug_ThreadInfo);