metal-cos/sys/kern/sched.c

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sys/kassert.h>
#include <sys/kconfig.h>
#include <sys/kdebug.h>
#include <sys/kmem.h>
#include <sys/ktime.h>
#include <sys/mp.h>
#include <sys/spinlock.h>
#include <sys/thread.h>
#include <machine/trap.h>
#include <machine/pmap.h>

// Scheduler Queues
Spinlock schedLock;             // Protects the scheduler queues and curProc
ThreadQueue waitQueue;          // Threads blocked waiting for an event
ThreadQueue runnableQueue;      // Threads ready to run
Thread *curProc[MAX_CPUS];      // Currently running thread on each CPU

/*
 * Scheduler Functions
 */
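
/*
 * Sched_Current --
 *
 *     Returns the thread currently running on this CPU.  A reference is
 *     taken on the thread; the caller is responsible for releasing it.
 */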
Thread *
Sched_Current()
{
    Spinlock_Lock(&schedLock);

    Thread *thr = curProc[CPU()];
    Thread_Retain(thr);

    Spinlock_Unlock(&schedLock);

    return thr;
}
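
/*
 * Sched_SetRunnable --
 *
 *     Marks a thread as runnable and appends it to the runnable queue.
 *     If the thread was waiting, it is removed from the wait queue and
 *     its wait time is accounted first.
 */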
void
Sched_SetRunnable(Thread *thr)
{
    Spinlock_Lock(&schedLock);

    if (thr->schedState == SCHED_STATE_WAITING) {
        // Account the time spent waiting and leave the wait queue
        thr->waitTime += KTime_GetEpochNS() - thr->waitStart;
        thr->waitStart = 0;
        TAILQ_REMOVE(&waitQueue, thr, schedQueue);
    }
    thr->schedState = SCHED_STATE_RUNNABLE;
    TAILQ_INSERT_TAIL(&runnableQueue, thr, schedQueue);

    Spinlock_Unlock(&schedLock);
}
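
/*
 * Sched_SetWaiting --
 *
 *     Marks a thread as waiting, appends it to the wait queue, and
 *     records when the wait began for accounting.
 */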
void
Sched_SetWaiting(Thread *thr)
{
    Spinlock_Lock(&schedLock);

    thr->schedState = SCHED_STATE_WAITING;
    TAILQ_INSERT_TAIL(&waitQueue, thr, schedQueue);
    thr->waitStart = KTime_GetEpochNS();

    Spinlock_Unlock(&schedLock);
}
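
/*
 * Sched_SetZombie --
 *
 *     Marks a thread as a zombie and moves it onto its process's zombie
 *     queue.  If this is the last thread in the process, the process is
 *     moved from its parent's child list to the parent's zombie list and
 *     the parent is signaled so it can reap the process.
 */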
void
Sched_SetZombie(Thread *thr)
{
    Process *proc = thr->proc;

    Spinlock_Lock(&schedLock);
    thr->schedState = SCHED_STATE_ZOMBIE;

    Spinlock_Lock(&proc->lock);
    TAILQ_INSERT_TAIL(&proc->zombieQueue, thr, schedQueue);
    Spinlock_Unlock(&proc->lock);

    if (proc->threads == 1) {
        // All processes have parents except 'init' and 'kernel'
        ASSERT(proc->parent != NULL);

        // Last thread exiting: hand the whole process to the parent
        Mutex_Lock(&proc->parent->zombieProcLock);

        Spinlock_Lock(&proc->parent->lock); // Guards the child list
        TAILQ_REMOVE(&proc->parent->childrenList, proc, siblingList);
        TAILQ_INSERT_TAIL(&proc->parent->zombieProc, proc, siblingList);
        Spinlock_Unlock(&proc->parent->lock);

        // Wake the parent if it is waiting to reap a zombie child
        CV_Signal(&proc->parent->zombieProcCV);
        Mutex_Unlock(&proc->parent->zombieProcLock);
    }

    Spinlock_Unlock(&schedLock);
}
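
/*
 * Sched_Switch --
 *
 *     Switches from oldthr to newthr by loading the new thread's address
 *     space and performing the architecture-specific context switch.
 */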
static void
Sched_Switch(Thread *oldthr, Thread *newthr)
{
    // Load the new thread's address space before switching to it
    PMap_LoadAS(newthr->space);

    Thread_SwitchArch(oldthr, newthr);
}
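
/*
 * Sched_Scheduler --
 *
 *     Picks the next thread from the runnable queue and switches to it.
 *     If the queue is empty, the current thread keeps running.  The
 *     previous thread is requeued only if it is still runnable (not
 *     waiting or a zombie).
 */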
void
Sched_Scheduler()
{
    Thread *prev;
    Thread *next;

    Spinlock_Lock(&schedLock);

    // Select the next thread to run
    next = TAILQ_FIRST(&runnableQueue);
    if (!next) {
        /*
         * There may be no other runnable threads on this core.  This is
         * a good opportunity to migrate threads between cores.  We
         * should never hit this case once the OS is up and running
         * because of the idle threads, but just in case we assert that
         * we never return to a zombie or waiting thread.
         */
        ASSERT(curProc[CPU()]->schedState == SCHED_STATE_RUNNING);
        Spinlock_Unlock(&schedLock);
        return;
    }
    TAILQ_REMOVE(&runnableQueue, next, schedQueue);

    prev = curProc[CPU()];
    curProc[CPU()] = next;
    next->schedState = SCHED_STATE_RUNNING;
    next->ctxSwitches++;

    // Requeue the previous thread only if it is still runnable
    if (prev->schedState == SCHED_STATE_RUNNING) {
        prev->schedState = SCHED_STATE_RUNNABLE;
        TAILQ_INSERT_TAIL(&runnableQueue, prev, schedQueue);
    }

    // Context switch; execution resumes here when this thread runs again
    Sched_Switch(prev, next);

    Spinlock_Unlock(&schedLock);
}