Documentation cleanup throughout the kernel

Ali Mashtizadeh 2023-09-09 20:18:08 -04:00
parent ac1d7e05ee
commit 22704368cb
17 changed files with 318 additions and 82 deletions

View File

@ -41,7 +41,7 @@ TaskStateSegment64 TSS[MAX_CPUS];
static char df_stack[4096]; static char df_stack[4096];
/* /**
* Machine_GDTInit -- * Machine_GDTInit --
* *
* Configures the Global Descriptor Table (GDT) that lists all segments * Configures the Global Descriptor Table (GDT) that lists all segments
@ -85,7 +85,7 @@ Machine_GDTInit()
kprintf("Done!\n"); kprintf("Done!\n");
} }
/* /**
* Machine_TSSInit -- * Machine_TSSInit --
* *
* Configures the Task State Segment (TSS) that specifies the kernel stack * Configures the Task State Segment (TSS) that specifies the kernel stack
@ -119,7 +119,7 @@ Machine_TSSInit()
kprintf("Done!\n"); kprintf("Done!\n");
} }
/* /**
* Machine_SyscallInit -- * Machine_SyscallInit --
* *
* Configure the model specific registers (MSRs) that specify how to * Configure the model specific registers (MSRs) that specify how to
@ -139,7 +139,7 @@ Machine_SyscallInit()
kprintf("Done!\n"); kprintf("Done!\n");
} }
/* /**
* Machine_EarlyInit -- * Machine_EarlyInit --
* *
* Initializes early kernel state. * Initializes early kernel state.
@ -161,7 +161,7 @@ Machine_IdleThread(void *test)
while (1) { enable_interrupts(); hlt(); } while (1) { enable_interrupts(); hlt(); }
} }
/* /**
* Machine_Init -- * Machine_Init --
* *
* At this point the assembly startup code has setup temporary processor * At this point the assembly startup code has setup temporary processor
@ -245,7 +245,7 @@ void Machine_Init()
breakpoint(); breakpoint();
} }
/* /**
* Machine_InitAP -- * Machine_InitAP --
* *
* Shorter initialization routine for co-processors. * Shorter initialization routine for co-processors.
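The /* to /** changes above (and in the files below) convert the existing function headers into Doxygen-style comment blocks so documentation can be generated from them. A minimal sketch of the convention, with an illustrative function name and tags that are not taken from this file:

    /**
     * Machine_ExampleInit --
     *
     *     One-line summary of what the routine does, followed by any detail,
     *     then optional Doxygen tags for parameters and return values.
     *
     * @param [in] cpu CPU index to initialize (illustrative parameter).
     * @retval 0 if successful.
     * @return Otherwise an error code.
     */
    static int
    Machine_ExampleInit(int cpu)
    {
        (void)cpu;
        return 0;
    }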

View File

@ -1,4 +1,8 @@
/**
 * Low-level multiprocessor boot code
*/
#define KERNEL_BASE 0xFFFF800000000000 #define KERNEL_BASE 0xFFFF800000000000
#define LOWMEM(_x) (_x - KERNEL_BASE) #define LOWMEM(_x) (_x - KERNEL_BASE)
#define MPLOWMEM(_x) (_x - KERNEL_BASE + 0x7000) #define MPLOWMEM(_x) (_x - KERNEL_BASE + 0x7000)
@ -9,34 +13,40 @@
.text .text
/**
* mpstart_begin --
*
 * Support processors start executing here in 16-bit real mode.
*/
.code16 .code16
.globl mpstart_begin .globl mpstart_begin
mpstart_begin: mpstart_begin:
// Disable interrupts
cli cli
// Initialize the segment registers to 0 (in 16-bit real mode they form the segment base)
xorw %ax, %ax xorw %ax, %ax
movw %ax, %ds movw %ax, %ds
movw %ax, %es movw %ax, %es
movw %ax, %fs movw %ax, %fs
movw %ax, %gs movw %ax, %gs
//.byte 0xf1 // Load 32-bit GDT
# Load 32-bit GDT
lgdt (mpstartgdtdesc32 - mpstart_begin + 0x7000) lgdt (mpstartgdtdesc32 - mpstart_begin + 0x7000)
movl %cr0, %eax movl %cr0, %eax
orl 0x00000011, %eax orl 0x00000011, %eax
movl %eax, %cr0 movl %eax, %cr0
// Long jump to reload the code segment switching us into 32-bit mode
ljmp $0x08, $(mpstart_enter32 - mpstart_begin + 0x7000) ljmp $0x08, $(mpstart_enter32 - mpstart_begin + 0x7000)
nop nop
nop nop
// Now we're in 32-bit mode
.code32 .code32
mpstart_enter32: mpstart_enter32:
nop nop
nop nop
@ -47,8 +57,6 @@ mpstart_enter32:
movw %ax, %fs movw %ax, %fs
movw %ax, %gs movw %ax, %gs
//.byte 0xf1
// Setup stack // Setup stack
movw %ax,%ss movw %ax,%ss
movl $0x7000, %esp movl $0x7000, %esp
@ -83,19 +91,20 @@ mpstart_enter32:
orl $0x8005002B, %eax orl $0x8005002B, %eax
movl %eax, %cr0 movl %eax, %cr0
// Long jump again to switch us into 64-bit mode
movl $LOWMEM(mpstart_enter64ptr), %edi movl $LOWMEM(mpstart_enter64ptr), %edi
ljmp *(%edi) ljmp *(%edi)
nop nop
nop nop
// Now we're playing with power
.code64 .code64
mpstart_enter64: mpstart_enter64:
nop nop
nop nop
//.byte 0xf1 // Jump into the high memory address where the kernel should execute
movq $LOWMEM(mpstart_high64ptr), %rdi movq $LOWMEM(mpstart_high64ptr), %rdi
jmp *(%rdi) jmp *(%rdi)
@ -106,8 +115,6 @@ mpstart_high64:
nop nop
nop nop
//.byte 0xf1
// Load Initial CPU State // Load Initial CPU State
movq (0x6F00), %rax movq (0x6F00), %rax
movq (0x6F08), %rbx movq (0x6F08), %rbx
@ -116,6 +123,7 @@ mpstart_high64:
call Machine_InitAP call Machine_InitAP
// If we get here, something went wrong, so we write HALT to the display
1: 1:
movw $(0x5000 + 'H'), (0xB8098) movw $(0x5000 + 'H'), (0xB8098)
movw $(0x5000 + 'A'), (0xB809A) movw $(0x5000 + 'A'), (0xB809A)
@ -132,6 +140,9 @@ mpstart_enter64ptr:
mpstart_high64ptr: mpstart_high64ptr:
.quad mpstart_high64 .quad mpstart_high64
/**
* 32-bit GDT Table
*/
.p2align 12 .p2align 12
mpstartgdt32: mpstartgdt32:
.quad 0x0000000000000000 /* Null */ .quad 0x0000000000000000 /* Null */
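The trampoline above flips two packed CR0 constants: 0x00000011 when entering 32-bit protected mode and 0x8005002B when enabling paging for long mode. Decoded against the architectural CR0 bit definitions they break down as follows; the macro names are illustrative and not taken from the repository:

    /* Architectural CR0 bits (Intel SDM / AMD APM). */
    #define CR0_PE 0x00000001  /* Protected mode enable */
    #define CR0_MP 0x00000002  /* Monitor coprocessor */
    #define CR0_TS 0x00000008  /* Task switched */
    #define CR0_ET 0x00000010  /* Extension type */
    #define CR0_NE 0x00000020  /* Native x87 error reporting */
    #define CR0_WP 0x00010000  /* Write protect in supervisor mode */
    #define CR0_AM 0x00040000  /* Alignment mask */
    #define CR0_PG 0x80000000  /* Paging enable */

    /* 0x00000011 == CR0_PE | CR0_ET: switch into protected mode. */
    /* 0x8005002B == CR0_PG | CR0_AM | CR0_WP | CR0_NE | CR0_TS | CR0_MP | CR0_PE:
     * turn on paging (long mode once EFER.LME is set) with write protection
     * and x87 error handling enabled. */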

View File

@ -14,10 +14,19 @@
.text .text
/**
* _start --
*
* ELF entry point.
*/
.globl _start .globl _start
_start: .code32 _start: .code32
jmp multiboot_entry jmp multiboot_entry
/**
 * This header must be present near the start of the ELF binary; it describes
 * how the loader should load the operating system image into memory.
*/
.align 4 .align 4
multiboot_header: .code32 multiboot_header: .code32
.long MULTIBOOT_HEADER_MAGIC .long MULTIBOOT_HEADER_MAGIC
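The header referenced here is the classic Multiboot (v1) header that the boot loader scans for near the start of the image; assuming that format, its first three fields might be spelled out in C as below (the struct name is illustrative):

    #include <stdint.h>

    /* Multiboot v1 header, per the Multiboot specification. */
    struct multiboot_header {
        uint32_t magic;     /* MULTIBOOT_HEADER_MAGIC (0x1BADB002) */
        uint32_t flags;     /* features requested from the loader */
        uint32_t checksum;  /* chosen so magic + flags + checksum == 0 (mod 2^32) */
    };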
@ -35,43 +44,53 @@ multiboot_header: .code32
// %ebx: multiboot info structure // %ebx: multiboot info structure
// //
multiboot_entry: .code32 multiboot_entry: .code32
movl %eax, %edx // Save multiboot magic // Save multiboot magic
movl %eax, %edx
// Setup the stack pointer
movl $LOWMEM(lmfarptr), %edi movl $LOWMEM(lmfarptr), %edi
movw $(0x7000 + 'A'), (0xB8000) movw $(0x7000 + 'A'), (0xB8000)
movl $LOWMEM(stack + STACK_SIZE), %esp movl $LOWMEM(stack + STACK_SIZE), %esp
// Reset EFLAGS to a known state
movw $(0x7000 + 'B'), (0xB8002) movw $(0x7000 + 'B'), (0xB8002)
pushl $0 pushl $0
popf popf
// Configure the CPU control register cr4
movw $(0x7000 + 'C'), (0xB8004) movw $(0x7000 + 'C'), (0xB8004)
movl %cr4, %eax movl %cr4, %eax
orl $0x0000006A0, %eax orl $0x0000006A0, %eax
movl %eax, %cr4 movl %eax, %cr4
// Load the 32-bit page table
movw $(0x7000 + 'D'), (0xB8006) movw $(0x7000 + 'D'), (0xB8006)
movl $LOWMEM(bootpgtbl1), %eax movl $LOWMEM(bootpgtbl1), %eax
movl %eax, %cr3 movl %eax, %cr3
// Enable 64-bit mode (long mode) and no-execute (NX) support
movw $(0x7000 + 'E'), (0xB8008) movw $(0x7000 + 'E'), (0xB8008)
movl $0xC0000080, %ecx movl $0xC0000080, %ecx
rdmsr rdmsr
orl $0x0900, %eax orl $0x0900, %eax
wrmsr wrmsr
// Load the GDT that contains segment descriptors.
movw $(0x7000 + 'E'), (0xB800A) movw $(0x7000 + 'E'), (0xB800A)
movl $LOWMEM(bootgdtdesc), %eax movl $LOWMEM(bootgdtdesc), %eax
lgdt (%eax) lgdt (%eax)
// Configure cr0
movw $(0x7000 + 'F'), (0xB800C) movw $(0x7000 + 'F'), (0xB800C)
movl %cr0, %eax movl %cr0, %eax
orl $0x8005002B, %eax orl $0x8005002B, %eax
movl %eax, %cr0 movl %eax, %cr0
// Long jump into 64-bit mode
movw $(0x7000 + '0'), (0xB800E) movw $(0x7000 + '0'), (0xB800E)
ljmp *(%edi) ljmp *(%edi)
// Now we're playing with power
lmenter: .code64 lmenter: .code64
movw $(0x7000 + '1'), (0xB8010) movw $(0x7000 + '1'), (0xB8010)
xorw %ax, %ax xorw %ax, %ax
@ -87,6 +106,7 @@ lmenter: .code64
orq %rcx, %rax orq %rcx, %rax
movq %rax, %rsp movq %rax, %rsp
// Jump into the high memory address where the kernel executes
movq $LOWMEM(lmhighptr), %rdi movq $LOWMEM(lmhighptr), %rdi
jmp *(%rdi) jmp *(%rdi)
@ -98,6 +118,7 @@ lmhigh:
call MachineBoot_Entry call MachineBoot_Entry
# Print halt to the graphics memory if we return
movw $(0x5000 + 'H'), (0xB8098) movw $(0x5000 + 'H'), (0xB8098)
movw $(0x5000 + 'A'), (0xB809A) movw $(0x5000 + 'A'), (0xB809A)
movw $(0x5000 + 'L'), (0xB809C) movw $(0x5000 + 'L'), (0xB809C)
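The entry path above also packs its cr4 and EFER settings into hexadecimal constants (0x6A0 and 0x0900). Decoded against the standard x86-64 definitions they correspond to the bits below; again the macro names are illustrative, not from the repository:

    /* CR4: 0x6A0 == PAE | PGE | OSFXSR | OSXMMEXCPT */
    #define CR4_PAE        0x00000020  /* Physical Address Extension (required for long mode) */
    #define CR4_PGE        0x00000080  /* Global page support */
    #define CR4_OSFXSR     0x00000200  /* FXSAVE/FXRSTOR and SSE enable */
    #define CR4_OSXMMEXCPT 0x00000400  /* Unmasked SSE exception support */

    /* EFER MSR (0xC0000080): 0x0900 == LME | NXE */
    #define EFER_LME       0x00000100  /* Long mode enable */
    #define EFER_NXE       0x00000800  /* No-execute (NX) page support */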

View File

@ -13,6 +13,7 @@
#include <sys/spinlock.h> #include <sys/spinlock.h>
#include <sys/disk.h> #include <sys/disk.h>
#include <sys/bufcache.h> #include <sys/bufcache.h>
#include <errno.h>
Spinlock cacheLock; Spinlock cacheLock;
XMem *diskBuf; XMem *diskBuf;
@ -30,6 +31,11 @@ DEFINE_SLAB(BufCacheEntry, &cacheEntrySlab);
#define HASHTABLEENTRIES 128 #define HASHTABLEENTRIES 128
#define BLOCKSIZE (16*1024) #define BLOCKSIZE (16*1024)
/**
* BufCache_Init --
*
* Initialize the system buffer cache.
*/
void void
BufCache_Init() BufCache_Init()
{ {
@ -74,6 +80,18 @@ BufCache_Init()
cacheAlloc = 0; cacheAlloc = 0;
} }
/**
* BufCacheLookup --
*
* Looks up a buffer cache entry that can be used by BufCache_Alloc or
* BufCache_Read to allocate the underlying buffer.
*
* @param [in] disk Disk object
* @param [in] diskOffset Block offset within the disk
* @param [out] entry If successful, this contains the buffer cache entry.
* @retval 0 if successful
* @return ENOENT if not present.
*/
static int static int
BufCacheLookup(Disk *disk, uint64_t diskOffset, BufCacheEntry **entry) BufCacheLookup(Disk *disk, uint64_t diskOffset, BufCacheEntry **entry)
{ {
@ -94,9 +112,21 @@ BufCacheLookup(Disk *disk, uint64_t diskOffset, BufCacheEntry **entry)
} }
*entry = NULL; *entry = NULL;
return 0; return ENOENT;
} }
/**
* BufCacheAlloc --
*
* Allocates a buffer cache entry that can be used by BufCache_Alloc or
 * BufCache_Read to allocate the underlying buffer.
*
* @param [in] disk Disk object
* @param [in] diskOffset Block offset within the disk
* @param [out] entry If successful, this contains the buffer cache entry.
* @retval 0 if successful.
 * @return ENOMEM if there are no free buffer cache entries.
*/
static int static int
BufCacheAlloc(Disk *disk, uint64_t diskOffset, BufCacheEntry **entry) BufCacheAlloc(Disk *disk, uint64_t diskOffset, BufCacheEntry **entry)
{ {
@ -107,7 +137,7 @@ BufCacheAlloc(Disk *disk, uint64_t diskOffset, BufCacheEntry **entry)
e = TAILQ_FIRST(&lruList); e = TAILQ_FIRST(&lruList);
if (e == NULL) { if (e == NULL) {
kprintf("BufCache: No space left!\n"); kprintf("BufCache: No space left!\n");
return -1; return ENOMEM;
} }
TAILQ_REMOVE(&lruList, e, lruEntry); TAILQ_REMOVE(&lruList, e, lruEntry);
@ -130,13 +160,16 @@ BufCacheAlloc(Disk *disk, uint64_t diskOffset, BufCacheEntry **entry)
return 0; return 0;
} }
/* /**
* BufCache_Alloc -- * BufCache_Alloc --
* *
* Allocate a buffer cache entry. * Allocate a buffer cache entry to allow writing new data to disk.
* *
* Results: * @param [in] disk Disk object
* Returns 0 if successful and sets the entry, otherwise returns an error code. * @param [in] diskOffset Block offset within the disk
* @param [out] entry If successful, this contains the buffer cache entry.
* @retval 0 if successful
* @return Otherwise returns an error code.
*/ */
int int
BufCache_Alloc(Disk *disk, uint64_t diskOffset, BufCacheEntry **entry) BufCache_Alloc(Disk *disk, uint64_t diskOffset, BufCacheEntry **entry)
@ -157,14 +190,13 @@ BufCache_Alloc(Disk *disk, uint64_t diskOffset, BufCacheEntry **entry)
return status; return status;
} }
/* /**
* BufCache_Release -- * BufCache_Release --
* *
* Release a buffer cache entry. If no other references are held the * Release a buffer cache entry. If no other references are held the
* buffer cache entry is placed on the LRU list. * buffer cache entry is placed on the LRU list.
* *
* Results: * @param [in] entry Buffer cache entry.
* None.
*/ */
void void
BufCache_Release(BufCacheEntry *entry) BufCache_Release(BufCacheEntry *entry)
@ -179,13 +211,16 @@ BufCache_Release(BufCacheEntry *entry)
Spinlock_Unlock(&cacheLock); Spinlock_Unlock(&cacheLock);
} }
/* /**
* BufCache_Read -- * BufCache_Read --
* *
* Read block from disk into the buffer cache. * Read block from disk into the buffer cache.
* *
* Results: * @param [in] disk Disk object
* Returns 0 if successful, otherwise an error code is returned. * @param [in] diskOffset Block offset within the disk
* @param [out] entry If successful, this contains the buffer cache entry.
* @retval 0 if successful
* @return Otherwise returns an error code.
*/ */
int int
BufCache_Read(Disk *disk, uint64_t diskOffset, BufCacheEntry **entry) BufCache_Read(Disk *disk, uint64_t diskOffset, BufCacheEntry **entry)
@ -223,13 +258,13 @@ BufCache_Read(Disk *disk, uint64_t diskOffset, BufCacheEntry **entry)
return status; return status;
} }
/* /**
* BufCache_Write -- * BufCache_Write --
* *
* Write a buffer cache entry to disk. * Write a buffer cache entry to disk.
* *
* Results: * @retval 0 if successful
* Returns 0 if successful, otherwise an error code is returned. * @return Otherwise an error code is returned.
*/ */
int int
BufCache_Write(BufCacheEntry *entry) BufCache_Write(BufCacheEntry *entry)
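Taken together, the routines documented above suggest the usual read-modify-write pattern for file system code: read (or allocate) an entry, change the cached block, write it back, and release the reference. A minimal sketch under that assumption; the helper name is illustrative and the block data field is only hinted at in a comment because it is not shown here:

    #include <stdint.h>
    #include <sys/disk.h>
    #include <sys/bufcache.h>

    /* Illustrative: update a single block on disk through the buffer cache. */
    static int
    UpdateBlock(Disk *disk, uint64_t diskOffset)
    {
        BufCacheEntry *entry;
        int status;

        /* Pull the block into the cache: 0 on success, error code otherwise. */
        status = BufCache_Read(disk, diskOffset, &entry);
        if (status != 0)
            return status;

        /* ... modify the cached block data here ... */

        /* Flush the entry to disk, then drop our reference so the entry can
         * return to the LRU list. */
        status = BufCache_Write(entry);
        BufCache_Release(entry);

        return status;
    }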

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2006-2022 Ali Mashtizadeh * Copyright (c) 2006-2023 Ali Mashtizadeh
* All rights reserved. * All rights reserved.
* Generic Copyin/Copyout routines * Generic Copyin/Copyout routines
*/ */
@ -15,13 +15,15 @@
extern int copy_unsafe(void *to_addr, void *from_addr, uintptr_t len); extern int copy_unsafe(void *to_addr, void *from_addr, uintptr_t len);
extern int copystr_unsafe(void *to_addr, void *from_addr, uintptr_t len); extern int copystr_unsafe(void *to_addr, void *from_addr, uintptr_t len);
/* /**
* Copy_In -- * Copy_In --
* Safely copy memory from userspace. Prevents userspace pointers from * Safely copy memory from userspace. Prevents userspace pointers from
* reading kernel memory. * reading kernel memory.
* *
* Results: * @param [in] fromuser User address to copy from.
* Returns EFAULT if the address is invalid or causes a fault. * @param [in] tokernel Kernel address to copy to.
* @param [in] len Length of the data to copy.
* @retval EFAULT if the address is invalid or causes a fault.
* *
* Side effects: * Side effects:
* Kernel page fault may have occurred. * Kernel page fault may have occurred.
@ -47,13 +49,15 @@ Copy_In(uintptr_t fromuser, void *tokernel, uintptr_t len)
return copy_unsafe(tokernel, (void *)fromuser, len); return copy_unsafe(tokernel, (void *)fromuser, len);
} }
/* /**
* Copy_Out -- * Copy_Out --
* Safely copy memory to userspace. Prevents userspace pointers from * Safely copy memory to userspace. Prevents userspace pointers from
* writing kernel memory. * writing kernel memory.
* *
* Results: * @param [in] fromkernel Kernel address to copy from.
* Returns EFAULT if the address is invalid or causes a fault. * @param [in] touser User address to copy to.
* @param [in] len Length of the data to copy.
* @retval EFAULT if the address is invalid or causes a fault.
* *
* Side effects: * Side effects:
* Kernel page fault may have occurred. * Kernel page fault may have occurred.
@ -79,13 +83,16 @@ Copy_Out(void *fromkernel, uintptr_t touser, uintptr_t len)
return copy_unsafe((void *)touser, fromkernel, len); return copy_unsafe((void *)touser, fromkernel, len);
} }
/* /**
* Copy_StrIn -- * Copy_StrIn --
*
* Safely copy a string from userspace. Prevents userspace pointers from * Safely copy a string from userspace. Prevents userspace pointers from
* reading kernel memory. * reading kernel memory.
* *
* Results: * @param [in] fromuser User address to copy from.
* Returns EFAULT if the address is invalid or causes a fault. * @param [in] tokernel Kernel address to copy to.
* @param [in] len Maximum string length.
* @retval EFAULT if the address is invalid or causes a fault.
* *
* Side effects: * Side effects:
* Kernel page fault may have occurred. * Kernel page fault may have occurred.
@ -111,13 +118,15 @@ Copy_StrIn(uintptr_t fromuser, void *tokernel, uintptr_t len)
return copystr_unsafe(tokernel, (void *)fromuser, len); return copystr_unsafe(tokernel, (void *)fromuser, len);
} }
/* /**
* Copy_StrOut -- * Copy_StrOut --
* Safely copy a string to userspace. Prevents userspace pointers from * Safely copy a string to userspace. Prevents userspace pointers from
* writing kernel memory. * writing kernel memory.
* *
* Results: * @param [in] fromkernel Kernel address to copy from.
* Returns EFAULT if the address is invalid or causes a fault. * @param [in] touser User address to copy to.
* @param [in] len Maximum string length.
* @retval EFAULT if the address is invalid or causes a fault.
* *
* Side effects: * Side effects:
* Kernel page fault may have occurred. * Kernel page fault may have occurred.
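All four routines share the same contract: 0 on success and EFAULT when the user pointer is invalid or faults, so callers normally just propagate the status. A minimal sketch of a system-call-style caller, assuming the Copy_* prototypes are visible through the kernel headers; the handler itself is illustrative:

    #include <stdint.h>

    /* Illustrative: copy an argument struct in from userspace and a result back out. */
    static int
    ExampleSyscall(uintptr_t userArg, uintptr_t userResult)
    {
        struct { uint64_t a, b; } arg, result;
        int status;

        status = Copy_In(userArg, &arg, sizeof(arg));
        if (status != 0)
            return status;              /* EFAULT: bad user pointer */

        result.a = arg.a + arg.b;
        result.b = 0;

        return Copy_Out(&result, userResult, sizeof(result));
    }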

View File

@ -1,3 +1,7 @@
/*
* Copyright (c) 2023 Ali Mashtizadeh
* All rights reserved.
*/
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h> #include <stdint.h>
@ -33,7 +37,7 @@ CV_Destroy(CV *cv)
return; return;
} }
/* /**
* CV_Wait -- * CV_Wait --
* *
* Wait to be woken up on a condition. * Wait to be woken up on a condition.
@ -52,7 +56,7 @@ CV_Wait(CV *cv, Mutex *mtx)
return; return;
} }
/* /**
* CV_Signal -- * CV_Signal --
* *
* Wake a single thread waiting on the condition. * Wake a single thread waiting on the condition.
@ -65,7 +69,7 @@ CV_Signal(CV *cv)
return; return;
} }
/* /**
* CV_WakeAll -- * CV_WakeAll --
* *
* Wake all threads waiting on the condition. * Wake all threads waiting on the condition.
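CV_Wait is called with the mutex held and is normally wrapped in a loop that re-checks the condition after every wakeup, with CV_Signal (or CV_WakeAll) issued after the shared state changes. A minimal sketch of that pattern, assuming the CV/Mutex prototypes from the kernel headers; the flag is illustrative and initialization calls are omitted:

    #include <stdbool.h>

    static Mutex mtx;          /* initialization omitted */
    static CV dataReady;       /* initialization omitted */
    static bool haveData;

    static void
    Consumer(void)
    {
        Mutex_Lock(&mtx);
        while (!haveData)                  /* re-check the predicate after each wakeup */
            CV_Wait(&dataReady, &mtx);     /* assumed to release mtx while sleeping and reacquire it on return */
        haveData = false;
        Mutex_Unlock(&mtx);
    }

    static void
    Producer(void)
    {
        Mutex_Lock(&mtx);
        haveData = true;
        CV_Signal(&dataReady);             /* wake one waiter; CV_WakeAll wakes them all */
        Mutex_Unlock(&mtx);
    }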

View File

@ -1,3 +1,7 @@
/*
* Copyright (c) 2023 Ali Mashtizadeh
* All rights reserved.
*/
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h> #include <stdint.h>
@ -39,7 +43,7 @@ Mutex_Destroy(Mutex *mtx)
return; return;
} }
/* /**
* Mutex_Lock -- * Mutex_Lock --
* *
* Acquires the mutex. * Acquires the mutex.
@ -67,7 +71,7 @@ Mutex_Lock(Mutex *mtx)
return; return;
} }
/* /**
* Mutex_TryLock -- * Mutex_TryLock --
* *
* Attempts to acquire the user mutex. Returns EBUSY if the lock is * Attempts to acquire the user mutex. Returns EBUSY if the lock is
@ -88,7 +92,7 @@ Mutex_TryLock(Mutex *mtx)
return 0; return 0;
} }
/* /**
* Mutex_Unlock -- * Mutex_Unlock --
* *
* Releases the user mutex. * Releases the user mutex.
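Mutex_TryLock returns EBUSY instead of sleeping, which suits paths that must not block; a minimal sketch of that usage (the surrounding function is illustrative):

    /* Illustrative: opportunistically enter a critical section without blocking. */
    static void
    TryDoWork(Mutex *mtx)
    {
        if (Mutex_TryLock(mtx) != 0) {
            /* EBUSY: another thread holds the mutex; defer instead of blocking. */
            return;
        }

        /* ... critical section ... */

        Mutex_Unlock(mtx);
    }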

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Ali Mashtizadeh * Copyright (c) 2013-2023 Ali Mashtizadeh
* All rights reserved. * All rights reserved.
*/ */
@ -60,10 +60,12 @@ PAlloc_Init()
pageInfoTable = NULL; pageInfoTable = NULL;
} }
/* /**
* The late init call is made after the page tables are initialized using a * PAlloc_LateInit --
* small boot memory region (2nd 16MBs). This is where initialize the XMem *
* region that represents the PageInfo array, and map memory into it. * The late init call is made after the page tables are initialized using
 * a small boot memory region (2nd 16MBs). This is where we initialize the
* XMem region that represents the PageInfo array, and map memory into it.
*/ */
void void
PAlloc_LateInit() PAlloc_LateInit()
@ -81,6 +83,11 @@ PAlloc_LateInit()
// Free old pages // Free old pages
} }
/**
* PAlloc_AddRegion --
*
* Add a physical memory region to the page allocator.
*/
void void
PAlloc_AddRegion(uintptr_t start, uintptr_t len) PAlloc_AddRegion(uintptr_t start, uintptr_t len)
{ {
@ -156,6 +163,11 @@ PAlloc_AddRegion(uintptr_t start, uintptr_t len)
Spinlock_Unlock(&pallocLock); Spinlock_Unlock(&pallocLock);
} }
/**
* PAllocGetInfo --
*
* Lookup the PageInfo structure for a given physical address.
*/
static inline PageInfo * static inline PageInfo *
PAllocGetInfo(void *pg) PAllocGetInfo(void *pg)
{ {
@ -163,6 +175,15 @@ PAllocGetInfo(void *pg)
return &pageInfoTable[entry]; return &pageInfoTable[entry];
} }
/**
* PAlloc_AllocPage --
*
 * Allocate a physical page and return the page's address in the kernel's
 * identity-mapped memory region.
*
* @retval NULL if no memory is available.
* @return Newly allocated physical page.
*/
void * void *
PAlloc_AllocPage() PAlloc_AllocPage()
{ {
@ -191,6 +212,11 @@ PAlloc_AllocPage()
return (void *)pg; return (void *)pg;
} }
/**
* PAllocFreePage --
*
* Free a page.
*/
static void static void
PAllocFreePage(void *region) PAllocFreePage(void *region)
{ {
@ -213,6 +239,11 @@ PAllocFreePage(void *region)
freePages++; freePages++;
} }
/**
* PAlloc_Retain --
*
* Increment the reference count for a physical page.
*/
void void
PAlloc_Retain(void *pg) PAlloc_Retain(void *pg)
{ {
@ -224,6 +255,12 @@ PAlloc_Retain(void *pg)
Spinlock_Unlock(&pallocLock); Spinlock_Unlock(&pallocLock);
} }
/**
* PAlloc_Release --
*
 * Decrement the reference count for a physical page. If the reference
 * count is zero, the page will be freed.
*/
void void
PAlloc_Release(void *pg) PAlloc_Release(void *pg)
{ {
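PAlloc_AllocPage, PAlloc_Retain, and PAlloc_Release describe a reference-counted page lifecycle; a minimal sketch of a single-owner allocation, assuming a freshly allocated page starts with one reference and a 4 KiB page size (both assumptions, not stated above):

    #include <string.h>

    /* Illustrative: allocate a page, use it via the identity mapping, then free it. */
    static int
    ExamplePageUse(void)
    {
        void *pg = PAlloc_AllocPage();
        if (pg == NULL)
            return -1;                /* out of physical memory */

        memset(pg, 0, 4096);          /* page size assumed to be 4 KiB */

        /* ... use the page through the kernel's identity-mapped region ... */

        PAlloc_Release(pg);           /* drops the last reference and frees the page */
        return 0;
    }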

View File

@ -1,3 +1,7 @@
/*
* Copyright (c) 2013-2023 Ali Mashtizadeh
* All rights reserved.
*/
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h> #include <stdint.h>

View File

@ -1,3 +1,7 @@
/*
* Copyright (c) 2013-2023 Ali Mashtizadeh
* All rights reserved.
*/
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h> #include <stdint.h>

View File

@ -1,3 +1,7 @@
/*
* Copyright (c) 2013-2023 Ali Mashtizadeh
* All rights reserved.
*/
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h> #include <stdint.h>

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013-2018 Ali Mashtizadeh * Copyright (c) 2013-2023 Ali Mashtizadeh
* All rights reserved. * All rights reserved.
*/ */

View File

@ -1,3 +1,7 @@
/*
* Copyright (c) 2022-2023 Ali Mashtizadeh
* All rights reserved.
*/
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h> #include <stdint.h>
@ -61,6 +65,12 @@ Spinlock_Destroy(Spinlock *lock)
Spinlock_Unlock(&lockListLock); Spinlock_Unlock(&lockListLock);
} }
/**
* Spinlock_Lock --
*
* Spin until we acquire the spinlock. This will also disable interrupts
* to prevent deadlocking with interrupt handlers.
*/
void void
Spinlock_Lock(Spinlock *lock) __NO_LOCK_ANALYSIS Spinlock_Lock(Spinlock *lock) __NO_LOCK_ANALYSIS
{ {
@ -90,6 +100,11 @@ Spinlock_Lock(Spinlock *lock) __NO_LOCK_ANALYSIS
TAILQ_INSERT_TAIL(&lockStack[CPU()], lock, lockStack); TAILQ_INSERT_TAIL(&lockStack[CPU()], lock, lockStack);
} }
/**
* Spinlock_Unlock --
*
* Release the spinlock. This will re-enable interrupts.
*/
void void
Spinlock_Unlock(Spinlock *lock) __NO_LOCK_ANALYSIS Spinlock_Unlock(Spinlock *lock) __NO_LOCK_ANALYSIS
{ {
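Because Spinlock_Lock disables interrupts and Spinlock_Unlock re-enables them, sections guarded this way should stay short and must never sleep. A minimal sketch (the counter and lock are illustrative, and their initialization is omitted):

    #include <stdint.h>

    static Spinlock statLock;      /* initialization omitted */
    static uint64_t statCounter;

    /* Illustrative: a short, non-sleeping critical section. */
    static void
    BumpCounter(void)
    {
        Spinlock_Lock(&statLock);      /* spins with interrupts disabled */
        statCounter++;
        Spinlock_Unlock(&statLock);    /* re-enables interrupts */
    }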

View File

@ -1,3 +1,7 @@
/*
* Copyright (c) 2013-2023 Ali Mashtizadeh
* All rights reserved.
*/
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h> #include <stdint.h>

View File

@ -1,3 +1,7 @@
/*
* Copyright (c) 2013-2023 Ali Mashtizadeh
* All rights reserved.
*/
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h> #include <stdint.h>
@ -211,10 +215,14 @@ Thread_Destroy(Thread *thr)
Slab_Free(&threadSlab, thr); Slab_Free(&threadSlab, thr);
} }
/* /**
* Thread_Lookup -- * Thread_Lookup --
* *
* Lookup a thread by TID and increment its reference count. * Lookup a thread by TID and increment its reference count.
*
* @param [in] proc Process within which to find a specific thread.
* @param [in] tid Thread ID of the thread to find.
* @retval NULL if the thread isn't found.
*/ */
Thread * Thread *
Thread_Lookup(Process *proc, uint64_t tid) Thread_Lookup(Process *proc, uint64_t tid)
@ -235,7 +243,7 @@ Thread_Lookup(Process *proc, uint64_t tid)
return thr; return thr;
} }
/* /**
* Thread_Retain -- * Thread_Retain --
* *
* Increment the reference count for a given thread. * Increment the reference count for a given thread.
@ -247,7 +255,7 @@ Thread_Retain(Thread *thr)
__sync_fetch_and_add(&thr->refCount, 1); __sync_fetch_and_add(&thr->refCount, 1);
} }
/* /**
* Thread_Release -- * Thread_Release --
* *
* Decrement the reference count for a given thread. * Decrement the reference count for a given thread.
@ -261,7 +269,7 @@ Thread_Release(Thread *thr)
} }
} }
/* /**
* Thread_Wait -- * Thread_Wait --
* *
* Wait for any thread (tid == TID_ANY) or a specific thread. * Wait for any thread (tid == TID_ANY) or a specific thread.
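Since Thread_Lookup returns the thread with its reference count already incremented, every successful lookup has to be balanced by a Thread_Release. A minimal sketch of that pairing (the caller is illustrative):

    #include <stdint.h>

    /* Illustrative: find a thread by TID and drop the reference when finished. */
    static int
    WithThread(Process *proc, uint64_t tid)
    {
        Thread *thr = Thread_Lookup(proc, tid);
        if (thr == NULL)
            return -1;             /* no such thread */

        /* ... operate on thr ... */

        Thread_Release(thr);       /* balances the reference taken by Thread_Lookup */
        return 0;
    }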

View File

@ -1,3 +1,7 @@
/*
* Copyright (c) 2013-2023 Ali Mashtizadeh
* All rights reserved.
*/
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h> #include <stdint.h>
@ -23,6 +27,11 @@ static Slab vnodeSlab;
DEFINE_SLAB(VFS, &vfsSlab); DEFINE_SLAB(VFS, &vfsSlab);
DEFINE_SLAB(VNode, &vnodeSlab); DEFINE_SLAB(VNode, &vnodeSlab);
/**
* VFS_MountRoot --
*
* Mount the root file system from a specific disk.
*/
int int
VFS_MountRoot(Disk *rootDisk) VFS_MountRoot(Disk *rootDisk)
{ {
@ -44,6 +53,13 @@ VFS_MountRoot(Disk *rootDisk)
return 0; return 0;
} }
/**
* VFS_Lookup --
*
 * Lookup a VNode by path. This function recursively searches the
 * directory hierarchy until the given path is found, and returns
 * NULL if it is not found.
*/
VNode * VNode *
VFS_Lookup(const char *path) VFS_Lookup(const char *path)
{ {
@ -89,14 +105,22 @@ VFS_Lookup(const char *path)
// Release oldNode // Release oldNode
if (*end == '\0') if (*end == '\0') {
Log(vfs, "%s %lx\n", path, curNode);
return curNode; return curNode;
}
start = end + 1; start = end + 1;
end = end + 1; end = end + 1;
} }
} }
/**
* VFS_Stat --
*
* Return the struct stat that contains the file and directory information
 * for a given path.
*/
int int
VFS_Stat(const char *path, struct stat *sb) VFS_Stat(const char *path, struct stat *sb)
{ {
@ -111,30 +135,79 @@ VFS_Stat(const char *path, struct stat *sb)
return 0; return 0;
} }
/**
* VFS_Open --
*
* Open a vnode for reading and writing.
*
* @param [in] fn VNode to open.
* @return Return status
*/
int int
VFS_Open(VNode *fn) VFS_Open(VNode *fn)
{ {
return fn->op->open(fn); return fn->op->open(fn);
} }
/**
* VFS_Close --
*
* Close a vnode.
*
* @param [in] fn VNode to close.
* @return Return status
*/
int int
VFS_Close(VNode *fn) VFS_Close(VNode *fn)
{ {
return fn->op->close(fn); return fn->op->close(fn);
} }
/**
* VFS_Read --
*
* Read from a vnode.
*
* @param [in] fn VNode to read from.
* @param [in] buf Buffer to write the data to.
* @param [in] off File offset in bytes.
* @param [in] len Length to read in bytes.
* @return Return status
*/
int int
VFS_Read(VNode *fn, void *buf, uint64_t off, uint64_t len) VFS_Read(VNode *fn, void *buf, uint64_t off, uint64_t len)
{ {
return fn->op->read(fn, buf, off, len); return fn->op->read(fn, buf, off, len);
} }
/**
* VFS_Write --
*
 * Write to a vnode.
*
* @param [in] fn VNode to write to.
* @param [in] buf Buffer to read the data from.
* @param [in] off File offset in bytes.
 * @param [in] len Length to write in bytes.
* @return Return status
*/
int int
VFS_Write(VNode *fn, void *buf, uint64_t off, uint64_t len) VFS_Write(VNode *fn, void *buf, uint64_t off, uint64_t len)
{ {
return fn->op->write(fn, buf, off, len); return fn->op->write(fn, buf, off, len);
} }
/**
* VFS_ReadDir --
*
* Read a directory entry from a vnode.
*
* @param [in] fn VNode to read from.
* @param [in] buf Buffer to write the data to.
 * @param [in] len Length to read in bytes.
 * @param [in,out] off Directory offset in bytes.
* @return Return status
*/
int int
VFS_ReadDir(VNode *fn, void *buf, uint64_t len, uint64_t *off) VFS_ReadDir(VNode *fn, void *buf, uint64_t len, uint64_t *off)
{ {
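The VFS entry points compose as expected: VFS_Lookup resolves a path to a VNode, VFS_Open prepares it, and VFS_Read/VFS_Close finish the job. A minimal sketch of reading the first bytes of a file under those assumptions; releasing the VNode reference is left out because that interface is not shown here:

    #include <stdint.h>

    /* Illustrative: read the beginning of a file through the VFS layer. */
    static int
    ReadFileHead(const char *path, void *buf, uint64_t len)
    {
        VNode *vn = VFS_Lookup(path);
        if (vn == NULL)
            return -1;                        /* path not found */

        int status = VFS_Open(vn);
        if (status != 0)
            return status;

        status = VFS_Read(vn, buf, 0, len);   /* offset 0, len bytes */
        VFS_Close(vn);

        return status;
    }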

View File

@ -1,3 +1,7 @@
/*
* Copyright (c) 2023 Ali Mashtizadeh
* All rights reserved.
*/
#include <stdbool.h> #include <stdbool.h>
#include <stdint.h> #include <stdint.h>
@ -44,7 +48,7 @@ WaitChannel_Destroy(WaitChannel *wchan)
Spinlock_Destroy(&wchan->lock); Spinlock_Destroy(&wchan->lock);
} }
/* /**
* WaitChannel_Lock -- * WaitChannel_Lock --
* *
* Acquires the wait channel lock. * Acquires the wait channel lock.
@ -55,7 +59,7 @@ WaitChannel_Lock(WaitChannel *wchan)
Spinlock_Lock(&wchan->lock); Spinlock_Lock(&wchan->lock);
} }
/* /**
* WaitChannel_Sleep -- * WaitChannel_Sleep --
* *
* Places the current thread to asleep while releasing the wait channel * Places the current thread to asleep while releasing the wait channel
@ -76,7 +80,7 @@ WaitChannel_Sleep(WaitChannel *wchan)
Sched_Scheduler(); Sched_Scheduler();
} }
/* /**
* WaitChannel_Wake -- * WaitChannel_Wake --
* *
* Wake up a single thread. * Wake up a single thread.
@ -101,7 +105,7 @@ WaitChannel_Wake(WaitChannel *wchan)
Spinlock_Unlock(&wchan->lock); Spinlock_Unlock(&wchan->lock);
} }
/* /**
* WaitChannel_WakeAll -- * WaitChannel_WakeAll --
* *
* Wakes up all threads currently sleeping on the wait channel. * Wakes up all threads currently sleeping on the wait channel.
@ -124,6 +128,5 @@ WaitChannel_WakeAll(WaitChannel *wchan)
} }
Spinlock_Unlock(&wchan->lock); Spinlock_Unlock(&wchan->lock);
} }