This commit is contained in:
quackerd 2020-02-24 02:16:26 -05:00
parent 4424d6401e
commit a9f2b48a95
Signed by: d
GPG Key ID: 590A22374D0B819F
51 changed files with 640 additions and 217 deletions

View File

@ -3,6 +3,7 @@ cmake_minimum_required(VERSION 3.10)
# disable in-source build
set(CMAKE_DISABLE_IN_SOURCE_BUILD ON)
set(CMAKE_DISABLE_SOURCE_CHANGES ON)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
project(bond)
@ -69,8 +70,10 @@ set(DMP_FLAGS
${DMP_FLAGS_${ARCH}})
set(SUBMODULES
kern
common
ke
arch
mm
scripts
test)

View File

@ -3,9 +3,10 @@ set(CC_SRC
cpu.c
main.c
intr.c
mem.c
print.c
brute.c)
brute.c
pmap.c
paging.c)
set(AS_SRC
boot.asm

View File

@ -19,6 +19,7 @@ bits 32
; Identity map the first 4G memory, where the kernel binary and multiboot info is
; Map the first 4G memory to KERN_PMAP temporarily so we have access to printf
; Map the first 1G memory, which contains the kernel, to KERN_BASE_START
; Point the PML4 entry for KERN_RPT_START back at the PML4 itself for recursive page tables
arch_init_32:
cli ; disable interrupts
cld ; clear the direction flag
@ -75,6 +76,11 @@ arch_init_32:
add eax, GET_PDPT(KERN_BASE_START) * 8
mov dword [eax], 10000011b ; low bits = attributes (present + R/W + 1GB page), high bits = physical 0th GB
; map the recursive mapping
mov eax, GET_PADDR(kern_early_pml4)
add eax, GET_PML4(KERN_RPT_START) * 8
mov dword [eax], GET_PADDR(kern_early_pml4) + 11b
BOCHS_BREAK
; enable PAE
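As an aside, the payoff of the recursive slot can be sketched in C. Assuming a C-side constant mirroring KERN_RPT_START (this commit only defines the RPT base on the assembly side) and a purely illustrative helper name, the PTE mapping any virtual address v becomes addressable at a fixed location inside the RPT window:

#define ARCH_ML_RPT_START (0xFFFF980000000000ul) // assumed; mirrors KERN_RPT_START

// With one PML4 slot pointing back at the PML4, a 4-level walk through that
// slot resolves to the page tables themselves: bits 47..12 of v (36 bits)
// select the PTE, and each entry is 8 bytes wide.
static inline uint64 *
rpt_pte_for(uintptr v)
{
    return (uint64 *) (ARCH_ML_RPT_START | (((v >> 12) & 0xFFFFFFFFFul) << 3));
}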

View File

@ -1,4 +1,4 @@
#include <kern/cdef.h>
#include <common/cdef.h>
#include "cpu.h"

View File

@ -1,6 +1,6 @@
#pragma once
#include <kern/cdef.h>
#include <common/cdef.h>
#define HAL_CORE_COUNT 1
struct ATTR_PACKED hal_gdt_ptr {

View File

@ -1,9 +1,9 @@
#include <arch/intr.h>
#include <arch/mem.h>
#include <arch/pmap.h>
#include <arch/print.h>
#include <kern/print.h>
#include <kern/status.h>
#include <kern/brute.h>
#include <ke/print.h>
#include <ke/status.h>
#include <ke/brute.h>
#include "cpu.h"

View File

@ -1,10 +1,18 @@
#include <kern/cdef.h>
#include <kern/print.h>
#include <common/cdef.h>
#include <ke/print.h>
#include <arch/print.h>
#include <arch/brute.h>
#include <mm/phys.h>
#include <arch/mlayout.h>
// private headers
#include "multiboot2.h"
#include "pmap_p.h"
static const char* _loader_name;
// kernel entry point
extern void kmain();
ATTR_USED void
arch_main(void *mb_info)
@ -12,36 +20,46 @@ arch_main(void *mb_info)
/* init printf related stuff */
arch_print_init();
kprintf("Multiboot info: 0x%p\n", mb_info);
kprintf("Initializing arch layer...\n");
kprintf("Processing multiboot info @ 0x%p...\n", mb_info);
for (struct multiboot_tag *tag = (struct multiboot_tag *) ((uintptr) mb_info + 8);
tag->type != MULTIBOOT_TAG_TYPE_END;
tag = (struct multiboot_tag *) ((multiboot_uint8_t *) tag
+ ((tag->size + 7) & ~7u))) {
kprintf("Tag 0x%p: %d, Size %d\n", (void *) tag, tag->type, tag->size);
// kprintf("Tag 0x%p: %d, Size %d\n", (void *) tag, tag->type, tag->size);
switch (tag->type) {
case MULTIBOOT_TAG_TYPE_MMAP:
kprintf("Found multiboot memory map.\n");
for (struct multiboot_mmap_entry *entry = ((struct multiboot_tag_mmap *) tag)->entries;
(multiboot_uint8_t *) entry < (multiboot_uint8_t *) tag + tag->size;
entry = (multiboot_memory_map_t *) ((uintptr) entry +
((struct multiboot_tag_mmap *) tag)->entry_size))
kprintf(" base_addr = 0x%lx,"
" length = 0x%lx, type = 0x%x\n",
((struct multiboot_tag_mmap *) tag)->entry_size)) {
kprintf("Adding to pmap seg: base = 0x%lx,"
" length = 0x%lx, type = 0x%x.\n",
(ulong) entry->addr,
(ulong) entry->len,
entry->type);
if (entry->type == MULTIBOOT_MEMORY_AVAILABLE) {
// add physical segments to mm phys subsystem
arch_mem_addseg(entry->addr, entry->len);
}
}
break;
case MULTIBOOT_TAG_TYPE_BOOT_LOADER_NAME:
kprintf("BoND is loaded by: %s\n", ((struct multiboot_tag_string *) tag)->string);
kprintf("Found multiboot loader name.\n");
_loader_name = ((struct multiboot_tag_string *) tag)->string;
break;
default:
kprintf("Unhandled multiboot tag type: %d\n", tag->type);
kprintf("Ignoring multiboot tag type: %d size: 0x%x\n", tag->type, tag->size);
break;
}
}
kprintf("Arch layer initialized.\n");
kprintf("BoND is loaded by: %s\n", _loader_name);
kprintf("kernel start: 0x%p end: 0x%p\n", (void*)KERN_IMG_START, (void*)ARCH_ML_KIMAGE_STOP);
arch_brute();
kprintf("Initializing memory...");
arch_mem_init();
kmain();
}

View File

@ -1,66 +0,0 @@
#include <kern/cdef.h>
#include <arch/mem.h>
#include <arch/mlayout.h>
/**
Page Table Definitions
**/
#define PML4_PRESENT (1ull << 0)
#define PML4_WRITE (1ull << 1)
#define PML4_USER (1ull << 2)
#define PML4_WRITE_THROUGH (1ull << 3)
#define PML4_CACHE_DISABLED (1ull << 4)
#define PML4_ACCESSED (1ull << 5)
#define PML4_EXECUTION_DISABLED (1ull << 63)
#define PDPT_PRESENT (1ull << 0)
#define PDPT_WRITE (1ull << 1)
#define PDPT_USER (1ull << 2)
#define PDPT_WRITE_THROUGH (1ull << 3)
#define PDPT_CACHE_DISABLED (1ull << 4)
#define PDPT_ACCESSED (1ull << 5)
#define PDPT_EXECUTION_DISABLED (1ull << 63)
#define PD_PRESENT (1ull << 0)
#define PD_WRITE (1ull << 1)
#define PD_USER (1ull << 2)
#define PD_WRITE_THROUGH (1ull << 3)
#define PD_CACHE_DISABLED (1ull << 4)
#define PD_ACCESSED (1ull << 5)
#define PD_EXECUTION_DISABLED (1ull << 63)
#define PT_PRESENT (1ull << 0)
#define PT_WRITE (1ull << 1)
#define PT_USER (1ull << 2)
#define PT_WRITE_THROUGH (1ull << 3)
#define PT_CACHE_DISABLED (1ull << 4)
#define PT_ACCESSED (1ull << 5)
#define PT_DIRTY (1ull << 6)
#define PT_ATTRIBUTE_TABLE (1ull << 7)
#define PT_GLOBAL (1ull << 8)
#define PT_EXECUTION_DISABLED (1ull << 63)
#define PML4_ENTRY_NUM(vaddr) (((vaddr) >> 39) & 0x1FF)
#define PDPT_ENTRY_NUM(vaddr) (((vaddr) >> 30) & 0x1FF)
#define PD_ENTRY_NUM(vaddr) (((vaddr) >> 21) & 0x1FF)
#define PT_ENTRY_NUM(vaddr) (((vaddr) >> 12) & 0x1FF)
void
write_page_tbl(void *base, uintptr pdpt_addr, uint64 attr)
{
if (base == NULL)
{
return;
}
uint64 entry = (pdpt_addr & 0xFFFFFFFFFF000ul) | attr;
((uint8 *) base)[0] = (uint8) (entry & 0xFFul);
((uint8 *) base)[1] = (uint8) ((entry >> 8u) & 0xFFu);
((uint8 *) base)[2] = (uint8) ((entry >> 16u) & 0xFFu);
((uint8 *) base)[3] = (uint8) ((entry >> 24u) & 0xFFu);
((uint8 *) base)[4] = (uint8) ((entry >> 32u) & 0xFFu);
((uint8 *) base)[5] = (uint8) ((entry >> 40u) & 0xFFu);
((uint8 *) base)[6] = (uint8) ((entry >> 48u) & 0xFFu);
((uint8 *) base)[7] = (uint8) ((entry >> 56u) & 0xFFu);
}

arch/paging.c (new file, 91 lines)
View File

@ -0,0 +1,91 @@
#include <mm/phys.h>
#include <common/libkern.h>
#include <arch/pmap.h>
#include <ke/status.h>
#include <ke/brute.h>
#include "paging.h"
// TODO: stub; should hand back a zeroed physical page once mm_phys is up
mm_paddr
page_alloc_zero()
{
return 0;
}
/* map physical memory to virtual */
// XXX: should undo partially-created mappings when page-table allocation fails
// TODO: use huge pages later (2MB + 1GB)
static int
_map_page(arch_pml4e *pml4, mm_paddr paddr, uintptr vaddr, uint attr)
{
int uspace = vaddr < ARCH_ML_KSPACE_START;
if (uspace && (vaddr > ARCH_ML_USPACE_END)) {
BRUTE("non-canonical vaddr");
}
/* must be 4k aligned */
if ((paddr & (ARCH_KPAGE_SZ - 1)) != 0 ||
(vaddr & (ARCH_KPAGE_SZ - 1)) != 0) {
BRUTE("addresses not aligned");
}
mm_paddr alloc_pt;
arch_pml4e *pml4_ent = pml4 + PML4_ENTRY_NUM(vaddr);
if (*pml4_ent == 0) {
alloc_pt = page_alloc_zero();
if (!alloc_pt) {
BRUTE("not enough pages");
}
arch_write_page_entry(pml4_ent, alloc_pt, PML4E_ATTR_RW | PML4E_ATTR_P | (uspace ? PML4E_ATTR_US : 0));
}
arch_pdpte *pdpt_ent = (arch_pdpte *) arch_pmap_map(*pml4_ent & 0xFFFFFFFFFF000ul, ARCH_KPAGE_SZ) + PDPT_ENTRY_NUM(vaddr); // strip attribute bits before translating
if (*pdpt_ent == 0) {
alloc_pt = page_alloc_zero();
if (!alloc_pt) {
BRUTE("not enough pages");
}
arch_write_page_entry(pdpt_ent, alloc_pt, PDPTE_ATTR_RW | PDPTE_ATTR_P | (uspace ? PDPTE_ATTR_US : 0));
}
arch_pde *pde_ent = (arch_pde *) arch_pmap_map(*pdpt_ent & 0xFFFFFFFFFF000ul, ARCH_KPAGE_SZ) + PD_ENTRY_NUM(vaddr);
if (*pde_ent == 0) {
alloc_pt = page_alloc_zero();
if (!alloc_pt) {
BRUTE("not enough pages");
}
arch_write_page_entry(pde_ent, alloc_pt, PDE_ATTR_RW | PDE_ATTR_P | (uspace ? PDE_ATTR_US : 0));
}
arch_pte *pte_ent = (arch_pte *) arch_pmap_map(*pde_ent & 0xFFFFFFFFFF000ul, ARCH_KPAGE_SZ) + PT_ENTRY_NUM(vaddr);
if (*pte_ent != 0) {
BRUTE("vaddr 0x%p is already mapped", (void*)vaddr);
}
uint64 pattr = 0;
pattr |= (attr & ARCH_VADDR_ATTR_PRESENT ? PTE_ATTR_P : 0);
pattr |= (attr & ARCH_VADDR_ATTR_NX ? PTE_ATTR_NX : 0);
pattr |= (attr & ARCH_VADDR_ATTR_READONLY ? 0 : PTE_ATTR_RW);
pattr |= (attr & ARCH_VADDR_ATTR_UNCACHED ? PTE_ATTR_PCD : 0);
arch_write_page_entry(pte_ent, paddr, pattr);
return S_OK;
}
int
arch_map_vaddr(void * base, mm_paddr paddr, uintptr vaddr, usize sz, uint attr)
{
if ((sz & (ARCH_KPAGE_SZ - 1)) != 0) {
BRUTE("Unaligned size");
}
for (uintptr off = 0; off < sz; off += ARCH_KPAGE_SZ) {
_map_page(base, paddr + off, vaddr + off, attr);
}
return S_OK;
}

arch/paging.h (new file, 93 lines)
View File

@ -0,0 +1,93 @@
#pragma once
#include <common/libkern.h>
#include <arch/mlayout.h>
#include <mm/phys.h> // for mm_paddr
typedef uint64_t arch_pml4e;
typedef uint64_t arch_pdpte;
typedef uint64_t arch_pde;
typedef uint64_t arch_pte;
/**
Page Table Definitions
**/
#define PML4E_ATTR_P (1ul << 0u)
#define PML4E_ATTR_RW (1ul << 1u)
#define PML4E_ATTR_US (1ul << 2u)
#define PML4E_ATTR_PWT (1ul << 3u)
#define PML4E_ATTR_PCD (1ul << 4u)
#define PML4E_ATTR_A (1ul << 5u)
#define PML4E_ATTR_NX (1ul << 63u)
#define PDPTE_ATTR_P (1ul << 0u)
#define PDPTE_ATTR_RW (1ul << 1u)
#define PDPTE_ATTR_US (1ul << 2u)
#define PDPTE_ATTR_PWT (1ul << 3u)
#define PDPTE_ATTR_PCD (1ul << 4u)
#define PDPTE_ATTR_A (1ul << 5u)
#define PDPTE_ATTR_D (1ul << 6u)
#define PDPTE_ATTR_PS (1ul << 7u)
#define PDPTE_ATTR_G (1ul << 8u)
#define PDPTE_ATTR_PAT (1ul << 12u)
#define PDPTE_ATTR_NX (1ul << 63u)
#define PDE_ATTR_P (1ul << 0u)
#define PDE_ATTR_RW (1ul << 1u)
#define PDE_ATTR_US (1ul << 2u)
#define PDE_ATTR_PWT (1ul << 3u)
#define PDE_ATTR_PCD (1ul << 4u)
#define PDE_ATTR_A (1ul << 5u)
#define PDE_ATTR_D (1ul << 6u)
#define PDE_ATTR_PS (1ul << 7u)
#define PDE_ATTR_G (1ul << 8u)
#define PDE_ATTR_PAT (1ul << 12u)
#define PDE_ATTR_NX (1ul << 63u)
#define PTE_ATTR_P (1ul << 0u)
#define PTE_ATTR_RW (1ul << 1u)
#define PTE_ATTR_US (1ul << 2u)
#define PTE_ATTR_PWT (1ul << 3u)
#define PTE_ATTR_PCD (1ul << 4u)
#define PTE_ATTR_A (1ul << 5u)
#define PTE_ATTR_D (1ul << 6u)
#define PTE_ATTR_PS (1ul << 7u)
#define PTE_ATTR_G (1ul << 8u)
#define PTE_ATTR_PAT (1ul << 12u)
#define PTE_ATTR_NX (1ul << 63u)
#define PML4_ENTRY_NUM(vaddr) (((vaddr) >> 39u) & 0x1FFu)
#define PDPT_ENTRY_NUM(vaddr) (((vaddr) >> 30u) & 0x1FFu)
#define PD_ENTRY_NUM(vaddr) (((vaddr) >> 21u) & 0x1FFu)
#define PT_ENTRY_NUM(vaddr) (((vaddr) >> 12u) & 0x1FFu)
#define PDPTE_MAPPING_SZ (PDE_MAPPING_SZ * 512)
#define PDE_MAPPING_SZ (PTE_MAPPING_SZ * 512)
#define PTE_MAPPING_SZ (ARCH_KPAGE_SZ)
static inline void
arch_write_page_entry(uint64_t *base, mm_paddr offset, uint64_t attr)
{
attr = (offset & 0xFFFFFFFFFF000ul) | attr;
memcpy(base, &attr, sizeof(uint64_t));
}
// trace the page table to see if there exists a pdpte entry for a given pml4
// note _nr means this doesn't depend on the recursive page mapping
// TODO: the three probes below are unimplemented stubs; they report "not present" for now
static inline int
arch_pdpte_exists_nr(ATTR_UNUSED arch_pml4e *pml4, ATTR_UNUSED mm_paddr paddr)
{
return 0;
}
static inline int
arch_pde_exists_nr()
{
return 0;
}
static inline int
arch_pte_exists_nr()
{
return 0;
}
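As a sanity check of the entry-number macros, a worked decomposition of the kernel base address (values computed by hand; STATIC_ASSERT comes from common/cdef.h, pulled in via common/libkern.h):

// 0xFFFFFFFF80000000 (ARCH_ML_KBASE_START) splits into:
//   PML4 index 511 (bits 47..39), PDPT index 510 (bits 38..30),
//   PD index 0 (bits 29..21), PT index 0 (bits 20..12)
STATIC_ASSERT(PML4_ENTRY_NUM(0xFFFFFFFF80000000ul) == 511, "pml4 index of KBASE");
STATIC_ASSERT(PDPT_ENTRY_NUM(0xFFFFFFFF80000000ul) == 510, "pdpt index of KBASE");
STATIC_ASSERT(PD_ENTRY_NUM(0xFFFFFFFF80000000ul) == 0, "pd index of KBASE");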

arch/pmap.c (new file, 146 lines)
View File

@ -0,0 +1,146 @@
#include <arch/pmap.h>
#include <ke/brute.h>
#include <mm/phys.h>
#include <common/libkern.h>
#include "pmap_p.h"
#include "paging.h"
struct arch_pmap_segs {
mm_paddr start;
mm_paddr stop;
};
// the physical memory segments information obtained from multiboot info
static struct arch_pmap_segs _phys_segs[ARCH_PMAP_MAX_PHYS_SEGS];
static usize _phys_segs_sz = 0;
// the base addr for mm_page structures
static mm_paddr _mm_pages_base;
// initializes _pmap region
static void
_pmap_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
{
usize high_mem = _phys_segs[_phys_segs_sz - 1].stop;
if (high_mem >= ARCH_ML_MAX_RAM) {
BRUTE("Only supports maximum %ld bytes RAM", ARCH_ML_MAX_RAM);
}
kprintf("Total memory size: %ld bytes\n", high_mem + 1);
// map all 1GB sections; note the ul suffix: a plain ~0x3FFFFFFFu would be
// truncated to 32 bits before being widened to usize
usize num_1g = ((high_mem + 1) & ~0x3FFFFFFFul) >> 30u;
kprintf("pmap: %ld 1GB sections\n", num_1g);
for (usize i = 0; i < num_1g; i++) {
// TODO: write the 1GB pdpte mappings for the pmap window
}
// TODO: map the remaining 2MB sections
// TODO: map the remaining 4KB sections
}
static void
_mm_pg_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
{
}
// initializes kernel mapping
static void
_kern_mapping_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
{
// map the kernel with 2MB mappings for now
KASSERT((ARCH_ML_KIMAGE_PADDR & (PDE_MAPPING_SZ - 1)) == 0, "kernel paddr not 2MB aligned.");
KASSERT(((uintptr) ARCH_ML_KIMAGE_START & (PDE_MAPPING_SZ - 1)) == 0, "kernel vaddr not 2MB aligned.");
const uintptr kern_map_end = ALIGN_UP2((uintptr) ARCH_ML_KIMAGE_STOP, PDE_MAPPING_SZ);
kprintf("kern_map_end: 0x%p\n", (void *) kern_map_end);
// pdpt for the kernel; the image sits in the top 2GB, so one pdpt (one PML4 slot) is enough
const mm_paddr kern_pdpt = *cur_addr;
*cur_addr += ARCH_KPAGE_SZ;
memset(arch_pmap_map(kern_pdpt, ARCH_KPAGE_SZ), 0, ARCH_KPAGE_SZ);
arch_write_page_entry(kern_pml4 + PML4_ENTRY_NUM((uintptr) ARCH_ML_KIMAGE_START),
kern_pdpt, PML4E_ATTR_P | PML4E_ATTR_RW);
// pd for the kernel
const mm_paddr kern_pd = *cur_addr;
*cur_addr += ARCH_KPAGE_SZ;
memset(arch_pmap_map(kern_pd, ARCH_KPAGE_SZ), 0, ARCH_KPAGE_SZ);
arch_write_page_entry((arch_pdpte *) arch_pmap_map(kern_pdpt, ARCH_KPAGE_SZ) + PDPT_ENTRY_NUM((uintptr) ARCH_ML_KIMAGE_START),
kern_pd, PDPTE_ATTR_P | PDPTE_ATTR_RW);
// fill the 2MB pde entries covering [ARCH_ML_KIMAGE_START, kern_map_end)
arch_pde *pd = (arch_pde *) arch_pmap_map(kern_pd, ARCH_KPAGE_SZ);
for (uintptr va = (uintptr) ARCH_ML_KIMAGE_START; va < kern_map_end; va += PDE_MAPPING_SZ) {
arch_write_page_entry(pd + PD_ENTRY_NUM(va),
ARCH_ML_KIMAGE_PADDR + (va - (uintptr) ARCH_ML_KIMAGE_START),
PDE_ATTR_P | PDE_ATTR_RW | PDE_ATTR_PS);
}
}
// maps device memory
void *
arch_pmap_mapdev(ATTR_UNUSED uintptr paddr, ATTR_UNUSED usize size)
{
return NULL; // TODO: stub; device mappings are not implemented yet
}
// maps a physical segment to pmap region
void *
arch_pmap_map(mm_paddr paddr, ATTR_UNUSED usize sz)
{
return (void *) ARCH_PHYS_TO_PMAP(paddr);
}
// obtains the paddr of the corresponding struct mm_page for a specific paddr
mm_paddr
arch_paddr_to_mm_page(mm_paddr paddr)
{
return (paddr / ARCH_KPAGE_SZ) * sizeof(struct mm_page) + _mm_pages_base;
}
// adds an available physical segment to _phys_segs
void
arch_mem_addseg(mm_paddr start, usize len)
{
KASSERT(_phys_segs_sz < ARCH_PMAP_MAX_PHYS_SEGS, "too many physical segments!");
_phys_segs[_phys_segs_sz].start = start;
_phys_segs[_phys_segs_sz].stop = start + len - 1;
_phys_segs_sz++;
}
// sets up paging for kernel and mm_pages for mm_phys
// specifically, we do:
// 1. Map the kernel to KERN_BASE
// 2. Allocate mm_page for the all physical memory (avail and unavail) and put them after the kernel paddr
// 2.5. Map all mm_page (s) to KERN_MM_PAGE_START
// 3. Direct map all available physical memory to PMAP_BASE
// 4. Save the mapping and switch the page table to the new table
// 5. Save the information to mm_phys for future phys setup
void
arch_mem_init()
{
// allocate boot page tables immediately after the kernel image; we map the
// kernel with 2M (PDE) pages, so keep the physical cursor 2MB aligned
mm_paddr cur_addr = ALIGN_UP2((uintptr) ARCH_ML_KIMAGE_STOP - ARCH_ML_KBASE_START, PDE_MAPPING_SZ);
// allocate the pml4 for the kernel
arch_pml4e *kern_pml4 = (arch_pml4e *) ARCH_PHYS_TO_PMAP(cur_addr);
memset(kern_pml4, 0, ARCH_KPAGE_SZ);
cur_addr += ARCH_KPAGE_SZ;
_kern_mapping_init(&cur_addr, kern_pml4);
_mm_pg_init(&cur_addr, kern_pml4);
_pmap_init(&cur_addr, kern_pml4);
// copy the physical segments information to mm_phys
for (usize i = 0; i < _phys_segs_sz; i++) {
mm_phys_add_phys_seg(_phys_segs[i].start, _phys_segs[i].stop);
}
// add reserved segment information to mm_phys
// we reserve everything from the kernel image paddr up to (but not including) cur_addr
mm_phys_add_reserved_seg(ARCH_ML_KIMAGE_PADDR, cur_addr - ARCH_ML_KIMAGE_PADDR);
// reserve the 0th page
mm_phys_add_reserved_seg(0, ARCH_KPAGE_SZ);
}

arch/pmap_p.h (new file, 12 lines)
View File

@ -0,0 +1,12 @@
#pragma once
#include <arch/mlayout.h>
#include <mm/phys.h> // for mm_paddr
#define ARCH_PMAP_TO_PHYS(pmaddr) ((pmaddr) - ARCH_ML_PMAP_START)
#define ARCH_PHYS_TO_PMAP(paddr) (ARCH_ML_PMAP_START + (paddr))
void
arch_mem_init();
void
arch_mem_addseg(mm_paddr start, usize len);

View File

@ -1,6 +1,6 @@
#include <kern/cdef.h>
#include <arch/mem.h>
#include <kern/libkern.h>
#include <common/cdef.h>
#include <arch/pmap.h>
#include <common/libkern.h>
#define FB_PADDR (0xb8000)
#define FB_ROW (25)

common/CMakeLists.txt (new file, 7 lines)
View File

@ -0,0 +1,7 @@
set(SUBMODULE common)
set(CC_SRC
avl_tree.c
)
include(${MK}/kern.cmake)

View File

@ -1,7 +1,8 @@
#include <kern/avl_tree.h>
#include <kern/libkern.h>
#include <kern/poison.h>
#include <kern/cdef.h>
#include <common/avl_tree.h>
#include <common/cdef.h>
#include <common/libkern.h>
#include <ke/poison.h>
static inline struct avl_node *
_avl_node_max(struct avl_node *node)

compile_flags.txt (new file, 1 line)
View File

@ -0,0 +1 @@
-Iinc

gdb_qemu.dbg (new file, 3 lines)
View File

@ -0,0 +1,3 @@
target remote localhost:1234
symbol-file bond.elf
break arch_main

View File

@ -1,7 +1,7 @@
#pragma once
#include <arch/intr.h>
#include <kern/status.h>
#include <ke/status.h>
/**
* Interrupt context structure

View File

@ -1,10 +0,0 @@
#pragma once
#include <kern/cdef.h>
#include <arch/mlayout.h>
static inline void *
arch_pmap_map(uintptr paddr, ATTR_UNUSED usize size)
{
return (void*)(paddr + KERN_PMAP_START);
}

View File

@ -4,13 +4,20 @@
* Kernel Memory Layout
* ----------------------- 0x0000,0000,0000,0000 - User Space
* Application SIZE: 0x0000,8000,0000,0000 (256x PML4, 128TB)
* ----------------------- 0x0000,8000,0000,0000
* ----------------------- 0x0000,7FFF,FFFF,FFFF
* Non-canonical
* ----------------------- 0xFFFF,8000,0000,0000 - Kernel Space
* Unused
* ----------------------- 0xFFFF,9000,0000,0000
* PMAP SIZE: 0x0000,0400,0000,0000 (8x PML4, 4TB)
* ----------------------- 0xFFFF,93FF,FFFF,FFFF
* ----------------------- 0xFFFF,9400,0000,0000
* MM_PAGE SIZE: 0x0000,0400,0000,0000 (8x PML4, 4TB)
* ----------------------- 0xFFFF,97FF,FFFF,FFFF
* ----------------------- 0xFFFF,9800,0000,0000
* RPT (recursive page tables)
* SIZE: 0x0000,0080,0000,0000 (1x PML4, 512GB)
* ----------------------- 0xFFFF,987F,FFFF,FFFF
* Unused
* ----------------------- 0xFFFF,FFFF,8000,0000
* Kernel Image SIZE: 0x0000,0000,8000,0000 (2GB)
@ -27,21 +34,30 @@
#define KERN_PAGE_SZ 0x1000
#define KERN_PMAP_START 0xFFFF900000000000
#define KERN_PMAP_STOP 0xFFFF940000000000
#define KERN_PMAP_STOP 0xFFFF93FFFFFFFFFF
#define KERN_MM_PAGE_START 0xFFFF940000000000
#define KERN_MM_PAGE_STOP 0xFFFF97FFFFFFFFFF
#define KERN_RPT_START 0xFFFF980000000000
#define KERN_RPT_STOP 0xFFFF987FFFFFFFFF
#define KERN_BASE_START 0xFFFFFFFF80000000
#define KERN_BASE_STOP 0x0000000000000000
#define KERN_BASE_STOP 0xFFFFFFFFFFFFFFFF
#else
#define KERN_IMG_PADDR (0x2000000)
#define KERN_PAGE_SZ (0x1000)
#define ARCH_ML_KIMAGE_PADDR (0x2000000u)
#define ARCH_KPAGE_SZ (0x1000u)
extern const char KERN_IMG_START[];
extern const char KERN_IMG_STOP[];
extern const char ARCH_ML_KIMAGE_START[];
extern const char ARCH_ML_KIMAGE_STOP[];
#define KERN_PMAP_START (0xFFFF900000000000)
#define KERN_PMAP_STOP (0xFFFF940000000000)
#define KERN_BASE_START (0xFFFFFFFF80000000)
#define KERN_BASE_STOP (0x0000000000000000)
#define ARCH_ML_USPACE_END (0x00007FFFFFFFFFFFu)
#define ARCH_ML_KSPACE_START (0xFFFF800000000000u)
#define ARCH_ML_PMAP_START (0xFFFF900000000000u)
#define ARCH_ML_PMAP_STOP (0xFFFF93FFFFFFFFFFu)
#define ARCH_ML_MMPAGE_START (0xFFFF940000000000u)
#define ARCH_ML_MMPAGE_STOP (0xFFFF97FFFFFFFFFFu)
#define ARCH_ML_KBASE_START (0xFFFFFFFF80000000u)
#define ARCH_ML_KBASE_STOP (0xFFFFFFFFFFFFFFFFu)
#define ARCH_ML_MAX_RAM (ARCH_ML_PMAP_STOP - ARCH_ML_PMAP_START + 1)
#endif
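The region sizes in the diagram can be cross-checked against these constants; a few hedged compile-time checks (assuming STATIC_ASSERT from common/cdef.h is visible):

STATIC_ASSERT(ARCH_ML_PMAP_STOP - ARCH_ML_PMAP_START + 1 == 0x40000000000ul, "PMAP region is 4TB");
STATIC_ASSERT(ARCH_ML_MMPAGE_STOP - ARCH_ML_MMPAGE_START + 1 == 0x40000000000ul, "MM_PAGE region is 4TB");
STATIC_ASSERT(ARCH_ML_MAX_RAM == 0x40000000000ul, "max supported RAM is 4TB");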

inc/arch/pmap.h (new file, 27 lines)
View File

@ -0,0 +1,27 @@
#pragma once
#include <common/cdef.h>
#include <arch/mlayout.h>
#include <mm/phys.h>
#define ARCH_PMAP_MAX_PHYS_SEGS (64)
// on x86-64 we always keep full direct mapping in the kernel
// maps normal memory (cacheable)
void *
arch_pmap_map(mm_paddr paddr, usize size);
mm_paddr
arch_paddr_to_mm_page(mm_paddr paddr);
// maps device memory (uncacheable)
void *
arch_pmap_mapdev(mm_paddr paddr, usize size);
#define ARCH_VADDR_ATTR_PRESENT (0x1u)
#define ARCH_VADDR_ATTR_UNCACHED (0x2u)
#define ARCH_VADDR_ATTR_READONLY (0x4u)
#define ARCH_VADDR_ATTR_NX (0x8u)
int
arch_map_vaddr(void * base, mm_paddr paddr, uintptr vaddr, usize sz, uint attr);
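A hypothetical call site for the attribute flags above (the pml4 pointer and both addresses are made up for illustration, not part of this commit): map one page of device-like memory present, uncached, and non-executable.

// kern_pml4 is assumed to be the root table built by arch_mem_init
int rc = arch_map_vaddr(kern_pml4, /* paddr */ 0xFEE00000ul,
                        /* vaddr */ 0xFFFFFFFF90000000ul, ARCH_KPAGE_SZ,
                        ARCH_VADDR_ATTR_PRESENT | ARCH_VADDR_ATTR_UNCACHED | ARCH_VADDR_ATTR_NX);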

View File

@ -1,5 +1,5 @@
#pragma once
#include <kern/cdef.h>
#include <common/cdef.h>
#include <arch/print.h>
void

View File

@ -1,6 +1,6 @@
#pragma once
#include <kern/cdef.h>
#include <common/cdef.h>
struct avl_node
{

View File

@ -20,6 +20,7 @@ typedef size_t usize;
typedef unsigned char uchar;
typedef unsigned long ulong;
typedef unsigned int uint;
typedef unsigned short ushort;
#define KABI __attribute__((sysv_abi))
#define STATIC_ASSERT(cond, msg) _Static_assert((cond), msg)
@ -33,7 +34,7 @@ typedef unsigned int uint;
#define ATTR_UNUSED __attribute__((unused))
#define ATTR_USED __attribute__((used))
#define ATTR_SECTION(x) __attribute__ ((section (#x)))
#define ATTR_ALIGN(x) _Alignas(x)
#define ATTR_ALIGN(x) __attribute__((aligned (x)))
#define ATTR_FMT_PRINTF __attribute__((format (printf, 1, 2)))
#define BOCHS_BREAK __asm__("xchg %bx, %bx")

View File

@ -1,6 +1,6 @@
#pragma once
#include <kern/cdef.h>
#include <common/cdef.h>
#define LDS_DECL(name) \
extern const char __start_##name[]; \

View File

@ -1,6 +1,6 @@
#pragma once
#include <kern/cdef.h>
#include <common/cdef.h>
/*
* Common macros, etc
@ -8,7 +8,9 @@
#define OBTAIN_STRUCT_ADDR(member_addr, struct_name, member_name) ((struct_name*)((uintptr)(member_addr) - (uintptr)(&(((struct_name*)0)->member_name))))
#define CEIL(num, div) \
#define ALIGN_UP2(num, round) (((num) + (round) - 1) & ~((round) - 1))
#define DIV_CEIL(num, div) \
({ __typeof__(num) _num = (num); \
__typeof__(div) _div = (div); \
((_num + _div - 1) / _div); })
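A couple of worked values for the two helpers (note ALIGN_UP2 assumes `round` is a power of two, since it masks with ~(round - 1)):

// ALIGN_UP2(0x2345, 0x1000) == 0x3000  (round up to the next 4K boundary)
// DIV_CEIL(7, 2)            == 4       (integer division, rounding up)
// an illustrative helper built on DIV_CEIL, not part of this commit:
static inline usize
bytes_to_pages(usize bytes)
{
    return DIV_CEIL(bytes, (usize) 0x1000);
}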

View File

@ -1,8 +1,8 @@
#pragma once
#include <kern/cdef.h>
#include <kern/poison.h>
#include <kern/brute.h>
#include <common/cdef.h>
#include <ke/poison.h>
#include <ke/brute.h>
struct list_entry {
struct list_entry *prev;

inc/ke/brute.h (new file, 16 lines)
View File

@ -0,0 +1,16 @@
#pragma once
#include <common/cdef.h>
#include <ke/print.h>
#include <arch/brute.h>
#define BRUTE(fmt, ...) do { \
kprintf("Kernel brute at %s:%d: " fmt "\n", __FILE__, __LINE__, ##__VA_ARGS__); \
arch_brute(); \
} while(0)
#define KASSERT(expr, msg, ...) do { \
if (!(expr)) { \
BRUTE("Assertion \"" #expr "\" failed: " msg , ##__VA_ARGS__); \
} \
} while(0)
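A hedged sketch of how the two macros read at a call site (the variables are illustrative):

// assert with printf-style context; brute on unrecoverable state
KASSERT((paddr & (ARCH_KPAGE_SZ - 1)) == 0, "unaligned paddr: 0x%lx", (ulong) paddr);
if (early_alloc_ptr == NULL) {
    BRUTE("out of early boot memory");
}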

View File

@ -1,6 +1,6 @@
#pragma once
#include <kern/lds.h>
#include <common/lds.h>
typedef void (kinitf)(void*);

View File

@ -1,7 +1,7 @@
#pragma once
#include <kern/cdef.h>
#include <kern/print.h>
#include <common/cdef.h>
#include <ke/print.h>
ATTR_FMT_PRINTF int
kprintf(const char *fmt, ...);

inc/ke/spin_lock.h (new file, 27 lines)
View File

@ -0,0 +1,27 @@
#pragma once
#include <common/cdef.h>
// implements a simple ticket lock
struct ke_spin_lock {
// LOW 16 bits: cur ticket
// HIGH 16 bits: cur owner
DECL_ATOMIC(uint32) val;
};
#define KE_SPIN_LOCK_INITIALIZER {.val = ATOMIC_VAR_INIT(0)}
STATIC_ASSERT(sizeof(struct ke_spin_lock) == sizeof(uint32), "ke_spin_lock size isn't 32 bits");
void
ke_spin_lock_init(struct ke_spin_lock *lock);
void
ke_spin_lock_acq(struct ke_spin_lock *lock);
void
ke_spin_lock_rel(struct ke_spin_lock *lock);
// returns non-zero on success otherwise zero
int
ke_spin_lock_try_acq(struct ke_spin_lock *lock);
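A minimal usage sketch (the counter is illustrative). The ticket scheme makes acquisition FIFO-fair: each acquirer takes the next ticket and spins until the owner half of val catches up.

static struct ke_spin_lock _stat_lock = KE_SPIN_LOCK_INITIALIZER;
static uint _stat_count;

static void
stat_bump(void)
{
    ke_spin_lock_acq(&_stat_lock);
    _stat_count++; // critical section
    ke_spin_lock_rel(&_stat_lock);
}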

View File

@ -1,6 +1,6 @@
#pragma once
#include <kern/cdef.h>
#include <common/cdef.h>
/**
* Specific error codes

View File

@ -1,16 +0,0 @@
#pragma once
#include <kern/cdef.h>
#include <kern/print.h>
#include <arch/brute.h>
#define BRUTE(fmt, ...) do { \
kprintf("Kernel brute: " fmt "\n", ##__VA_ARGS__); \
arch_brute(); \
} while(0)
#define KASSERT(expr, msg, ...) do { \
if (!(expr)) { \
BRUTE("Assertion \"" #expr "\" failed at %s:%d: " msg , __FILE__, __LINE__ , ##__VA_ARGS__); \
} \
} while(0)

View File

@ -1,27 +0,0 @@
#pragma once
#include <kern/cdef.h>
// implements a simple ticket lock
struct spin_lock {
// LOW 16 bits: cur ticket
// HIGH 16 bits: cur owner
DECL_ATOMIC(uint32) val;
};
#define SPIN_LOCK_INITIALIZER {.val = ATOMIC_VAR_INIT(0)}
STATIC_ASSERT(sizeof(struct spin_lock) == sizeof(uint32), "spin_lock size isn't 32 bits");
void
spin_lock_init(struct spin_lock *lock);
void
spin_lock_acq(struct spin_lock *lock);
void
spin_lock_rel(struct spin_lock *lock);
// returns non-zero on success otherwise zero
int
spin_lock_try_acq(struct spin_lock *lock);

inc/mm/phys.h (new file, 28 lines)
View File

@ -0,0 +1,28 @@
#pragma once
#include <common/cdef.h>
#include <ke/spin_lock.h>
#include <common/list.h>
typedef uintptr mm_paddr;
struct mm_page {
struct ke_spin_lock page_lock; // page lock
mm_paddr phys_addr; // physical address of the page
uint8 phys_order; // order of the page in the buddy allocator
uint8 phys_pool; // which pool it belongs to in the buddy allocator
struct list_entry phys_flist_ent; // list entry for the free list in the buddy allocator
};
#define MM_PHYS_ORDER_MAX (10)
#define MM_PHYS_ORDER_FREE (MM_PHYS_ORDER_MAX + 1)
#define MM_PHYS_POOL_DMA (0)
#define MM_PHYS_POOL_GENERIC (1)
#define MM_PHYS_MAX_POOLS (MM_PHYS_POOL_GENERIC + 1)
void
mm_phys_add_phys_seg(mm_paddr start, usize len);
void
mm_phys_add_reserved_seg(mm_paddr start, usize len);
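Since one struct mm_page describes one 4KB frame, the bookkeeping overhead is easy to estimate; a hedged sketch (DIV_CEIL lives in common/libkern.h, which this header does not itself include):

static inline usize
mm_page_array_bytes(usize ram_bytes)
{
    // one descriptor per 4K physical page, e.g. 4GB of RAM -> 1M descriptors
    return DIV_CEIL(ram_bytes, (usize) 0x1000) * sizeof(struct mm_page);
}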

View File

@ -1,7 +1,7 @@
#pragma once
#include <kern/cdef.h>
#include <kern/lds.h>
#include <common/cdef.h>
#include <common/lds.h>
LDS_DECL(ktest);

View File

@ -1,6 +1,5 @@
set(SUBMODULE kern)
set(SUBMODULE ke)
set(CC_SRC
avl_tree.c
libkern.c
kmain.c
print.c

View File

@ -1,7 +1,7 @@
#include <kern/cdef.h>
#include <kern/brute.h>
#include <kern/kinit.h>
#include <kern/libkern.h>
#include <common/cdef.h>
#include <ke/brute.h>
#include <ke/kinit.h>
#include <common/libkern.h>
static int
kinit_cmpf(const void *ki1, const void *ki2)
@ -26,10 +26,9 @@ init_kinit()
* Kernel entry point
* @param boot_info passed by the bootloader
*/
ATTR_UNUSED void KABI
kmain(ATTR_UNUSED void *boot_info)
ATTR_USED void
kmain()
{
KASSERT(boot_info != NULL, "bootinfo is NULL");
init_kinit();
BRUTE("Control reached end of kmain");
}

View File

@ -1,5 +1,5 @@
#include <kern/cdef.h>
#include <kern/libkern.h>
#include <common/cdef.h>
#include <common/libkern.h>
void
memswp(void *dst, void *src, usize size)

View File

@ -1,13 +1,13 @@
#include <kern/print.h>
#include <ke/print.h>
#include <arch/print.h>
#include <kern/libkern.h>
#include <kern/spin_lock.h>
#include <common/libkern.h>
#include <ke/spin_lock.h>
/* max space needed for each byte is when printing it in binary = 8 chars */
#define NBUF_SZ (sizeof(uintmax) * 8)
static char nbuf[NBUF_SZ];
static struct spin_lock print_lock = SPIN_LOCK_INITIALIZER;
static struct ke_spin_lock print_lock = KE_SPIN_LOCK_INITIALIZER;
static int
_printu(char *buf, uintmax num, uint base, int cap)
@ -64,6 +64,7 @@ _vprintf(const char *fmt, va_list args)
case 'p':
sz_ptr = 1;
base = 16;
usignf = 1;
goto pnum;
case 'd':
goto pnum;
@ -146,9 +147,9 @@ kprintf(const char *fmt, ...)
va_list args;
va_start(args, fmt);
spin_lock_acq(&print_lock);
ke_spin_lock_acq(&print_lock);
ret = _vprintf(fmt, args);
spin_lock_rel(&print_lock);
ke_spin_lock_rel(&print_lock);
va_end(args);
return ret;
@ -159,9 +160,9 @@ kvprintf(const char *fmt, va_list args)
{
int ret;
spin_lock_acq(&print_lock);
ke_spin_lock_acq(&print_lock);
ret = _vprintf(fmt, args);
spin_lock_rel(&print_lock);
ke_spin_lock_rel(&print_lock);
return ret;
}

View File

@ -1,5 +1,5 @@
#include <kern/cdef.h>
#include <kern/spin_lock.h>
#include <common/cdef.h>
#include <ke/spin_lock.h>
static inline uint32
_spin_lock_get_ticket(uint32 val)
@ -14,13 +14,13 @@ _spin_lock_get_owner(uint32 val)
}
void
spin_lock_init(struct spin_lock *lock)
ke_spin_lock_init(struct ke_spin_lock *lock)
{
atomic_store(&lock->val, 0);
}
void
spin_lock_acq(struct spin_lock *lock)
ke_spin_lock_acq(struct ke_spin_lock *lock)
{
uint32 val;
@ -37,14 +37,14 @@ spin_lock_acq(struct spin_lock *lock)
}
void
spin_lock_rel(struct spin_lock *lock)
ke_spin_lock_rel(struct ke_spin_lock *lock)
{
// increment ticket
atomic_fetch_add(&lock->val, 1);
}
int
spin_lock_try_acq(struct spin_lock *lock)
ke_spin_lock_try_acq(struct ke_spin_lock *lock)
{
uint32 val;

mm/CMakeLists.txt (new file, 7 lines)
View File

@ -0,0 +1,7 @@
set(SUBMODULE mm)
set(CC_SRC
phys.c
)
include(${MK}/kern.cmake)

mm/phys.c (new file, 38 lines)
View File

@ -0,0 +1,38 @@
#include <mm/phys.h>
#include <arch/pmap.h>
struct mm_phys_seg {
mm_paddr start;
mm_paddr stop;
};
static ATTR_UNUSED struct list_entry _freelist[MM_PHYS_MAX_POOLS][MM_PHYS_ORDER_MAX];
static struct mm_phys_seg _phys_segs[ARCH_PMAP_MAX_PHYS_SEGS];
static usize _phys_segs_sz = 0;
static struct mm_phys_seg _reserve_segs[ARCH_PMAP_MAX_PHYS_SEGS];
static usize _reserve_segs_sz = 0;
static void
_set_phys_seg(struct mm_phys_seg *seg, mm_paddr start, usize len)
{
seg->start = start;
seg->stop = start + len - 1;
}
void
mm_phys_add_phys_seg(mm_paddr start, usize len)
{
KASSERT(_phys_segs_sz < ARCH_PMAP_MAX_PHYS_SEGS, "too many physical segments!");
_set_phys_seg(&_phys_segs[_phys_segs_sz], start, len);
_phys_segs_sz++;
}
void
mm_phys_add_reserved_seg(mm_paddr start, usize len)
{
KASSERT(_reserve_segs_sz < ARCH_PMAP_MAX_PHYS_SEGS, "too many reserved segments!");
_set_phys_seg(&_reserve_segs[_reserve_segs_sz], start, len);
_reserve_segs_sz++;
}

View File

@ -31,5 +31,5 @@ SECTIONS
*(COMMON)
}
KERN_IMG_STOP = .;
ARCH_ML_KIMAGE_STOP = .;
}

View File

@ -7,4 +7,3 @@ set(CC_SRC
)
include(${MK}/kern.cmake)

View File

@ -1,8 +1,8 @@
#include <kern/cdef.h>
#include <kern/libkern.h>
#include <kern/avl_tree.h>
#include <common/cdef.h>
#include <common/libkern.h>
#include <common/avl_tree.h>
#include <test/ktest.h>
#include <kern/brute.h>
#include <ke/brute.h>
struct test_node {
struct avl_node tree_entry;

View File

@ -1,7 +1,7 @@
#include <test/ktest.h>
#include <kern/cdef.h>
#include <kern/print.h>
#include <kern/kinit.h>
#include <common/cdef.h>
#include <ke/print.h>
#include <ke/kinit.h>
static uint ktest_cases = 0;

View File

@ -1,7 +1,7 @@
#include <test/ktest.h>
#include <kern/cdef.h>
#include <kern/list.h>
#include <kern/libkern.h>
#include <common/cdef.h>
#include <common/list.h>
#include <common/libkern.h>
#define ARR_SZ(arr) (sizeof(arr) / sizeof((arr)[0]))

View File

@ -1,6 +1,6 @@
#include <kern/cdef.h>
#include <kern/libkern.h>
#include <kern/brute.h>
#include <common/cdef.h>
#include <common/libkern.h>
#include <ke/brute.h>
#include <test/ktest.h>
#define MAX_ELE (10)