commit e573ec96b8
parent a9f2b48a95

    wip
@@ -22,11 +22,11 @@ set(TOOLCHAINS
         LD
         DMP)
 
-FOREACH(f IN LISTS TOOLCHAINS)
-    if(NOT ${f})
+FOREACH (f IN LISTS TOOLCHAINS)
+    if (NOT ${f})
         message(FATAL_ERROR "Toolchain ${f} cannot be found.")
-    endif()
-endforeach(f)
+    endif ()
+endforeach (f)
 
 set(CC_WARN_IGNORE
         -Wno-gnu-statement-expression # the value of a ({ ... }) block is that of its last statement, useful in MAX, MIN, etc.
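The -Wno-gnu-statement-expression comment above refers to GNU statement expressions, where a ({ ... }) block evaluates to its last statement. The MAX/MIN macros it mentions are not part of this diff; the following is a generic sketch of such a macro, not code taken from this repository:

#include <stdio.h>

/* A statement-expression MAX: the ({ ... }) block evaluates to its last
 * statement, so each argument is evaluated exactly once. Clang warns about
 * this GNU extension unless -Wno-gnu-statement-expression is passed. */
#define MAX(a, b) ({            \
    __typeof__(a) _a = (a);     \
    __typeof__(b) _b = (b);     \
    _a > _b ? _a : _b;          \
})

int main(void)
{
    int x = 3;
    printf("%d\n", MAX(x++, 5)); /* prints 5; x++ is evaluated only once */
    return 0;
}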
@@ -78,15 +78,15 @@ set(SUBMODULES
         test)
 
 # process submodules
-FOREACH(f IN LISTS SUBMODULES)
+FOREACH (f IN LISTS SUBMODULES)
     add_subdirectory(${f})
-endforeach(f)
+endforeach (f)
 
 # process dependencies
-FOREACH(f IN LISTS SUBMODULES)
+FOREACH (f IN LISTS SUBMODULES)
     set(OBJS ${OBJS} ${OBJS_${f}})
     set(TARGETS ${TARGETS} ${TARGET_${f}})
-endforeach(f)
+endforeach (f)
 
 # set target names
 
@@ -136,9 +136,11 @@ add_custom_target(${proj}_iso ALL
 
 # hack for clion not parsing custom targets
 if ($ENV{CLION_IDE})
     SET(CMAKE_C_COMPILER ${CC})
     include_directories(${INC})
     add_executable(kernel ${G_CC_SRC})
-endif()
+    add_definitions(-DKOPT_DBG)
+    target_compile_options(kernel PRIVATE ${CC_FLAGS})
+endif ()
 
 
@@ -1,6 +1,7 @@
 #include <arch/brute.h>
 
-void arch_brute()
+ATTR_NORETURN void
+arch_brute()
 {
     while(1) {}
 }
arch/main.c | 70
@@ -1,65 +1,81 @@
 #include <common/cdef.h>
+#include <common/consts.h>
 #include <ke/print.h>
 #include <arch/print.h>
 #include <arch/brute.h>
-#include <mm/phys.h>
 #include <arch/mlayout.h>
 #include <arch/pmap.h>
+#include <mm/phys.h>
 
 // private headers
 #include "multiboot2.h"
+#include "pmap_p.h"
 
-static const char* _loader_name;
-
 // kernel entry point
-extern void kmain();
+extern void
+kmain();
 
-ATTR_USED void
-arch_main(void *mb_info)
+/*
+ * process multiboot info and populate various subsystems
+ * after this point, the original multiboot info can be safely discarded
+ */
+static void
+proc_mbinfo(void *mb_info)
 {
-    /* init printf related stuff */
-    arch_print_init();
-
-    kprintf("Processing multiboot info @ 0x%p...\n", mb_info);
     uint8 mmap_detected = 0;
 
     for (struct multiboot_tag *tag = (struct multiboot_tag *) ((uintptr) mb_info + 8);
          tag->type != MULTIBOOT_TAG_TYPE_END;
          tag = (struct multiboot_tag *) ((multiboot_uint8_t *) tag
                  + ((tag->size + 7) & ~7u))) {
-        // kprintf("Tag 0x%p: %d, Size %d\n", (void *) tag, tag->type, tag->size);
+        PDBG("Tag 0x%p: %d, Size %d", (void *) tag, tag->type, tag->size);
         switch (tag->type) {
             case MULTIBOOT_TAG_TYPE_MMAP:
-                kprintf("Found multiboot memory map.\n");
                 mmap_detected = 1;
+                PDBG("Detected MultiBoot memory map.");
                 for (struct multiboot_mmap_entry *entry = ((struct multiboot_tag_mmap *) tag)->entries;
                      (multiboot_uint8_t *) entry < (multiboot_uint8_t *) tag + tag->size;
                      entry = (multiboot_memory_map_t *) ((uintptr) entry +
                              ((struct multiboot_tag_mmap *) tag)->entry_size)) {
-                    kprintf("Adding to pmap seg: base = 0x%lx,"
-                            " length = 0x%lx, type = 0x%x.\n",
-                            (ulong) entry->addr,
-                            (ulong) entry->len,
-                            entry->type);
+                    PDBG("Adding to pmap seg: base = 0x%lx,"
+                         " length = 0x%lx, type = 0x%x.",
+                         (ulong) entry->addr,
+                         (ulong) entry->len,
+                         entry->type);
 
                     if (entry->type == MULTIBOOT_MEMORY_AVAILABLE) {
                         // add physical segments to mm phys subsystem
-                        arch_mem_addseg(entry->addr, entry->len);
+                        archp_mem_addseg(entry->addr, entry->len);
                     }
                 }
                 break;
             case MULTIBOOT_TAG_TYPE_BOOT_LOADER_NAME:
-                kprintf("Found multiboot loader name.\n");
-                _loader_name = ((struct multiboot_tag_string *) tag)->string;
+                PDBG("Detected bootloader: %s", ((struct multiboot_tag_string *) tag)->string);
                 break;
             default:
-                kprintf("Ignoring multiboot tag type: %d size: 0x%x\n", tag->type, tag->size);
+                PDBG("Ignoring MultiBoot tag type: %d size: 0x%x", tag->type, tag->size);
                 break;
         }
     }
 
-    kprintf("BoND is loaded by: %s\n", _loader_name);
-    kprintf("kernel start: 0x%p end: 0x%p\n", (void*)KERN_IMG_START, (void*)ARCH_ML_KIMAGE_STOP);
     if (!mmap_detected) {
+        BRUTE("proc_mbinfo: could not find MMAP tag.");
     }
 }
 
-    kprintf("Initializing memory...");
-    arch_mem_init();
+ATTR_USED void
+arch_main(void *mb_info)
+{
+    arch_print_init();
+
+    PINFO("BoND %s. Kernel loaded at: 0x%p size: 0x%lx.", KVERSION, (void *) ARCH_ML_KIMAGE_START,
+          (usize) (ARCH_ML_KIMAGE_STOP - ARCH_ML_KIMAGE_START));
+
+    PINFO("Processing MultiBoot struct at 0x%p...", mb_info);
+    proc_mbinfo(mb_info);
+    // at this point we don't need access to multiboot info anymore -> we can freely overwrite this region
+
+    PINFO("Initializing early memory...");
+    archp_mem_init();
 
     kmain();
 }
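The tag stride in proc_mbinfo, ((tag->size + 7) & ~7u), rounds each tag's size up to the next multiple of 8, since multiboot2 tags are 8-byte aligned. A standalone check of that arithmetic, as an illustration only (not repository code):

#include <assert.h>
#include <stdint.h>

/* Same expression as the loop increment above: round size up to 8 bytes. */
static uintptr_t align8(uintptr_t size)
{
    return (size + 7u) & ~(uintptr_t)7u;
}

int main(void)
{
    assert(align8(16) == 16); /* already aligned: unchanged */
    assert(align8(17) == 24); /* rounded up to the next 8-byte boundary */
    assert(align8(1)  == 8);
    return 0;
}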
arch/pmap.c | 67
@@ -3,26 +3,26 @@
 #include <mm/phys.h>
 #include <common/libkern.h>
 
 #include "pmap_p.h"
 #include "paging.h"
 
-struct arch_pmap_segs {
-    mm_paddr start;
-    mm_paddr stop;
+struct arch_pmap_segs
+{
+    mm_paddr base;
+    usize len;
 };
 
 // the physical memory segments information obtained from multiboot info
-static struct arch_pmap_segs _phys_segs[ARCH_PMAP_MAX_PHYS_SEGS];
-static usize _phys_segs_sz = 0;
+static struct arch_pmap_segs phys_segs[ARCH_PMAP_MAX_PHYS_SEGS];
+static usize phys_segs_sz = 0;
 
 // the base addr for mm_page structures
 static mm_paddr _mm_pages_base;
 
 // initializes _pmap region
 static void
-_pmap_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
+pmap_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
 {
-    usize high_mem = _phys_segs[_phys_segs_sz - 1].stop;
+    usize high_mem = phys_segs[phys_segs_sz - 1].stop;
 
     if (high_mem >= ARCH_ML_MAX_RAM) {
         BRUTE("Only supports maximum %ld bytes RAM", ARCH_ML_MAX_RAM);
@@ -44,16 +44,16 @@ _pmap_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
 }
 
 static void
-_mm_pg_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
+mm_pg_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
 {
 
 }
 
 // initializes kernel mapping
 static void
-_kern_mapping_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
+kern_mapping_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
 {
-    // map the kernel with 2MB mapping now
+    // map the kernel with 4KB mapping now
     KASSERT((ARCH_ML_KIMAGE_PADDR & (PDE_MAPPING_SZ - 1)) == 0, "kernel vaddr not 2MB aligned.");
     KASSERT(((uintptr) KERN_IMG_START & (PDE_MAPPING_SZ - 1)) == 0, "kernel paddr not 2MB aligned.");
 
@@ -97,29 +97,32 @@ arch_pmap_map(mm_paddr paddr, ATTR_UNUSED usize sz)
 mm_paddr
 arch_paddr_to_mm_page(mm_paddr paddr)
 {
-    return (paddr / ARCH_KPAGE_SZ) * sizeof(struct mm_page) + _mm_pages_base;
+    return (paddr / ARCH_KPAGE_SZ) * sizeof(struct mm_phys_page) + _mm_pages_base;
 }
 
-// adds an available physical segment to _phys_segs
+// adds an available physical segment to phys_segs
 void
-arch_mem_addseg(mm_paddr start, usize len)
+archp_mem_addseg(mm_paddr start, usize len)
 {
-    KASSERT(_phys_segs_sz < ARCH_PMAP_MAX_PHYS_SEGS, "too many physical segments!");
-    _phys_segs[_phys_segs_sz].start = start;
-    _phys_segs[_phys_segs_sz].stop = start + len - 1;
-    _phys_segs_sz++;
+    if (phys_segs_sz >= ARCH_PMAP_MAX_PHYS_SEGS) {
+        BRUTE("archp_mem_addseg: too many physical segments.");
+    }
+    phys_segs[phys_segs_sz].base = start;
+    phys_segs[phys_segs_sz].len = start + len - 1;
+    phys_segs_sz++;
 }
 
-// sets up paging for kernel and mm_pages for mm_phys
-// specifically, we do:
-// 1. Map the kernel to KERN_BASE
-// 2. Allocate mm_page for the all physical memory (avail and unavail) and put them after the kernel paddr
-// 2.5. Map all mm_page (s) to KERN_MM_PAGE_START
-// 3. Direct map all available physical memory to PMAP_BASE
-// 4. Save the mapping and switch the page table to the new table
-// 5. Save the information to mm_phys for future phys setup
+/* sets up initial paging and mm_phys_pages
+ * specifically, we do:
+ * 1. Map the kernel to KERN_BASE
+ * 2. Allocate mm_page for all physical memory (avail and unavail) and put them after the kernel paddr
+ * 2.5. Map all mm_page(s) to KERN_MM_PAGE_START
+ * 3. Direct map all available physical memory to PMAP_BASE
+ * 4. Save the mapping and switch the page table to the new table
+ * 5. Save the information to mm_phys for future phys setup
+ */
 void
-arch_mem_init()
+archp_mem_init()
 {
     // we use 2M (PDE) pages to map the kernel so align the physical address to 2MB
     mm_paddr cur_addr = ALIGN_UP2(ARCH_ML_KIMAGE_PADDR, PDE_MAPPING_SZ);
@@ -129,13 +132,13 @@ arch_mem_init()
     memset(kern_pml4, 0, ARCH_KPAGE_SZ);
     cur_addr += ARCH_KPAGE_SZ;
 
-    _kern_mapping_init(&cur_addr, kern_pml4);
-    _mm_pg_init(&cur_addr, kern_pml4);
-    _pmap_init(&cur_addr, kern_pml4);
+    kern_mapping_init(&cur_addr, kern_pml4);
+    mm_pg_init(&cur_addr, kern_pml4);
+    pmap_init(&cur_addr, kern_pml4);
 
     // copy the physical segments information to mm_phys
-    for (usize i = 0; i < _phys_segs_sz; i++) {
-        mm_phys_add_phys_seg(_phys_segs[i].start, _phys_segs[i].stop);
+    for (usize i = 0; i < phys_segs_sz; i++) {
+        mm_phys_add_phys_seg(phys_segs[i].start, phys_segs[i].stop);
     }
 
     // add reserved segment information to mm_phys
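archp_mem_init above rounds the physical cursor up to a 2 MiB PDE boundary with ALIGN_UP2, whose definition is not shown in this diff. A minimal sketch of a typical power-of-two align-up, under the assumption that ALIGN_UP2 behaves this way:

#include <assert.h>
#include <stdint.h>

/* Assumed semantics of ALIGN_UP2: round x up to the next multiple of the
 * power-of-two alignment a. The repo's actual macro may differ. */
#define ALIGN_UP2_SKETCH(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

int main(void)
{
    uintptr_t pde_sz = 2u * 1024u * 1024u; /* 2 MiB, stand-in for PDE_MAPPING_SZ */
    assert(ALIGN_UP2_SKETCH((uintptr_t)0x345678u, pde_sz) == 0x400000u); /* rounded up */
    assert(ALIGN_UP2_SKETCH((uintptr_t)0x400000u, pde_sz) == 0x400000u); /* already aligned */
    return 0;
}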
(deleted file)
@@ -1,12 +0,0 @@
-#pragma once
-
-#include <arch/mlayout.h>
-
-#define ARCH_PMAP_TO_PHYS(pmaddr) ((pmaddr) - ARCH_ML_PMAP_START)
-#define ARCH_PHYS_TO_PMAP(paddr) (ARCH_ML_PMAP_START + (paddr))
-
-void
-arch_mem_init();
-
-void
-arch_mem_addseg(uintptr start, usize len);
@@ -1,3 +1,5 @@
 #pragma once
+#include <common/cdef.h>
 
-void arch_brute();
+ATTR_NORETURN void
+arch_brute();
@@ -22,6 +22,15 @@ arch_pmap_mapdev(mm_paddr paddr, usize size);
 #define ARCH_VADDR_ATTR_UNCACHED (0x2u)
 #define ARCH_VADDR_ATTR_READONLY (0x4u)
 #define ARCH_VADDR_ATTR_NX (0x8u)
 
 int
 arch_map_vaddr(void * base, mm_paddr paddr, uintptr vaddr, usize sz, uint attr);
+
+// module-private functions
+#define ARCH_PMAP_TO_PHYS(pmaddr) ((pmaddr) - ARCH_ML_PMAP_START)
+#define ARCH_PHYS_TO_PMAP(paddr) (ARCH_ML_PMAP_START + (paddr))
+void
+archp_mem_init();
+
+void
+archp_mem_addseg(uintptr start, usize len);
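The ARCH_PMAP_TO_PHYS / ARCH_PHYS_TO_PMAP macros moved into this header are plain offsets into the direct-map window starting at ARCH_ML_PMAP_START. A small round-trip check on a 64-bit build, using an assumed base address since the real ARCH_ML_PMAP_START is not given in this diff:

#include <assert.h>
#include <stdint.h>

/* Assumed direct-map base for the example only, not a value from the diff. */
#define PMAP_START_HYPO ((uintptr_t)0xFFFF888000000000ull)
#define PMAP_TO_PHYS(pmaddr) ((pmaddr) - PMAP_START_HYPO)
#define PHYS_TO_PMAP(paddr)  (PMAP_START_HYPO + (paddr))

int main(void)
{
    uintptr_t paddr = 0x1000u;
    /* converting a physical address into the window and back is the identity */
    assert(PMAP_TO_PHYS(PHYS_TO_PMAP(paddr)) == paddr);
    return 0;
}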
@@ -36,5 +36,7 @@ typedef unsigned short ushort;
 #define ATTR_SECTION(x) __attribute__ ((section (#x)))
+#define ATTR_ALIGN(x) __attribute__((aligned (x)))
 #define ATTR_FMT_PRINTF __attribute__((format (printf, 1, 2)))
+#define ATTR_NORETURN _Noreturn
 
 
 #define BOCHS_BREAK __asm__("xchg %bx, %bx")
inc/common/consts.h | 3 (new file)
@@ -0,0 +1,3 @@
+#pragma once
+
+#define KVERSION ("0.1")
@@ -8,3 +8,13 @@ kprintf(const char *fmt, ...);
 int
 kvprintf(const char *fmt, va_list args);
 
+#ifdef KOPT_DBG
+#define PDBG(fmt, ...) (kprintf("[DEBUG] " fmt "\n", ##__VA_ARGS__))
+#else
+#define PDBG(fmt, ...)
+#endif
+
+#define PWARN(fmt, ...) (kprintf("[WARN] " fmt "\n", ##__VA_ARGS__))
+#define PINFO(fmt, ...) (kprintf("[INFO] " fmt "\n", ##__VA_ARGS__))
+#define PERR(fmt, ...) (kprintf("[ERROR] " fmt "\n", ##__VA_ARGS__))
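Usage sketch for the logging macros added above: PDBG only expands to a kprintf call when KOPT_DBG is defined (the CLion branch of the CMakeLists in this commit passes -DKOPT_DBG), and ##__VA_ARGS__ allows an empty argument list. The function below is illustrative, not part of the repository:

#include <ke/print.h>  /* assumes KOPT_DBG, if wanted, comes from the build, e.g. -DKOPT_DBG */

static void print_example(void *mb_info)
{
    PINFO("Processing MultiBoot struct at 0x%p...", mb_info); /* always printed, "[INFO] ..." plus newline */
    PDBG("Detected MultiBoot memory map.");                   /* no varargs; compiled away without KOPT_DBG */
    PERR("Example error: %d", -1);                            /* prints "[ERROR] Example error: -1" */
}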
@@ -6,23 +6,24 @@
 typedef uintptr mm_paddr;
 
-struct mm_page {
+struct mm_phys_page {
     struct ke_spin_lock page_lock;        // page lock
     mm_paddr phys_addr;                   // physical address of the page
-    uint8 phys_order;                     // order of the page in the buddy allocator
-    uint8 phys_pool;                      // which pool it belongs to in the buddy allocator
-    struct list_entry phys_flist_ent;     // list entry for the free list in the buddy allocator
-};
+    uint8 order;                          // order of the page in the buddy allocator
+    uint8 free;
+} ATTR_ALIGN(8);
 
 // maximum allocated page sz = 2^10 * 4096
 #define MM_PHYS_ORDER_MAX (10)
+#define MM_PHYS_ORDER_FREE (MM_PAGE_ORDER_MAX + 1)
 
-#define MM_PHYS_POOL_DMA (0)
-#define MM_PHYS_POOL_GENERIC (1)
-
-#define MM_PHYS_MAX_POOLS (MM_PHYS_POOL_DMA + 1)
+// DMA ZONE, 0 - 16MB
+#define MM_PHYS_ARENA_DMA (0)
+#define MM_PHYS_ARENA_MAX_ADDR (16 * 1024 * 1024)
+
+// GENERIC ZONE
+#define MM_PHYS_ARENA_GENERIC (1)
 
 void
 mm_phys_add_phys_seg(mm_paddr start, usize len);
 
 void
 mm_phys_add_reserved_seg(mm_paddr start, usize len);
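The comment above ("maximum allocated page sz = 2^10 * 4096") and arch_paddr_to_mm_page in arch/pmap.c come down to simple arithmetic: the largest buddy block is 2^MM_PHYS_ORDER_MAX pages, and a physical address finds its page descriptor by page index. A standalone check with assumed sizes (the page size, struct size, and base below are placeholders, not values from the diff):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uintptr_t page_sz    = 4096u;     /* assumed ARCH_KPAGE_SZ */
    const unsigned  order_max  = 10u;       /* MM_PHYS_ORDER_MAX */
    const uintptr_t struct_sz  = 64u;       /* assumed sizeof(struct mm_phys_page) */
    const uintptr_t pages_base = 0x200000u; /* assumed _mm_pages_base */

    /* largest buddy block: 2^10 pages * 4096 bytes = 4 MiB */
    assert(((uintptr_t)1u << order_max) * page_sz == 4u * 1024u * 1024u);

    /* paddr -> address of its descriptor, same shape as arch_paddr_to_mm_page */
    uintptr_t paddr = 0x5000u; /* page index 5 */
    uintptr_t desc  = (paddr / page_sz) * struct_sz + pages_base;
    assert(desc == pages_base + 5u * struct_sz);
    return 0;
}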