From e573ec96b8311308c38df2429b967641277b874b Mon Sep 17 00:00:00 2001
From: quackerd
Date: Tue, 23 Jun 2020 16:20:23 +0800
Subject: [PATCH] wip

---
 CMakeLists.txt      | 20 +++++++------
 arch/brute.c        |  3 +-
 arch/main.c         | 70 ++++++++++++++++++++++++++++-----------------
 arch/pmap.c         | 67 ++++++++++++++++++++++---------------------
 arch/pmap_p.h       | 12 --------
 inc/arch/brute.h    |  4 ++-
 inc/arch/pmap.h     |  9 ++++++
 inc/common/cdef.h   |  2 ++
 inc/common/consts.h |  3 ++
 inc/ke/print.h      | 10 +++++++
 inc/mm/phys.h       | 21 +++++++-------
 mm/phys.c           |  1 +
 12 files changed, 130 insertions(+), 92 deletions(-)
 delete mode 100644 arch/pmap_p.h
 create mode 100644 inc/common/consts.h

diff --git a/CMakeLists.txt b/CMakeLists.txt
index f196615..ddd9128 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -22,11 +22,11 @@ set(TOOLCHAINS
         LD
         DMP)
 
-FOREACH(f IN LISTS TOOLCHAINS)
-    if(NOT ${f})
+FOREACH (f IN LISTS TOOLCHAINS)
+    if (NOT ${f})
         message(FATAL_ERROR "Toolchain ${f} cannot be found.")
-    endif()
-endforeach(f)
+    endif ()
+endforeach (f)
 
 set(CC_WARN_IGNORE
         -Wno-gnu-statement-expression # this means we can assign the return val of the last statment in a block, useful in MAX, MIN, etc.
@@ -78,15 +78,15 @@ set(SUBMODULES
         test)
 
 # process submodules
-FOREACH(f IN LISTS SUBMODULES)
+FOREACH (f IN LISTS SUBMODULES)
     add_subdirectory(${f})
-endforeach(f)
+endforeach (f)
 
 # process dependencies
-FOREACH(f IN LISTS SUBMODULES)
+FOREACH (f IN LISTS SUBMODULES)
     set(OBJS ${OBJS} ${OBJS_${f}})
     set(TARGETS ${TARGETS} ${TARGET_${f}})
-endforeach(f)
+endforeach (f)
 
 
 # set target names
@@ -136,9 +136,11 @@ add_custom_target(${proj}_iso ALL
 
 # hack for clion not parsing custom targets
 if ($ENV{CLION_IDE})
+    SET(CMAKE_C_COMPILER ${CC})
     include_directories(${INC})
     add_executable(kernel ${G_CC_SRC})
+    add_definitions(-DKOPT_DBG)
     target_compile_options(kernel PRIVATE ${CC_FLAGS})
-endif()
+endif ()
 
 
diff --git a/arch/brute.c b/arch/brute.c
index 3ceafd9..d418647 100644
--- a/arch/brute.c
+++ b/arch/brute.c
@@ -1,6 +1,7 @@
 #include
 
-void arch_brute()
+ATTR_NORETURN void
+arch_brute()
 {
     while(1) {}
 }
diff --git a/arch/main.c b/arch/main.c
index bdece26..13d4e89 100644
--- a/arch/main.c
+++ b/arch/main.c
@@ -1,65 +1,81 @@
 #include
+#include
 #include
 #include
 #include
-#include
 #include
+#include
+#include
 
-// private headers
 #include "multiboot2.h"
-#include "pmap_p.h"
-
-static const char* _loader_name;
 
 // kernel entry point
-extern void kmain();
+extern void
+kmain();
 
-ATTR_USED void
-arch_main(void *mb_info)
+/*
+ * process multiboot info and populate various subsystems
+ * after this call, the original multiboot info can be safely discarded
+ */
+static void
+proc_mbinfo(void *mb_info)
 {
-    /* init printf related stuff */
-    arch_print_init();
-
-    kprintf("Processing multiboot info @ 0x%p...\n", mb_info);
+    uint8 mmap_detected = 0;
 
     for (struct multiboot_tag *tag = (struct multiboot_tag *) ((uintptr) mb_info + 8);
          tag->type != MULTIBOOT_TAG_TYPE_END;
         tag = (struct multiboot_tag *) ((multiboot_uint8_t *) tag + ((tag->size + 7) & ~7u))) {
-//        kprintf("Tag 0x%p: %d, Size %d\n", (void *) tag, tag->type, tag->size);
+        PDBG("Tag 0x%p: %d, Size %d", (void *) tag, tag->type, tag->size);
         switch (tag->type) {
             case MULTIBOOT_TAG_TYPE_MMAP:
-                kprintf("Found multiboot memory map.\n");
+                mmap_detected = 1;
+                PDBG("Detected MultiBoot memory map.");
                 for (struct multiboot_mmap_entry *entry = ((struct multiboot_tag_mmap *) tag)->entries;
                      (multiboot_uint8_t *) entry < (multiboot_uint8_t *) tag + tag->size;
                      entry = (multiboot_memory_map_t *) ((uintptr) entry + ((struct multiboot_tag_mmap *) tag)->entry_size)) {
-                    kprintf("Adding to pmap seg: base = 0x%lx,"
-                            " length = 0x%lx, type = 0x%x.\n",
-                            (ulong) entry->addr,
-                            (ulong) entry->len,
-                            entry->type);
+
+                    PDBG("Adding to pmap seg: base = 0x%lx,"
+                         " length = 0x%lx, type = 0x%x.",
+                         (ulong) entry->addr,
+                         (ulong) entry->len,
+                         entry->type);
+
                     if (entry->type == MULTIBOOT_MEMORY_AVAILABLE) {
                         // add physical segments to mm phys subsystem
-                        arch_mem_addseg(entry->addr, entry->len);
+                        archp_mem_addseg(entry->addr, entry->len);
                     }
                 }
                 break;
             case MULTIBOOT_TAG_TYPE_BOOT_LOADER_NAME:
-                kprintf("Found multiboot loader name.\n");
-                _loader_name = ((struct multiboot_tag_string *) tag)->string;
+                PDBG("Detected bootloader: %s", ((struct multiboot_tag_string *) tag)->string);
                 break;
             default:
-                kprintf("Ignoring multiboot tag type: %d size: 0x%x\n", tag->type, tag->size);
+                PDBG("Ignoring MultiBoot tag type: %d size: 0x%x", tag->type, tag->size);
                 break;
         }
     }
 
-    kprintf("BoND is loaded by: %s\n", _loader_name);
-    kprintf("kernel start: 0x%p end: 0x%p\n", (void*)KERN_IMG_START, (void*)ARCH_ML_KIMAGE_STOP);
+    if (!mmap_detected) {
+        BRUTE("proc_mbinfo: could not find MMAP tag.");
+    }
+}
 
-    kprintf("Initializing memory...");
-    arch_mem_init();
+ATTR_USED void
+arch_main(void *mb_info)
+{
+    arch_print_init();
+
+    PINFO("BoND %s. Kernel loaded at: 0x%p size: 0x%lx.", KVERSION, (void *) ARCH_ML_KIMAGE_START,
+          (usize) (ARCH_ML_KIMAGE_STOP - ARCH_ML_KIMAGE_START));
+
+    PINFO("Processing MultiBoot struct at 0x%p...", mb_info);
+    proc_mbinfo(mb_info);
+    // at this point we don't need access to multiboot info anymore -> we can freely overwrite this region
+
+    PINFO("Initializing early memory...");
+    archp_mem_init();
 
     kmain();
 }
diff --git a/arch/pmap.c b/arch/pmap.c
index c8ef6c6..6890399 100644
--- a/arch/pmap.c
+++ b/arch/pmap.c
@@ -3,26 +3,26 @@
 #include
 #include
 
-#include "pmap_p.h"
 #include "paging.h"
 
-struct arch_pmap_segs {
-    mm_paddr start;
-    mm_paddr stop;
+struct arch_pmap_segs
+{
+    mm_paddr base;
+    usize len;
 };
 
 // the physical memory segments information obtained from multiboot info
-static struct arch_pmap_segs _phys_segs[ARCH_PMAP_MAX_PHYS_SEGS];
-static usize _phys_segs_sz = 0;
+static struct arch_pmap_segs phys_segs[ARCH_PMAP_MAX_PHYS_SEGS];
+static usize phys_segs_sz = 0;
 
 // the base addr for mm_page structures
 static mm_paddr _mm_pages_base;
 
 // initializes _pmap region
 static void
-_pmap_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
+pmap_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
 {
-    usize high_mem = _phys_segs[_phys_segs_sz - 1].stop;
+    usize high_mem = phys_segs[phys_segs_sz - 1].base + phys_segs[phys_segs_sz - 1].len - 1;
 
     if (high_mem >= ARCH_ML_MAX_RAM) {
         BRUTE("Only supports maximum %ld bytes RAM", ARCH_ML_MAX_RAM);
@@ -44,16 +44,16 @@ _pmap_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
 }
 
 static void
-_mm_pg_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
+mm_pg_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
 {
 
 }
 
 // initializes kernel mapping
 static void
-_kern_mapping_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
+kern_mapping_init(mm_paddr *cur_addr, arch_pml4e *kern_pml4)
 {
-    // map the kernel with 2MB mapping now
+    // map the kernel with 4KB mapping now
     KASSERT((ARCH_ML_KIMAGE_PADDR & (PDE_MAPPING_SZ - 1)) == 0, "kernel vaddr not 2MB aligned.");
     KASSERT(((uintptr) KERN_IMG_START & (PDE_MAPPING_SZ - 1)) == 0, "kernel paddr not 2MB aligned.");
 
@@ -97,29 +97,32 @@ arch_pmap_map(mm_paddr paddr, ATTR_UNUSED usize sz)
 mm_paddr
 arch_paddr_to_mm_page(mm_paddr paddr)
 {
-    return (paddr / ARCH_KPAGE_SZ) * sizeof(struct mm_page) + _mm_pages_base;
+    return (paddr / ARCH_KPAGE_SZ) * sizeof(struct mm_phys_page) + _mm_pages_base;
 }
 
-// adds an available physical segment to _phys_segs
+// adds an available physical segment to phys_segs
 void
-arch_mem_addseg(mm_paddr start, usize len)
+archp_mem_addseg(mm_paddr start, usize len)
 {
-    KASSERT(_phys_segs_sz < ARCH_PMAP_MAX_PHYS_SEGS, "too many physical segments!");
-    _phys_segs[_phys_segs_sz].start = start;
-    _phys_segs[_phys_segs_sz].stop = start + len - 1;
-    _phys_segs_sz++;
+    if (phys_segs_sz >= ARCH_PMAP_MAX_PHYS_SEGS) {
+        BRUTE("archp_mem_addseg: too many physical segments.");
+    }
+    phys_segs[phys_segs_sz].base = start;
+    phys_segs[phys_segs_sz].len = len;
+    phys_segs_sz++;
 }
 
-// sets up paging for kernel and mm_pages for mm_phys
-// specifically, we do:
-// 1. Map the kernel to KERN_BASE
-// 2. Allocate mm_page for the all physical memory (avail and unavail) and put them after the kernel paddr
-// 2.5. Map all mm_page (s) to KERN_MM_PAGE_START
-// 3. Direct map all available physical memory to PMAP_BASE
-// 4. Save the mapping and switch the page table to the new table
-// 5. Save the information to mm_phys for future phys setup
+/* sets up initial paging and mm_phys_pages
+ * specifically, we do:
+ * 1. Map the kernel to KERN_BASE
+ * 2. Allocate mm_page for all the physical memory (avail and unavail) and put them after the kernel paddr
+ * 2.5. Map all mm_page (s) to KERN_MM_PAGE_START
+ * 3. Direct map all available physical memory to PMAP_BASE
+ * 4. Save the mapping and switch the page table to the new table
+ * 5. Save the information to mm_phys for future phys setup
+ */
 void
-arch_mem_init()
+archp_mem_init()
 {
     // we use 2M (PDE) pages to map the kernel so align the physical address to 2MB
     mm_paddr cur_addr = ALIGN_UP2(ARCH_ML_KIMAGE_PADDR, PDE_MAPPING_SZ);
@@ -129,13 +132,13 @@ arch_mem_init()
     memset(kern_pml4, 0, ARCH_KPAGE_SZ);
     cur_addr += ARCH_KPAGE_SZ;
 
-    _kern_mapping_init(&cur_addr, kern_pml4);
-    _mm_pg_init(&cur_addr, kern_pml4);
-    _pmap_init(&cur_addr, kern_pml4);
+    kern_mapping_init(&cur_addr, kern_pml4);
+    mm_pg_init(&cur_addr, kern_pml4);
+    pmap_init(&cur_addr, kern_pml4);
 
     // copy the physical segments information to mm_phys
-    for (usize i = 0; i < _phys_segs_sz; i++) {
-        mm_phys_add_phys_seg(_phys_segs[i].start, _phys_segs[i].stop);
+    for (usize i = 0; i < phys_segs_sz; i++) {
+        mm_phys_add_phys_seg(phys_segs[i].base, phys_segs[i].len);
     }
 
     // add reserved segment information to mm_phys
diff --git a/arch/pmap_p.h b/arch/pmap_p.h
deleted file mode 100644
index fb26e5a..0000000
--- a/arch/pmap_p.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#pragma once
-
-#include
-
-#define ARCH_PMAP_TO_PHYS(pmaddr) ((pmaddr) - ARCH_ML_PMAP_START)
-#define ARCH_PHYS_TO_PMAP(paddr) (ARCH_ML_PMAP_START + (paddr))
-
-void
-arch_mem_init();
-
-void
-arch_mem_addseg(uintptr start, usize len);
diff --git a/inc/arch/brute.h b/inc/arch/brute.h
index 1552120..c538359 100644
--- a/inc/arch/brute.h
+++ b/inc/arch/brute.h
@@ -1,3 +1,5 @@
 #pragma once
+#include
 
-void arch_brute();
+ATTR_NORETURN void
+arch_brute();
diff --git a/inc/arch/pmap.h b/inc/arch/pmap.h
index 7a7c303..d5fb661 100644
--- a/inc/arch/pmap.h
+++ b/inc/arch/pmap.h
@@ -22,6 +22,15 @@ arch_pmap_mapdev(mm_paddr paddr, usize size);
 #define ARCH_VADDR_ATTR_UNCACHED (0x2u)
 #define ARCH_VADDR_ATTR_READONLY (0x4u)
 #define ARCH_VADDR_ATTR_NX (0x8u)
+
 int
 arch_map_vaddr(void * base, mm_paddr paddr, uintptr vaddr, usize sz, uint attr);
 
+// module-private functions
+#define ARCH_PMAP_TO_PHYS(pmaddr) ((pmaddr) - ARCH_ML_PMAP_START)
+#define ARCH_PHYS_TO_PMAP(paddr) (ARCH_ML_PMAP_START + (paddr))
+void
+archp_mem_init();
+
+void
+archp_mem_addseg(uintptr start, usize len);
diff --git a/inc/common/cdef.h b/inc/common/cdef.h
index 263a27d..99395f6 100644
--- a/inc/common/cdef.h
+++ b/inc/common/cdef.h
@@ -36,5 +36,7 @@ typedef unsigned short ushort;
 #define ATTR_SECTION(x) __attribute__ ((section (#x)))
 #define ATTR_ALIGN(x) __attribute__((aligned (x)))
 #define ATTR_FMT_PRINTF __attribute__((format (printf, 1, 2)))
+#define ATTR_NORETURN _Noreturn
+
 
 #define BOCHS_BREAK __asm__("xchg %bx, %bx")
diff --git a/inc/common/consts.h b/inc/common/consts.h
new file mode 100644
index 0000000..2cbdb19
--- /dev/null
+++ b/inc/common/consts.h
@@ -0,0 +1,3 @@
+#pragma once
+
+#define KVERSION ("0.1")
diff --git a/inc/ke/print.h b/inc/ke/print.h
index bc68e0b..2262a41 100644
--- a/inc/ke/print.h
+++ b/inc/ke/print.h
@@ -8,3 +8,13 @@ kprintf(const char *fmt, ...);
 
 int
 kvprintf(const char *fmt, va_list args);
+
+#ifdef KOPT_DBG
+#define PDBG(fmt, ...) (kprintf("[DEBUG] " fmt "\n", ##__VA_ARGS__))
+#else
+#define PDBG(fmt, ...)
+#endif
+
+#define PWARN(fmt, ...) (kprintf("[WARN] " fmt "\n", ##__VA_ARGS__))
+#define PINFO(fmt, ...) (kprintf("[INFO] " fmt "\n", ##__VA_ARGS__))
+#define PERR(fmt, ...) (kprintf("[ERROR] " fmt "\n", ##__VA_ARGS__))
diff --git a/inc/mm/phys.h b/inc/mm/phys.h
index 2259e15..156b886 100644
--- a/inc/mm/phys.h
+++ b/inc/mm/phys.h
@@ -6,23 +6,24 @@
 typedef uintptr mm_paddr;
 
-struct mm_page {
+struct mm_phys_page {
     struct ke_spin_lock page_lock;        // page lock
-    mm_paddr phys_addr;                   // physical address of the page
-    uint8 phys_order;                     // order of the page in the buddy allocator
-    uint8 phys_pool;                      // which pool it belongs to in the buddy allocator
     struct list_entry phys_flist_ent;     // list entry for the free list in the buddy allocator
-};
+    uint8 order;                          // order of the page in the buddy allocator
+    uint8 free;
+} ATTR_ALIGN(8);
 
+// maximum allocated page sz = 2^10 * 4096
 #define MM_PHYS_ORDER_MAX (10)
 #define MM_PHYS_ORDER_FREE (MM_PAGE_ORDER_MAX + 1)
 
-#define MM_PHYS_POOL_DMA (0)
-#define MM_PHYS_POOL_GENERIC (1)
+// DMA ZONE, 0 - 16MB
+#define MM_PHYS_ARENA_DMA (0)
+#define MM_PHYS_ARENA_MAX_ADDR (16 * 1024 * 1024)
+
+// GENERIC ZONE
+#define MM_PHYS_ARENA_GENERIC (1)
 
 #define MM_PHYS_MAX_POOLS (MM_PHYS_POOL_DMA + 1)
 
 void
 mm_phys_add_phys_seg(mm_paddr start, usize len);
-
-void
-mm_phys_add_reserved_seg(mm_paddr start, usize len);
diff --git a/mm/phys.c b/mm/phys.c
index f253b12..2f98de3 100644
--- a/mm/phys.c
+++ b/mm/phys.c
@@ -6,6 +6,7 @@ struct mm_phys_seg {
     mm_paddr stop;
 };
 
+
 static ATTR_UNUSED struct list_entry _freelist[MM_PHYS_MAX_POOLS][MM_PHYS_ORDER_MAX];
 
 static struct mm_phys_seg _phys_segs[ARCH_PMAP_MAX_PHYS_SEGS];