Eventually went with not using an in-place linked list for the PMM (don't really want to mix up the PMM and the VMM).

So yeah, physical pages are now tracked on the kernel heap. Windows does that, Linux does that, and I have to do it too, since the kernel needs extra per-page attributes (paged/non-paged) anyway.
Finished the PMM alpha and redefined all of the PMM/VMM interfaces.
The code is still broken right now.
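
To recap the scheme (a simplified restatement of the structures added below in s_pmm.h / k_pmm.h, not new code): every physical frame gets a small heap-allocated descriptor that sits on a free list while the frame is unused and in an AVL tree keyed by its base address while it is handed out.

// Sketch of the per-frame descriptor this commit introduces; the real definition
// is in k_pmm.h below and will likely grow more attributes (paged/non-paged).
typedef struct
{
    linked_list_node_t free_list_node;  // linked here while the frame is free
    avl_tree_node_t    avl_tree_node;   // keyed by base while the frame is allocated
    k_physical_addr_t  base;            // K_PAGE_SIZE-aligned physical base address
} k_physical_page_info_t;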
secXsQuared 2016-06-25 00:25:54 -07:00
parent be746b4f6f
commit 8da028f518
37 changed files with 308 additions and 157 deletions

View File

@ -1,7 +1,7 @@
#ifndef _S_ATOMIC_H_
#define _S_ATOMIC_H_
#include "s_abi.h"
#include "s_type.h"
#include "g_abi.h"
#include "g_type.h"
extern uint64_t KAPI k_interlocked_exchange(uint64_t* target, uint64_t val);

View File

@ -1,6 +1,6 @@
#ifndef _S_BOOT_H_
#define _S_BOOT_H_
#include "s_abi.h"
#include "g_abi.h"
//
// HAL Boot Info
//

View File

@ -1,8 +1,8 @@
#ifndef _S_CONTEXT_H_
#define _S_CONTEXT_H_
#include "s_abi.h"
#include "s_mm.h"
#include "g_abi.h"
#include "s_vmm.h"
#include "s_intr.h"
// This function should never return and directly context switches to the target

View File

@ -1,7 +1,7 @@
#ifndef _S_INTR_H_
#define _S_INTR_H_
#include "s_abi.h"
#include "g_abi.h"
typedef struct
{
@ -23,15 +23,12 @@ typedef enum
// IRQL APIs
typedef uint64_t k_irql_t;
#define K_IRQL_HIGH 4
#define K_IRQL_IO 3
#define K_IRQL_DPC 2
#define K_IRQL_APC 1
#define K_IRQL_LOW 0
#define K_IRQL_DISABLED_LEVEL 3
#define K_IRQL_DPC_LEVEL 2
#define K_IRQL_APC_LEVEL 1
#define K_IRQL_PASSIVE_LEVEL 0
extern KAPI k_irql_t k_raise_irql(k_irql_t irql);
extern KAPI k_irql_t k_lower_irql(k_irql_t irql);
extern KAPI k_irql_t k_set_irql(k_irql_t irql);
extern KAPI k_irql_t k_get_irql();
@ -51,4 +48,6 @@ extern void KAPI k_register_exc_handler(k_exc_type_t type, k_exc_handler_t handl
extern k_exc_handler_t KAPI k_deregister_exc_handler(uint64_t index);
extern void KAPI k_halt_cpu();
#endif

View File

@ -1,27 +0,0 @@
#ifndef _S_PMM_H_
#define _S_PMM_H_
#include "s_abi.h"
#include "s_type.h"
#define K_PAGE_SIZE 4096
typedef uint64_t k_virtual_addr_t;
typedef uint64_t k_physical_addr_t;
//
// all the address spaces passed by the kernel would be initialized by k_create_address_space
// which means the kernel area/ as well as the HAL reserved vaddr ranges would be properly mapped
//
typedef k_physical_addr_t (KAPI *k_physical_page_alloc)(k_virtual_addr_t virtual_addr);
typedef void (KAPI *k_physical_page_free)(void *v_addr, k_physical_addr_t page);
// this function should map the v_addr to p_addr for the target address space
extern void KAPI k_map_virtual_addr(k_physical_addr_t addr_space,
k_virtual_addr_t v_addr,
k_physical_addr_t p_addr,
k_physical_page_alloc alloc);
#endif

View File

@ -0,0 +1,28 @@
#ifndef _S_PMM_H_
#define _S_PMM_H_
#include "g_abi.h"
#include "g_type.h"
#define K_PAGE_SIZE 4096
typedef uint64_t k_physical_addr_t;
// note that these bases must be K_PAGE_SIZE aligned
// size is the number of pages in the region
// attr is not used yet
// if a misaligned base is detected, the kernel bug checks.
typedef struct
{
k_physical_addr_t base;
uint64_t size;
uint32_t attr;
} k_pmm_node_t;
typedef struct
{
uint64_t num_of_nodes;
k_pmm_node_t nodes[];
} k_pmm_info_t;
#endif
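
A hedged sketch of how a caller (probably the HAL boot path) might populate this structure; boot_alloc, the helper name, and the concrete addresses/sizes are illustrative assumptions, not part of this commit.

// Hypothetical: build a two-region k_pmm_info_t in one allocation, using the
// flexible array member. k_pmm_init will bug check if any base is unaligned.
k_pmm_info_t *build_pmm_info(void *(*boot_alloc)(uint64_t size))
{
    k_pmm_info_t *info = boot_alloc(sizeof(k_pmm_info_t) + 2 * sizeof(k_pmm_node_t));
    info->num_of_nodes = 2;
    info->nodes[0] = (k_pmm_node_t){.base = 0x100000,  .size = 256, .attr = 0};  // 1 MB, 256 pages
    info->nodes[1] = (k_pmm_node_t){.base = 0x1000000, .size = 512, .attr = 0};  // 16 MB, 512 pages
    return info;
}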

View File

@ -1,12 +1,30 @@
#ifndef _S_VMM_H_
#define _S_VMM_H_
#include "g_abi.h"
#include "g_type.h"
#include "s_pmm.h"
typedef uint64_t k_virtual_addr_t;
#include "s_abi.h"
#include "s_type.h"
#define K_BASE_VADDR 0xFF
#define K_END_VADDR 0xFF
//
// all address spaces passed by the kernel are initialized by k_create_address_space,
// which means the kernel area as well as the HAL-reserved vaddr ranges are properly mapped
//
typedef k_physical_addr_t (KAPI *k_physical_page_alloc)();
typedef void (KAPI *k_physical_page_free)(k_physical_addr_t page);
// this function should map the v_addr to p_addr for the target address space
extern void KAPI k_map_virtual_addr(k_physical_addr_t addr_space,
k_virtual_addr_t v_addr,
k_physical_addr_t p_addr,
k_physical_page_alloc alloc);
typedef struct
{
// the kernel always reserves this much virtual space for HAL
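
A hedged sketch of how these hooks fit together, assuming k_alloc_page from k_pmm.h is used as the k_physical_page_alloc callback; map_one_page and cr3 are illustrative names only.

// Hypothetical: back v_addr with a fresh frame and let the HAL call the same
// allocator for any page-table frames it needs while walking/creating tables.
void map_one_page(k_physical_addr_t cr3, k_virtual_addr_t v_addr)
{
    k_physical_addr_t p_addr = k_alloc_page();  // frame that backs v_addr
    k_map_virtual_addr(cr3, v_addr, p_addr, k_alloc_page);
}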

View File

@ -238,18 +238,21 @@ static void _avl_tree_swap_nodes(avl_tree_node_t *node1, avl_tree_node_t *node2)
return;
}
static avl_tree_node_t *KAPI _avl_tree_node_delete(avl_tree_node_t *root, avl_tree_node_t *node,
avl_tree_node_compare_func_t compare)
static avl_tree_node_t *KAPI _avl_tree_node_delete(avl_tree_node_t *root,
avl_tree_node_t *node,
avl_tree_node_compare_func_t compare,
avl_tree_node_t **deleted_node)
{
if (root == NULL || node == NULL || compare == NULL)
if (root == NULL || node == NULL || compare == NULL || deleted_node == NULL)
return root;
const int32_t comp = compare(root, node);
if (comp < 0)
root->right = _avl_tree_node_delete(root->right, node, compare);
root->right = _avl_tree_node_delete(root->right, node, compare, deleted_node);
else if (comp > 0)
root->left = _avl_tree_node_delete(root->left, node, compare);
root->left = _avl_tree_node_delete(root->left, node, compare, deleted_node);
else
{
*deleted_node = root;
// node with only one child or no child
if ((root->left == NULL) || (root->right == NULL))
{
@ -274,7 +277,7 @@ static avl_tree_node_t *KAPI _avl_tree_node_delete(avl_tree_node_t *root, avl_tr
_avl_tree_swap_nodes(successor, root);
// Detach the inorder successor
successor->right = _avl_tree_node_delete(successor->right, root, compare);
successor->right = _avl_tree_node_delete(successor->right, root, compare, deleted_node);
root = successor;
}
@ -403,13 +406,14 @@ void KAPI avl_tree_insert(avl_tree_t *tree, avl_tree_node_t *data)
return;
}
void KAPI avl_tree_delete(avl_tree_t *tree, avl_tree_node_t *data)
avl_tree_node_t *KAPI avl_tree_delete(avl_tree_t *tree, avl_tree_node_t *data)
{
avl_tree_node_t *node = NULL;
if (tree != NULL && data != NULL)
{
tree->root = _avl_tree_node_delete(tree->root, data, tree->compare);
tree->root = _avl_tree_node_delete(tree->root, data, tree->compare, &node);
}
return;
return node;
}
int32_t KAPI avl_tree_size(avl_tree_t *tree)
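
A small usage sketch for the new return value, mirroring what k_pmm.c later in this commit does; detach_by_base and its arguments are placeholders.

// Hypothetical: delete by a stack-allocated dummy node that only carries the key,
// then recover the enclosing record from the node the tree actually held.
k_physical_page_info_t *detach_by_base(avl_tree_t *tree, k_physical_addr_t base)
{
    k_physical_page_info_t dummy;
    dummy.base = base;
    avl_tree_node_t *removed = avl_tree_delete(tree, &dummy.avl_tree_node);
    if (removed == NULL)
        return NULL;  // no record with that base was in the tree
    return OBTAIN_STRUCT_ADDR(removed, k_physical_page_info_t, avl_tree_node);
}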

View File

@ -39,7 +39,7 @@ avl_tree_node_t *KAPI avl_tree_search(avl_tree_t *tree, avl_tree_node_t *entry);
void KAPI avl_tree_insert(avl_tree_t *tree, avl_tree_node_t *entry);
void KAPI avl_tree_delete(avl_tree_t *tree, avl_tree_node_t *entry);
avl_tree_node_t* KAPI avl_tree_delete(avl_tree_t *tree, avl_tree_node_t *entry);
void KAPI avl_tree_init(avl_tree_t *tree, avl_tree_node_compare_func_t);

View File

@ -6,7 +6,7 @@
#ifndef _LINKED_LIST_H_
#define _LINKED_LIST_H_
#include "s_abi.h"
#include "g_abi.h"
typedef struct _linked_list_node_t
{

View File

@ -6,7 +6,7 @@
#ifndef _SALLOC_H_
#define _SALLOC_H_
#include "s_abi.h"
#include "g_abi.h"
void KAPI salloc_init(void *base, uint32_t size);

View File

@ -3,7 +3,7 @@
* See COPYING under root for details
*/
#include "s_abi.h"
#include "g_abi.h"
#include "bit_ops.h"
typedef union

View File

@ -6,8 +6,8 @@
#ifndef _BIT_OPERATION_H_
#define _BIT_OPERATION_H_
#include "s_type.h"
#include "s_abi.h"
#include "g_type.h"
#include "g_abi.h"
static inline uint64_t KAPI bit_mask(uint32_t bit)
{

View File

@ -6,8 +6,8 @@
#ifndef _STD_LIB_H_
#define _STD_LIB_H_
#include "s_abi.h"
#include "s_type.h"
#include "g_abi.h"
#include "g_type.h"
uint32_t KAPI rand( void );

View File

@ -3,8 +3,8 @@
* See COPYING under root for details
*/
#include "s_type.h"
#include "s_abi.h"
#include "g_type.h"
#include "g_abi.h"
#include "std_lib.h"
void KAPI mem_cpy(void *src, void *dst, uint64_t size)

View File

@ -3,8 +3,8 @@
* See COPYING under root for details
*/
#include "s_abi.h"
#include "s_type.h"
#include "g_abi.h"
#include "g_type.h"
#include "hal_mem.h"
#include "salloc.h"

View File

@ -3,7 +3,7 @@
* See COPYING under root for details
*/
#include "s_abi.h"
#include "g_abi.h"
#include "std_lib.h"
#include "hal_print.h"
#include "hal_var.h"

View File

@ -1,7 +1,7 @@
#ifndef _HAL_ARCH_H_
#define _HAL_ARCH_H_
#include "s_abi.h"
#include "g_abi.h"
typedef struct
{

View File

@ -7,8 +7,8 @@
#define _HAL_IO_H_
#include "s_intr.h"
#include "s_abi.h"
#include "s_type.h"
#include "g_abi.h"
#include "g_type.h"
#define GATE_DPL_0 (0ull << 13)
#define GATE_DPL_1 (1ull << 13)

View File

@ -6,8 +6,8 @@
#ifndef _HAL_MEM_H_
#define _HAL_MEM_H_
#include "s_abi.h"
#include "s_type.h"
#include "g_abi.h"
#include "g_type.h"
#include "linked_list.h"
#define GDT_ENTRY_SIZE 8

View File

@ -5,8 +5,8 @@
#ifndef _HAL_PRINT_H_
#define _HAL_PRINT_H_
#include "s_abi.h"
#include "s_type.h"
#include "g_abi.h"
#include "g_type.h"
#define get_column(pos) (pos % 80)
#define get_row(pos) (pos / 80)

View File

@ -1,6 +1,7 @@
#ifndef _K_ALLOC_H_
#define _K_ALLOC_H_
#include "s_abi.h"
#include "g_abi.h"
#include "g_type.h"
void KAPI k_alloc_init();

View File

@ -1,8 +1,9 @@
#ifndef _ATOMIC_H_
#define _ATOMIC_H_
#include "s_abi.h"
#include "g_abi.h"
#include "k_intr.h"
#include "s_atomic.h"
typedef struct
{
@ -13,8 +14,8 @@ void KAPI k_spin_lock(k_spin_lock_t *lock);
void KAPI k_spin_unlock(k_spin_lock_t *lock);
k_irql_t KAPI k_spin_lock_irq_set(k_spin_lock_t *lock, k_irql_t irql);
k_irql_t KAPI k_spin_lock_irql_set(k_spin_lock_t *lock, k_irql_t irql);
void KAPI k_spin_lock_irq_restore(k_spin_lock_t *lock, k_irql_t irql);
void KAPI k_spin_unlock_irql_restore(k_spin_lock_t *lock, k_irql_t irql);
#endif

View File

@ -0,0 +1,8 @@
#include "g_type.h"
#include "g_abi.h"
#include "k_intr.h"
#define K_BUG_CHECK_IRQL_MISMATCH 0
#define K_BUG_CHECK_PMM_UNALIGNED 1
void KAPI k_bug_check(uint64_t reason);

View File

@ -2,23 +2,7 @@
#define _K_INTR_H_
#include "s_intr.h"
#include "s_type.h"
#include "s_abi.h"
typedef uint64_t k_irql_t;
int32_t KAPI k_register_interrupt_handler(k_handler_type_t type,
uint32_t priority,
void (*handler)(uint64_t pc, uint64_t sp, uint64_t error));
void KAPI k_deregister_interrupt_handler(int32_t index);
void KAPI k_disable_interrupt();
void KAPI k_enable_interrupt();
k_irql_t KAPI k_get_current_irql();
void KAPI k_set_current_irql(k_irql_t irql);
#include "g_type.h"
#include "g_abi.h"
#endif

View File

@ -1,3 +0,0 @@
#include "s_abi.h"
#include "s_mm.h"

View File

@ -0,0 +1,26 @@
#include "avl_tree.h"
#include "linked_list.h"
#include "g_abi.h"
#include "s_pmm.h"
typedef struct
{
// more attributes such as paged/non-paged coming soon
linked_list_node_t free_list_node;
avl_tree_node_t avl_tree_node;
k_physical_addr_t base;
} k_physical_page_info_t;
void KAPI k_pmm_init(k_pmm_info_t* info);
k_physical_addr_t KAPI k_alloc_page();
k_physical_page_info_t* KAPI k_query_page(k_physical_addr_t base);
void KAPI k_free_page(k_physical_addr_t base);
// TODO: implement these somehow, i might just reserve the first 16MB for these
k_physical_addr_t KAPI k_alloc_contiguous_pages(uint64_t num_of_page,
k_physical_addr_t highest_p_addr);
k_physical_addr_t KAPI k_free_contiguous_pages(k_physical_addr_t base);

View File

@ -1,4 +1,5 @@
#include "s_abi.h"
#include "g_abi.h"
#include "k_alloc.h"
#include "salloc.h"
#define K_KERNEL_HEAP_SIZE 8192

View File

@ -1,11 +1,10 @@
#include "k_atomic.h"
#include "s_hal.h"
void KAPI k_spin_lock(k_spin_lock_t *lock)
{
if (lock != NULL)
{
while (hal_interlocked_exchange(&lock->val, 1) == 1);
while (k_interlocked_exchange(&lock->val, 1) == 1);
}
return;
}
@ -17,4 +16,25 @@ void KAPI k_spin_unlock(k_spin_lock_t *lock)
lock->val = 0;
}
return;
}
k_irql_t KAPI k_spin_lock_irql_set(k_spin_lock_t *lock, k_irql_t irql)
{
k_irql_t prev_irql = k_get_irql();
if(lock != NULL)
{
k_set_irql(irql);
k_spin_lock(lock);
}
return prev_irql;
}
void KAPI k_spin_unlock_irql_restore(k_spin_lock_t *lock, k_irql_t irql)
{
if(lock != NULL)
{
k_spin_unlock(lock);
k_set_irql(irql);
}
return;
}
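
A usage sketch for the new lock/IRQL pair, matching how k_pmm.c below uses it; touch_shared_state and _example_lock are placeholder names.

// Hypothetical: raise IRQL, take the lock, run a short critical section, then
// drop back to whatever level the caller was running at.
static k_spin_lock_t _example_lock;

void touch_shared_state()
{
    k_irql_t old_irql = k_spin_lock_irql_set(&_example_lock, K_IRQL_DISABLED_LEVEL);
    // ... critical section: no DPC/APC-level preemption on this CPU ...
    k_spin_unlock_irql_restore(&_example_lock, old_irql);
}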

View File

@ -4,7 +4,6 @@
*/
#include "k_alloc.h"
#include "s_hal.h"
#include "k_lib_test.h"
// returning from this function results in halting the cpu

View File

@ -0,0 +1,8 @@
#include "g_abi.h"
#include "g_type.h"
#include "k_bug_check.h"
void KAPI k_bug_check(uint64_t reason)
{
k_halt_cpu();
}

View File

@ -1,34 +1 @@
#include "k_intr.h"
#include "s_hal.h"
int32_t KAPI k_register_interrupt_handler(k_handler_type_t type,
uint32_t priority,
void (*handler)(uint64_t pc, uint64_t sp, uint64_t error))
{
return hal_register_interrupt_handler(type, priority, handler);
}
void KAPI k_deregister_interrupt_handler(int32_t index)
{
hal_deregister_interrupt_handler(index);
}
k_irql_t KAPI k_get_current_irql()
{
return hal_read_interrupt_priority();
}
void KAPI k_set_current_irql(k_irql_t irql)
{
hal_set_interrupt_priority(irql);
}
void KAPI k_disable_interrupt()
{
hal_disable_interrupt();
}
void KAPI k_enable_interrupt()
{
hal_disable_interrupt();
}
#include "k_intr.h"

View File

@ -1,20 +0,0 @@
#include "k_mm.h"
static uint64_t top_of_stack;
static _Bool _PMM_INITIALZIED = false;
void KAPI k_pmm_init()
{
}
uint64_t KAPI k_alloc_page()
{
}
uint64_t KAPI k_free_page()
{
}

x64/src/kernel/k_pmm.c (new file, 137 lines)
View File

@ -0,0 +1,137 @@
#include "k_alloc.h"
#include "k_bug_check.h"
#include "k_atomic.h"
#include "k_pmm.h"
static avl_tree_t _active_avl_tree;
static linked_list_t _free_list;
static k_spin_lock_t _lock;
static _Bool _initialized;
/*
* A comparison function between tree_node and my_node
* Returns:
* < 0 if tree_node < my_node
* = 0 if tree_node == my_node
* > 0 if tree_node > my_node
*/
static int32_t _avl_compare(avl_tree_node_t *tree_node, avl_tree_node_t *my_node)
{
k_physical_addr_t tree_base = OBTAIN_STRUCT_ADDR(tree_node, k_physical_page_info_t, avl_tree_node)->base;
k_physical_addr_t my_base = OBTAIN_STRUCT_ADDR(my_node, k_physical_page_info_t, avl_tree_node)->base;
if (tree_base > my_base)
return 1;
else if (tree_base < my_base)
return -1;
else
return 0;
}
void KAPI k_pmm_init(k_pmm_info_t* info)
{
if(info != NULL && !_initialized)
{
linked_list_init(&_free_list);
avl_tree_init(&_active_avl_tree, _avl_compare);
for(int i = 0; i < info->num_of_nodes; i++)
{
k_pmm_node_t* each_node = &info->nodes[i];
if(each_node->base % K_PAGE_SIZE != 0)
{
// if not aligned, bug check
k_bug_check(K_BUG_CHECK_PMM_UNALIGNED);
}
for(int j = 0; j < each_node->size; j++)
{
// note that the k_alloc call here might trigger a page fault
// however that's fine as long as we don't touch the linked list just yet
// it will use the pages that are already on file to enlarge the kernel heap
// don't worry, be happy :)
k_physical_page_info_t* page_info = k_alloc(sizeof(k_physical_page_info_t));
page_info->base = each_node->base + j * K_PAGE_SIZE;
linked_list_push_back(&_free_list, &page_info->free_list_node);
}
}
_initialized = true;
}
}
// these APIs may only be called at IRQL == DISABLED_LEVEL.
// we need to guarantee that, on the same CPU, they are not preempted by other
// potential callers: timers/interrupts queue DPCs, and a DPC might trigger a
// page fault (kernel heap), therefore IRQL must be raised to DISABLED_LEVEL
k_physical_addr_t KAPI k_alloc_page()
{
if(!_initialized)
return NULL;
k_irql_t irql = k_spin_lock_irql_set(&_lock, K_IRQL_DISABLED_LEVEL);
linked_list_node_t *node = NULL;
k_physical_page_info_t *page_info = NULL;
k_physical_addr_t base = NULL;
node = linked_list_pop_front(&_free_list);
if (node != NULL)
{
page_info = OBTAIN_STRUCT_ADDR(node, k_physical_page_info_t, free_list_node);
base = page_info->base;
avl_tree_insert(&_active_avl_tree, &page_info->avl_tree_node);
}
k_spin_unlock_irql_restore(&_lock, irql);
return base;
}
k_physical_page_info_t* KAPI k_query_page(k_physical_addr_t base)
{
if(!_initialized)
return NULL;
k_irql_t irql = k_spin_lock_irql_set(&_lock, K_IRQL_DISABLED_LEVEL);
avl_tree_node_t *node = NULL;
// look up with a dummy node keyed by base; a query must not detach the page
k_physical_page_info_t dummy, *page_info = NULL;
dummy.base = base;
node = avl_tree_search(&_active_avl_tree, &dummy.avl_tree_node);
if (node != NULL)
{
page_info = OBTAIN_STRUCT_ADDR(node, k_physical_page_info_t, avl_tree_node);
}
k_spin_unlock_irql_restore(&_lock, irql);
return page_info;
}
void KAPI k_free_page(k_physical_addr_t base)
{
if(!_initialized)
return;
// just lock since not sharing with anyone
k_irql_t irql = k_spin_lock_irql_set(&_lock, K_IRQL_DISABLED_LEVEL);
avl_tree_node_t *node = NULL;
// detach the page from the active tree using a dummy node keyed by base
k_physical_page_info_t dummy, *page_info;
dummy.base = base;
node = avl_tree_delete(&_active_avl_tree, &dummy.avl_tree_node);
if (node != NULL)
{
page_info = OBTAIN_STRUCT_ADDR(node, k_physical_page_info_t, avl_tree_node);
linked_list_push_back(&_free_list, &page_info->free_list_node);
}
k_spin_unlock_irql_restore(&_lock, irql);
return;
}
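
A hypothetical end-to-end use of the new PMM, assuming the boot path has already prepared a k_pmm_info_t (see s_pmm.h above); pmm_smoke_test and its argument are placeholders.

void pmm_smoke_test(k_pmm_info_t *pmm_info)
{
    k_pmm_init(pmm_info);                      // builds the free list from the memory map
    k_physical_addr_t frame = k_alloc_page();  // pops a descriptor off the free list
    if (frame != 0)
    {
        // the frame is now tracked in the active AVL tree
        k_physical_page_info_t *info = k_query_page(frame);  // descriptor lookup, NULL if untracked
        (void)info;                            // not used further in this sketch
        k_free_page(frame);                    // descriptor goes back on the free list
    }
}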

View File

@ -1,8 +1,8 @@
#ifndef _K_TEST_DRIVER_H_
#define _K_TEST_DRIVER_H_
#include "s_type.h"
#include "s_abi.h"
#include "g_type.h"
#include "g_abi.h"
void KAPI test_begin(char *name);