VMM half-done. PMM refactored. Haven't got a chance to touch the HAL yet.

secXsQuared 2016-06-25 14:40:47 -07:00
parent 8da028f518
commit f9b3df8b60
12 changed files with 292 additions and 111 deletions


@@ -3,8 +3,8 @@
* See COPYING under root for details
*/
#ifndef _S_ABI_H_
#define _S_ABI_H_
#ifndef _G_ABI_H_
#define _G_ABI_H_
#include <stddef.h>
#include <stdarg.h>


@@ -3,8 +3,8 @@
* See COPYING under root for details
*/
#ifndef _S_TYPE_H_
#define _S_TYPE_H_
#ifndef _G_TYPE_H_
#define _G_TYPE_H_
#include <stdint.h>
#include <stdbool.h>


@@ -5,10 +5,14 @@
#include "s_pmm.h"
typedef uint64_t k_virtual_addr_t;
typedef k_physical_addr_t k_address_space_t;
#define K_BASE_VADDR 0xFFFF800000000000
#define K_END_VADDR 0xFFFFFFFFFFFFFFFF
#define K_BASE_VADDR 0xFF
#define K_END_VADDR 0xFF
//U_BASE = 1MB
#define U_BASE_VADDR 0x0000000000100000
#define U_END_VADDR 0x7FFFFFFFFFFFFFFF
//
// all the address spaces passed by the kernel would be initialized by k_create_address_space
@@ -42,19 +46,16 @@ typedef struct
// so that these pages are global (modifying the mapping in this area affects everyone)
// the K_BASE_VADDR to K_END_VADDR range includes the virtual addr space reserved by the HAL
// if the HAL's reserved virtual addrs are to be mapped to different physical pages, the HAL should make that change
k_physical_addr_t KAPI k_create_address_space(k_physical_addr_t addr_space,
k_address_space_t KAPI k_create_address_space(k_address_space_t address_space,
k_physical_page_alloc alloc);
// this function destroys the target address space without destroying the K_BASE_VADDR to K_END_VADDR
// target_addr_space is guaranteed not to be the current address space
// when the function returns, the current address space must stay unchanged
void KAPI k_destroy_address_space(k_physical_addr_t target_addr_space,
void KAPI k_destroy_address_space(k_address_space_t address_space,
k_physical_page_free free);
// as the name implies
void KAPI k_switch_address_space(k_physical_addr_t target_addr_space);
// as the name implies
k_physical_addr_t KAPI k_get_current_address_space();
void KAPI k_switch_address_space(k_address_space_t target_addr_space);
#endif
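
A minimal usage sketch of the reworked address-space API above. Assumptions (not part of this diff): new_as is a freshly allocated top-level page table, current_as is the address space we are currently running in, and alloc_cb/free_cb are callbacks of the k_physical_page_alloc/k_physical_page_free types declared elsewhere in the HAL/PMM headers.

// Sketch only; parameter names are illustrative.
void example_address_space_lifecycle(k_address_space_t new_as,
                                     k_address_space_t current_as,
                                     k_physical_page_alloc alloc_cb,
                                     k_physical_page_free free_cb)
{
    // k_create_address_space initializes new_as, including the shared kernel
    // region K_BASE_VADDR..K_END_VADDR, and hands it back.
    new_as = k_create_address_space(new_as, alloc_cb);
    k_switch_address_space(new_as);
    // ... populate user mappings in U_BASE_VADDR..U_END_VADDR ...
    // The destroy contract above requires the target not to be the current
    // address space, so switch away before tearing it down.
    k_switch_address_space(current_as);
    k_destroy_address_space(new_as, free_cb);
}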


@@ -3,7 +3,6 @@
#include "s_boot.h"
#include "s_context.h"
#include "s_intr.h"
#include "s_asm.h"
uint64_t KAPI k_interlocked_exchange(uint64_t* target, uint64_t val)
{


@@ -1,5 +1,5 @@
#ifndef _ATOMIC_H_
#define _ATOMIC_H_
#ifndef _K_ATOMIC_H_
#define _K_ATOMIC_H_
#include "g_abi.h"
#include "k_intr.h"


@@ -1,3 +1,6 @@
#ifndef _K_BUG_CHECK_H_
#define _K_BUG_CHECK_H_
#include "g_type.h"
#include "g_abi.h"
#include "k_intr.h"
@@ -5,4 +8,6 @@
#define K_BUG_CHECK_IRQL_MISMATCH 0
#define K_BUG_CHECK_PMM_UNALIGNED 1
void KAPI k_bug_check(uint64_t reason);
void KAPI k_bug_check(uint64_t reason);
#endif


@@ -5,4 +5,6 @@
#include "g_type.h"
#include "g_abi.h"
void KAPI k_exc_handler_page_fault(void *context, void *intr_stack);
#endif


@@ -1,26 +1,48 @@
#ifndef _K_PMM_H_
#define _K_PMM_H_
#include "avl_tree.h"
#include "linked_list.h"
#include "g_abi.h"
#include "s_pmm.h"
#include "k_atomic.h"
#define PMM_STATUS_SUCCESS 0
#define PMM_STATUS_CANNOT_ALLOC_NODE 1
#define PMM_STATUS_PAGE_NOT_FOUND 2
#define PMM_STATUS_INVALID_ARGUMENTS 3
#define PMM_STATUS_INIT_UNALIGNED 4
#define PMM_STATUS_NOT_ENOUGH_PAGE 5
//#define PMM_PAGE_ATTR_FREE_BIT 0
//#define PMM_PAGE_ATTR_PAGED_BIT 1
//
//typedef struct
//{
// uint32_t attr;
//} k_physical_page_attr_t;
typedef struct
{
// more attributes such as paged/non-paged coming soon
linked_list_node_t free_list_node;
avl_tree_node_t avl_tree_node;
k_physical_addr_t base;
} k_physical_page_info_t;
avl_tree_t active_tree;
linked_list_t free_list;
k_spin_lock_t lock;
_Bool initialized;
} k_pmm_descriptor_t;
void KAPI k_pmm_init(k_pmm_info_t* info);
int32_t KAPI k_pmm_init(k_pmm_info_t *info, k_pmm_descriptor_t *desc);
k_physical_addr_t KAPI k_alloc_page();
int32_t KAPI k_alloc_page(k_pmm_descriptor_t *desc, k_physical_addr_t *out);
k_physical_page_info_t* KAPI k_query_page(k_physical_addr_t base);
void KAPI k_free_page(k_physical_addr_t base);
int32_t KAPI k_free_page(k_pmm_descriptor_t *desc, k_physical_addr_t base);
// TODO: implement these somehow, I might just reserve the first 16MB for these
k_physical_addr_t KAPI k_alloc_contiguous_pages(uint64_t num_of_page,
k_physical_addr_t highest_p_addr);
int32_t KAPI k_alloc_contiguous_pages(k_pmm_descriptor_t *desc,
uint64_t num_of_page,
k_physical_addr_t highest_p_addr,
k_physical_addr_t *out);
k_physical_addr_t KAPI k_free_contiguous_pages(k_physical_addr_t base);
int32_t KAPI k_free_contiguous_pages(k_pmm_descriptor_t *desc,
k_physical_addr_t base);
#endif
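
A sketch of the new status-code calling convention above, assuming boot_info is a k_pmm_info_t already filled in by the boot/HAL layer:

// Sketch only: initialize the PMM, grab one page, then return it.
int32_t example_pmm_usage(k_pmm_info_t *boot_info)
{
    static k_pmm_descriptor_t pmm; // zero-initialized, so .initialized == false
    int32_t status = k_pmm_init(boot_info, &pmm);
    if (status != PMM_STATUS_SUCCESS)
        return status;
    k_physical_addr_t page;
    status = k_alloc_page(&pmm, &page); // the page now comes back via *out
    if (status != PMM_STATUS_SUCCESS)
        return status;
    // ... use the page ...
    return k_free_page(&pmm, page);
}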


@@ -0,0 +1,35 @@
#ifndef _K_VMM_H_
#define _K_VMM_H_
#include "s_vmm.h"
#include "avl_tree.h"
#include "k_atomic.h"
#define VMM_STATUS_SUCCESS 0
#define VMM_STATUS_INVALID_ARGUMENTS 1
#define VMM_STATUS_CANNOT_ALLOC_NODE 2
typedef struct
{
uint32_t attr;
} k_virtual_addr_attribute_t;
typedef struct
{
avl_tree_t region_tree;
_Bool initialized;
k_spin_lock_t lock;
} k_vmm_descriptor_t;
int32_t k_vmm_init(k_vmm_descriptor_t *desc);
int32_t k_alloc_virtual_address(k_vmm_descriptor_t *desc,
k_virtual_addr_t base,
uint64_t size,
k_virtual_addr_attribute_t attr);
int64_t k_query_virtual_address(k_vmm_descriptor_t* desc, k_virtual_addr_t v_addr, uint64_t* out);
int64_t k_free_virtual_address(k_vmm_descriptor_t *desc, k_virtual_addr_t base);
#endif
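
And the matching sketch for the VMM side, assuming K_PAGE_SIZE comes from s_pmm.h and that a zeroed attribute value is acceptable while the attribute bits are still undefined in this commit:

// Sketch only: bring up a VMM descriptor and reserve a two-page region.
int32_t example_vmm_usage(k_virtual_addr_t where)
{
    static k_vmm_descriptor_t vmm;
    int32_t status = k_vmm_init(&vmm);
    if (status != VMM_STATUS_SUCCESS)
        return status;
    k_virtual_addr_attribute_t attr = {0};
    return k_alloc_virtual_address(&vmm, where, 2 * K_PAGE_SIZE, attr);
}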


@@ -1 +1,6 @@
#include "k_intr.h"
#include "k_intr.h"
void KAPI k_exc_handler_page_fault(void *context, void *intr_stack)
{
}


@@ -1,12 +1,14 @@
#include "k_alloc.h"
#include "k_bug_check.h"
#include "k_atomic.h"
#include "k_pmm.h"
static avl_tree_t _active_avl_tree;
static linked_list_t _free_list;
static k_spin_lock_t _lock;
static _Bool _initialized;
typedef struct
{
linked_list_node_t free_list_node;
avl_tree_node_t avl_tree_node;
k_physical_addr_t base;
//k_physical_page_attr_t attr;
} k_physical_page_descriptor_t;
/*
* A comparison function between tree_node and your_node
@@ -17,8 +19,12 @@ static _Bool _initialized;
*/
static int32_t _avl_compare(avl_tree_node_t *tree_node, avl_tree_node_t *my_node)
{
k_physical_addr_t tree_base = OBTAIN_STRUCT_ADDR(tree_node, k_physical_page_info_t, avl_tree_node)->base;
k_physical_addr_t my_base = OBTAIN_STRUCT_ADDR(my_node, k_physical_page_info_t, avl_tree_node)->base;
k_physical_addr_t tree_base = OBTAIN_STRUCT_ADDR(tree_node,
k_physical_page_descriptor_t,
avl_tree_node)->base;
k_physical_addr_t my_base = OBTAIN_STRUCT_ADDR(my_node,
k_physical_page_descriptor_t,
avl_tree_node)->base;
if (tree_base > my_base)
return 1;
else if (tree_base < my_base)
@@ -27,111 +33,136 @@ static int32_t _avl_compare(avl_tree_node_t *tree_node, avl_tree_node_t *my_node
return 0;
}
void KAPI k_pmm_init(k_pmm_info_t* info)
int32_t KAPI k_pmm_init(k_pmm_info_t *info, k_pmm_descriptor_t *desc)
{
if(info != NULL && !_initialized)
if (info == NULL || desc == NULL || desc->initialized)
{
linked_list_init(&_free_list);
avl_tree_init(&_active_avl_tree, _avl_compare);
for(int i = 0; i < info->num_of_nodes; i++)
{
k_pmm_node_t* each_node = &info->nodes[i];
if(each_node->base % K_PAGE_SIZE != 0)
{
// if not aligned, bug check
k_bug_check(K_BUG_CHECK_PMM_UNALIGNED);
}
for(int j = 0; j <= each_node->size; j++)
{
// note that the k_alloc call here might trigger a page fault
// however that's fine as long as we don't touch the linked list just yet
// since it will use pages already on file to enlarge the kernel heap
// don't worry, be happy :)
k_physical_page_info_t* page_info = k_alloc(sizeof(k_physical_page_info_t));
page_info->base = each_node->base;
linked_list_push_back(&_free_list, &page_info->free_list_node);
}
}
_initialized = true;
return PMM_STATUS_INVALID_ARGUMENTS;
}
linked_list_init(&desc->free_list);
avl_tree_init(&desc->active_tree, _avl_compare);
for (int i = 0; i < info->num_of_nodes; i++)
{
k_pmm_node_t *each_node = &info->nodes[i];
if (each_node->base % K_PAGE_SIZE != 0)
{
// if not aligned, bug check
return PMM_STATUS_INIT_UNALIGNED;
}
for (int j = 0; j <= each_node->size; j++)
{
// note that the k_alloc call here might trigger a page fault
// however that's fine as long as we don't touch the linked list just yet
// since it will use pages already on file to enlarge the kernel heap
// don't worry, be happy :)
k_physical_page_descriptor_t *page_info = k_alloc(sizeof(k_physical_page_descriptor_t));
if (page_info == NULL)
{
return PMM_STATUS_CANNOT_ALLOC_NODE;
}
page_info->base = each_node->base;
linked_list_push_back(&desc->free_list, &page_info->free_list_node);
}
}
desc->initialized = true;
return PMM_STATUS_SUCCESS;
}
// these APIs can only be called at IRQL == DISABLED.
// free lists can only be updated at IRQL == DISABLED.
// we need to guarantee that, on the same CPU, these APIs are not preempted by
// their potential callers: timers/interrupts queue DPCs, which might trigger a
// page fault (kernel heap), so IRQL must be raised to DISABLED here.
k_physical_addr_t KAPI k_alloc_page()
int32_t KAPI k_alloc_page(k_pmm_descriptor_t *desc, k_physical_addr_t *out)
{
if(!_initialized)
return NULL;
k_irql_t irql = k_spin_lock_irql_set(&_lock, K_IRQL_DISABLED_LEVEL);
if (desc == NULL || !desc->initialized)
return PMM_STATUS_INVALID_ARGUMENTS;
k_irql_t irql = k_spin_lock_irql_set(&desc->lock, K_IRQL_DISABLED_LEVEL);
int32_t result = PMM_STATUS_SUCCESS;
linked_list_node_t *node = NULL;
k_physical_page_info_t *page_info = NULL;
k_physical_addr_t base = NULL;
node = linked_list_pop_front(&_free_list);
k_physical_page_descriptor_t *page_info = NULL;
node = linked_list_pop_front(&desc->free_list);
if (node != NULL)
{
page_info = OBTAIN_STRUCT_ADDR(node, k_physical_page_info_t, free_list_node);
base = page_info->base;
avl_tree_insert(&_active_avl_tree, &page_info->avl_tree_node);
page_info = OBTAIN_STRUCT_ADDR(node,
k_physical_page_descriptor_t,
free_list_node);
avl_tree_insert(&desc->active_tree, &page_info->avl_tree_node);
*out = page_info->base;
}
k_spin_unlock_irql_restore(&_lock, irql);
return base;
}
k_physical_page_info_t* KAPI k_query_page(k_physical_addr_t base)
{
if(!_initialized)
return NULL;
k_irql_t irql = k_spin_lock_irql_set(&_lock, K_IRQL_DISABLED_LEVEL);
avl_tree_node_t *node = NULL;
// search for dummy
k_physical_page_info_t dummy, *page_info = NULL;
dummy.base = base;
node = avl_tree_delete(&_active_avl_tree, &dummy.avl_tree_node);
if (node != NULL)
else
{
page_info = OBTAIN_STRUCT_ADDR(node, k_physical_page_info_t, avl_tree_node);
result = PMM_STATUS_NOT_ENOUGH_PAGE;
}
k_spin_unlock_irql_restore(&_lock, irql);
k_spin_unlock_irql_restore(&desc->lock, irql);
return page_info;
return result;
}
void KAPI k_free_page(k_physical_addr_t base)
//int32_t KAPI k_query_page(k_pmm_descriptor_t *desc,
// k_physical_addr_t base,
// k_physical_page_attr_t *out)
//{
//
// if (desc == NULL || !desc->initialized)
// return PMM_STATUS_INVALID_ARGUMENTS;
//
// k_irql_t irql = k_spin_lock_irql_set(&desc->lock, K_IRQL_DISABLED_LEVEL);
// int32_t result = PMM_STATUS_SUCCESS;
// avl_tree_node_t *node = NULL;
// // search for dummy
// k_physical_page_descriptor_t dummy, *page_info = NULL;
// dummy.base = base;
//
// node = avl_tree_delete(&desc->pages_tree, &dummy.avl_tree_node);
//
// if (node != NULL)
// {
// page_info = OBTAIN_STRUCT_ADDR(node, k_physical_page_descriptor_t, avl_tree_node);
// *out = page_info->attr;
// }
// else
// {
// result = PMM_STATUS_PAGE_NOT_FOUND;
// }
//
// k_spin_unlock_irql_restore(&desc->lock, irql);
//
// return result;
//}
int32_t KAPI k_free_page(k_pmm_descriptor_t* desc, k_physical_addr_t base)
{
if(!_initialized)
return;
if (desc == NULL || !desc->initialized)
return PMM_STATUS_INVALID_ARGUMENTS;
// just lock since not sharing with anyone
k_irql_t irql = k_spin_lock_irql_set(&_lock, K_IRQL_DISABLED_LEVEL);
k_irql_t irql = k_spin_lock_irql_set(&desc->lock, K_IRQL_DISABLED_LEVEL);
int32_t result = PMM_STATUS_SUCCESS;
avl_tree_node_t *node = NULL;
// search for dummy
k_physical_page_info_t dummy, *page_info;
k_physical_page_descriptor_t dummy, *page_info;
dummy.base = base;
node = avl_tree_delete(&_active_avl_tree, &dummy.avl_tree_node);
node = avl_tree_delete(&desc->active_tree, &dummy.avl_tree_node);
if (node != NULL)
{
page_info = OBTAIN_STRUCT_ADDR(node, k_physical_page_info_t, avl_tree_node);
linked_list_push_back(&_free_list, &page_info->free_list_node);
page_info = OBTAIN_STRUCT_ADDR(node, k_physical_page_descriptor_t, avl_tree_node);
linked_list_push_back(&desc->free_list, &page_info->free_list_node);
}
else
{
result = PMM_STATUS_PAGE_NOT_FOUND;
}
k_spin_unlock_irql_restore(&_lock, irql);
k_spin_unlock_irql_restore(&desc->lock, irql);
return;
return result;
}
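
The IRQL discipline described in the comments above reduces to one pattern: raise IRQL to DISABLED while taking the spin lock, touch the list/tree, then restore. A generic sketch using the primitives from k_atomic.h:

static k_spin_lock_t example_lock;

void example_critical_section(void)
{
    // Raising IRQL to DISABLED keeps timer/interrupt-queued DPCs (which may
    // fault on the kernel heap) from preempting us on this CPU.
    k_irql_t irql = k_spin_lock_irql_set(&example_lock, K_IRQL_DISABLED_LEVEL);
    // ... manipulate the free list / active tree here ...
    k_spin_unlock_irql_restore(&example_lock, irql);
}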

x64/src/kernel/k_vmm.c (new file)

@@ -0,0 +1,81 @@
#include "k_alloc.h"
#include "k_vmm.h"
typedef struct
{
avl_tree_node_t tree_node;
k_virtual_addr_attribute_t attribute;
k_virtual_addr_t base;
uint64_t size;
} k_virtual_addr_descriptor_t;
/*
* A comparison function between tree_node and your_node
* Returns:
* < 0 if tree_node < your_node
* = 0 if tree_node == your_node
* > 0 if tree_node > your_node
*/
static int32_t _avl_compare(avl_tree_node_t *tree_node, avl_tree_node_t *my_node)
{
k_virtual_addr_descriptor_t *that = OBTAIN_STRUCT_ADDR(tree_node,
k_virtual_addr_descriptor_t,
tree_node);
k_virtual_addr_descriptor_t *mine = OBTAIN_STRUCT_ADDR(my_node,
k_virtual_addr_descriptor_t,
tree_node);
// if overlap, consider them to be the same
if (is_overlap(that->base, that->base + that->size, mine->base, mine->base + mine->size) == 1)
return 0;
else if (that->base < mine->base)
return -1;
else
return 1;
}
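// is_overlap() is used by the comparator above but is not part of this diff.
// A minimal stand-in, assuming half-open [start, end) ranges and a return of
// 1 on intersection (the real helper may differ):
static int is_overlap(uint64_t start_1, uint64_t end_1, uint64_t start_2, uint64_t end_2)
{
    return (start_1 < end_2 && start_2 < end_1) ? 1 : 0;
}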
int32_t k_vmm_init(k_vmm_descriptor_t *desc)
{
if (desc == NULL || desc->initialized)
{
return VMM_STATUS_INVALID_ARGUMENTS;
}
avl_tree_init(&desc->region_tree, _avl_compare);
// mark the descriptor ready so the alloc/query/free paths accept it
desc->initialized = true;
return VMM_STATUS_SUCCESS;
}
int32_t k_alloc_virtual_address(k_vmm_descriptor_t *desc,
k_virtual_addr_t base,
uint64_t size,
k_virtual_addr_attribute_t attr)
{
if(desc == NULL || !desc->initialized)
{
return VMM_STATUS_INVALID_ARGUMENTS;
}
k_virtual_addr_descriptor_t* node = k_alloc(sizeof(k_virtual_addr_descriptor_t));
if(node == NULL)
{
return VMM_STATUS_CANNOT_ALLOC_NODE;
}
node->base = base;
node->size = size;
node->attribute = attr;
avl_tree_insert(&desc->region_tree, &node->tree_node);
return VMM_STATUS_SUCCESS;
}
int64_t k_query_virtual_address(k_vmm_descriptor_t *desc, k_virtual_addr_t v_addr, uint64_t *out)
{
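    // TODO: still a stub as of this commit ("VMM half-done"); nothing is looked up and no status is returned yet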
}
int64_t k_free_virtual_address(k_vmm_descriptor_t *desc, k_virtual_addr_t base)
{
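    // TODO: still a stub as of this commit; one possible shape is sketched after this file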
}
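
For reference, a hedged sketch of what the empty free path might grow into, reusing the dummy-node AVL lookup pattern from k_pmm.c. It assumes avl_tree_delete and OBTAIN_STRUCT_ADDR behave as they do there, and that k_alloc.h provides a k_free counterpart to k_alloc (not shown in this commit):

int64_t example_free_virtual_address(k_vmm_descriptor_t *desc, k_virtual_addr_t base)
{
    if (desc == NULL || !desc->initialized)
        return VMM_STATUS_INVALID_ARGUMENTS;
    k_irql_t irql = k_spin_lock_irql_set(&desc->lock, K_IRQL_DISABLED_LEVEL);
    int64_t result = VMM_STATUS_SUCCESS;
    // The comparator treats overlapping regions as equal, so a 1-byte dummy
    // starting at base is enough to find the region containing it.
    k_virtual_addr_descriptor_t dummy;
    dummy.base = base;
    dummy.size = 1;
    avl_tree_node_t *node = avl_tree_delete(&desc->region_tree, &dummy.tree_node);
    if (node != NULL)
    {
        k_virtual_addr_descriptor_t *region = OBTAIN_STRUCT_ADDR(node,
                                                                 k_virtual_addr_descriptor_t,
                                                                 tree_node);
        k_free(region); // assumed counterpart to k_alloc
    }
    else
    {
        // no dedicated "not found" status code exists yet in this commit
        result = VMM_STATUS_INVALID_ARGUMENTS;
    }
    k_spin_unlock_irql_restore(&desc->lock, irql);
    return result;
}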