BROKEN AF

This commit is contained in:
secXsQuared 2016-09-13 00:41:09 -04:00
parent f412efdbfd
commit 1f080a6b80
25 changed files with 246 additions and 210 deletions

View File

@ -1,50 +0,0 @@
/*-------------------------------------------------------
|
| bifrost_rwlock.h
|
| Contains Bifrost readers-writer lock APIs.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2016 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#ifndef _BIFROST_RWLOCK_H_
#define _BIFROST_RWLOCK_H_
#include "bifrost_lock.h"
#include "bifrost_intr.h"
#include "bifrost_types.h"
// Readers-writer lock built from four spin locks plus reader/writer
// counters. The field names (r_try in particular) suggest a
// writer-preference scheme — NOTE(review): confirm against the
// implementation; only the declarations are visible here.
typedef struct
{
hw_spin_lock_t w_mutex;   // serializes writer_ct updates
hw_spin_lock_t r_mutex;   // serializes reader_ct updates
hw_spin_lock_t res_lock;  // guards the protected resource itself
hw_spin_lock_t r_try;     // gate readers must pass; held while writers are waiting
uint32_t reader_ct;       // number of readers currently holding the lock
uint32_t writer_ct;       // number of writers holding or waiting for the lock
} hw_rwlock_t;
// Initializes all member locks and zeroes the counters.
void ke_rwlock_init(hw_rwlock_t *lock);
// Shared (reader) acquire/release.
void ke_reader_lock(hw_rwlock_t *lock);
void ke_reader_unlock(hw_rwlock_t *lock);
// Shared acquire that first raises IRQL to 'irq'; returns the previous
// IRQL, which the caller passes back to the matching unlock.
hw_irql_t ke_reader_lock_raise_irql(hw_rwlock_t *lock, hw_irql_t irq);
void ke_reader_unlock_lower_irql(hw_rwlock_t *lock, hw_irql_t irq);
// Exclusive (writer) acquire/release.
void ke_writer_lock(hw_rwlock_t *lock);
void ke_writer_unlock(hw_rwlock_t *lock);
// Exclusive acquire with IRQL raise; same contract as the reader variant.
hw_irql_t ke_writer_lock_raise_irql(hw_rwlock_t *lock, hw_irql_t irq);
void ke_writer_unlock_lower_irql(hw_rwlock_t *lock, hw_irql_t irq);
#endif

View File

@ -3,10 +3,10 @@
#include "g_abi.h"
#include "g_type.h"
extern int32_t KAPI ke_interlocked_exchange(int32_t *target, int32_t val);
extern int32_t KAPI ke_interlocked_exchange_32(int32_t *target, int32_t val);
extern int32_t KAPI ke_interlocked_increment(int32_t *target, int32_t val);
extern int32_t KAPI ke_interlocked_increment_32(int32_t *target, int32_t increment);
extern int32_t KAPI ke_interlocked_compare_exchange(int32_t *target, int32_t compare, int32_t val);
extern int32_t KAPI ke_interlocked_compare_exchange_32(int32_t *target, int32_t compare, int32_t val);
#endif

View File

@ -13,7 +13,7 @@ typedef struct
uint64_t krnl_start;
uint64_t krnl_end;
k_hal_intr_info_t intr_info;
k_linked_list_t pmm_info;
k_pmm_info_t* pmm_info;
char cpu_vd_str[13];
} k_hal_boot_info_t;

View File

@ -23,10 +23,10 @@ typedef enum
} k_exc_type_t;
// IRQL APIs
typedef uint64_t k_irql_t;
#define K_IRQL_DISABLED_LEVEL 3
#define K_IRQL_DPC_LEVEL 2
#define K_IRQL_APC_LEVEL 1
typedef uint32_t k_irql_t;
#define K_IRQL_DISABLED_LEVEL 15
#define K_IRQL_DPC_LEVEL 4
#define K_IRQL_APC_LEVEL 2
#define K_IRQL_PASSIVE_LEVEL 0
extern k_irql_t KAPI ke_set_irql(k_irql_t irql);
@ -37,6 +37,10 @@ extern void KAPI ke_halt_cpu();
extern void KAPI ke_set_timer_timeout(uint64_t timeout);
extern int32_t KAPI ke_get_core_id();
extern int32_t KAPI ke_issue_interrupt(int32_t core_id, uint32_t vector);
// Interrupt handler registration
// context is a parameter passed by the kernel. HAL must pass back.
// intr_stack is a parameter passed by the HAL. Used by some HAL interrupt context functions.

View File

@ -24,10 +24,16 @@ typedef k_physical_addr_t (KAPI *k_physical_page_alloc)();
typedef void (KAPI *k_physical_page_free)(k_physical_addr_t page);
#define K_PAGE_ATTR_KERNEL (1 << 2)
#define K_PAGE_ATTR_CACHED (1 << 3)
#define K_PAGE_ATTR_NOT_EXECUTABLE (1 << 63)
#define K_PAGE_ATTR_WRITABLE (1 << 1)
// this function should map the v_addr to p_addr for the target address space
extern void KAPI ke_map_virtual_addr(k_physical_addr_t addr_space,
k_virtual_addr_t v_addr,
k_physical_addr_t p_addr,
uint64_t attribute,
k_physical_page_alloc alloc);
typedef struct

View File

@ -79,18 +79,29 @@ mov cr8,rdi
ret
; ============================
; uint64_t KAPI hal_interlocked_exchange(uint64_t* dst, uint64_t val);
global hal_interlocked_exchange
hal_interlocked_exchange:
lock xchg qword [rdi], rsi
mov rax, rsi
; int32_t KAPI hal_interlocked_exchange_32(int32_t *target, int32_t val)
; Atomically swaps *target (rdi) with val (esi); returns the previous value.
global hal_interlocked_exchange_32
hal_interlocked_exchange_32:
    lock xchg dword [rdi], esi   ; esi now holds the old *target
    mov eax, esi                 ; 32-bit mov zero-extends into rax; the old 'xor rax, rax' was redundant
    ret
; ============================
; uint64_t KAPI hal_interlocked_increment(uint64_t* dst);
global hal_interlocked_increment
; int32_t KAPI hal_interlocked_compare_exchange_32(int32_t *dst, int32_t compare, int32_t val);
; Atomically: if (*dst == compare) *dst = val. Returns the value *dst held
; before the operation (== compare on success).
global hal_interlocked_compare_exchange_32
hal_interlocked_compare_exchange_32:          ; BUG FIX: label previously read 'hal_interlocked_exchange', mismatching the global symbol and duplicating an earlier label
    mov eax, esi                              ; cmpxchg compares eax (= compare) with [rdi]
    lock cmpxchg dword [rdi], edx             ; on match [rdi] = edx (val); otherwise eax = [rdi]
    ret
; ============================
; int32_t KAPI hal_interlocked_increment_32(int32_t *target, int32_t increment);
; Atomically adds increment to *target; returns the value *target held
; before the addition (xadd leaves the old value in esi).
global hal_interlocked_increment_32
hal_interlocked_increment_32:    ; BUG FIX: label previously read 'hal_interlocked_increment' and a stale 64-bit 'lock inc qword [rdi]' remained in the body
    lock xadd dword [rdi], esi   ; [rdi] += esi; esi = old [rdi]
    mov eax, esi                 ; 32-bit mov zero-extends into rax; 'xor rax, rax' not needed
    ret
; ============================

View File

@ -3,7 +3,6 @@
* See COPYING under root for details
*/
#include "k_bit_ops.h"
#include "hal_print.h"
#include "hal_mem.h"
#include "hal_intr.h"

View File

@ -1,30 +1,43 @@
#include <hal_intr.h>
#include "hal_arch.h"
#include "s_atomic.h"
#include "s_boot.h"
#include "s_context.h"
#include "s_intr.h"
void KAPI ke_interlocked_increment(uint64_t *target)
int32_t KAPI ke_interlocked_increment_32(int32_t *target, int32_t increment)
{
return hal_interlocked_increment(target);
return hal_interlocked_increment_32(target, increment);
}
uint64_t KAPI ke_interlocked_exchange(uint64_t *target, uint64_t val)
int32_t KAPI ke_interlocked_compare_exchange_32(int32_t *target, int32_t compare, int32_t val)
{
return hal_interlocked_exchange(target, val);
return hal_interlocked_compare_exchange_32(target, compare, val);
}
// Atomically swaps *target with val and returns the previous value.
// Thin forwarding wrapper over the HAL primitive.
int32_t KAPI ke_interlocked_exchange_32(int32_t *target, int32_t val)
{
return hal_interlocked_exchange_32(target, val);
}
k_irql_t KAPI ke_set_irql(k_irql_t irql)
{
return 0;
k_irql_t old_irql = (k_irql_t)hal_read_cr8();
hal_write_cr8((k_irql_t)irql);
return old_irql;
}
k_irql_t KAPI ke_get_irql()
{
return 0;
return (k_irql_t)hal_read_cr8();
}
void KAPI ke_halt_cpu()
{
hal_halt_cpu();
}
}
// Returns the identifier of the CPU core currently executing,
// as reported by the HAL.
int32_t KAPI ke_get_core_id()
{
return hal_get_core_id();
}

View File

@ -19,9 +19,11 @@ typedef struct
} STRUCT_PACKED hal_idt_ptr_t;
extern void KAPI hal_interlocked_increment(uint64_t* dst);
extern int32_t KAPI hal_interlocked_increment_32(int32_t *target, int32_t increment);
extern uint64_t KAPI hal_interlocked_exchange(uint64_t *dst, uint64_t val);
extern int32_t KAPI hal_interlocked_compare_exchange_32(int32_t *dst, int32_t compare, int32_t val);
extern int32_t KAPI hal_interlocked_exchange_32(int32_t *target, int32_t val);
extern void KAPI hal_cpuid(uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx);

View File

@ -3,9 +3,9 @@
#include "g_abi.h"
#include "g_type.h"
void KAPI k_alloc_init();
void KAPI ke_alloc_init();
void* KAPI k_alloc(uint32_t size);
void* KAPI ke_alloc(uint32_t size);
void KAPI ke_free(void *ptr);

View File

@ -0,0 +1,4 @@
#ifndef _K_BOOT_H_
#define _K_BOOT_H_
#include "s_boot.h"
#endif

View File

@ -8,6 +8,6 @@
#define K_BUG_CHECK_IRQL_MISMATCH 0
#define K_BUG_CHECK_PMM_UNALIGNED 1
void KAPI k_bug_check(uint64_t reason);
void KAPI ke_bug_check(uint64_t reason);
#endif

View File

@ -15,7 +15,7 @@
// uint32_t attr;
//} k_physical_page_attr_t;
k_status_t KAPI ke_pmm_init(k_pmm_info_t *info);
k_status_t KAPI sx_pmm_init(k_pmm_info_t *info);
k_status_t KAPI k_alloc_page(k_physical_addr_t *out);

View File

@ -0,0 +1,34 @@
#ifndef _K_RWLOCK_H_
#define _K_RWLOCK_H_
#include "k_spin_lock.h"
#include "g_type.h"
// Readers-writer lock built from four spin locks plus counters.
// NOTE(review): the double-w in "rwwlock" looks like a typo for
// "rwlock" — confirm before the name spreads further; the guard
// macro already says _K_RWLOCK_H_.
typedef struct
{
k_spin_lock_t w_mutex;   // serializes writer_ct updates
k_spin_lock_t r_mutex;   // serializes reader_ct updates
k_spin_lock_t res_lock;  // guards the protected resource itself
k_spin_lock_t r_try;     // gate readers must pass; held while writers are waiting
uint32_t reader_ct;      // readers currently holding the lock
uint32_t writer_ct;      // writers holding or waiting for the lock
} k_rwwlock_t;
// Initializes all member locks and zeroes both counters.
void ke_rwwlock_init(k_rwwlock_t *lock);
// Shared (reader) acquire/release.
void ke_rwwlock_reader_lock(k_rwwlock_t *lock);
void ke_rwwlock_reader_unlock(k_rwwlock_t *lock);
// Exclusive (writer) acquire/release.
void ke_rwwlock_writer_lock(k_rwwlock_t *lock);
void ke_rwwlock_writer_unlock(k_rwwlock_t *lock);
// *_raise_irql variants raise IRQL to 'irql' before acquiring and
// return the previous IRQL; pass that value back to the matching
// *_lower_irql unlock to restore it.
k_irql_t ke_rwwlock_reader_lock_raise_irql(k_rwwlock_t *lock, k_irql_t irql);
void ke_rwwlock_reader_unlock_lower_irql(k_rwwlock_t *lock, k_irql_t irql);
k_irql_t ke_rwwlock_writer_lock_raise_irql(k_rwwlock_t *lock, k_irql_t irql);
void ke_rwwlock_writer_unlock_lower_irql(k_rwwlock_t *lock, k_irql_t irql);
#endif

View File

@ -26,6 +26,7 @@
#define FACILITY_GENERIC 0
#define FACILITY_REF 1
#define FACILITY_PMM 2
typedef enum _k_status_t
{
@ -41,7 +42,13 @@ typedef enum _k_status_t
REF_STATUS_DUPLICATED_HANDLE = SX_MAKE_STATUS(SEVERITY_ERROR, FACILITY_REF, 1),
PMM_STATUS_INVALID_ARGUMENTS = SX_MAKE_STATUS(SEVERITY_ERROR, FACILITY_PMM, 1),
PMM_STATUS_ALLOCATION_FAILED = SX_MAKE_STATUS(SEVERITY_ERROR, FACILITY_PMM, 3),
PMM_STATUS_UNINITIALIZED = SX_MAKE_STATUS(SEVERITY_ERROR, FACILITY_PMM, 4),
PMM_STATUS_NOT_ENOUGH_PAGE = SX_MAKE_STATUS(SEVERITY_ERROR, FACILITY_PMM, 5),
} k_status_t;
#endif

View File

@ -33,4 +33,4 @@ int64_t KAPI k_query_virtual_address(k_vmm_descriptor_t* desc, k_virtual_addr_t
int64_t KAPI k_free_virtual_address(k_vmm_descriptor_t *desc, k_virtual_addr_t base);
#endif
#endif

View File

@ -8,7 +8,7 @@
static _Bool _k_alloc_initialized;
static uint8_t _k_alloc_heap[K_KERNEL_HEAP_SIZE];
void KAPI k_alloc_init()
void KAPI ke_alloc_init()
{
if (!_k_alloc_initialized)
{
@ -17,7 +17,7 @@ void KAPI k_alloc_init()
}
}
void *KAPI k_alloc(uint32_t size)
void *KAPI ke_alloc(uint32_t size)
{
return _k_alloc_initialized ? ke_salloc(_k_alloc_heap, size) : NULL;
}

View File

@ -3,7 +3,8 @@
* See COPYING under root for details
*/
#include "s_boot.h"
#include "k_pmm.h"
#include "k_boot.h"
#include "k_alloc.h"
#include "k_intr.h"
#include "k_lib_test.h"
@ -21,7 +22,8 @@ void KAPI ke_main(k_hal_boot_info_t *boot_info)
}
// init kernel heap
k_alloc_init();
sx_pmm_init(boot_info->pmm_info);
ke_alloc_init();
hal_printf("KERNEL: Base Addr is 0x%X. Size is %uB, %uKB.\n",
boot_info->krnl_start,

View File

@ -1,8 +1,10 @@
#include "k_print.h"
#include "g_abi.h"
#include "g_type.h"
#include "k_bug_check.h"
void KAPI k_bug_check(uint64_t reason)
// Reports a fatal kernel error and halts the CPU. Does not return.
void KAPI ke_bug_check(uint64_t reason)
{
// NOTE(review): "%ul" formats an unsigned int followed by a literal
// 'l' — almost certainly the wrong specifier for a uint64_t. Verify
// what ke_printf supports (e.g. "%llu") and fix.
ke_printf("BugCheck: Reason - %ul\n", reason);
ke_halt_cpu();
}

View File

@ -1,7 +1,7 @@
#include <k_spin_lock.h>
#include <k_status.h>
#include <k_assert.h>
#include "k_rwwlock.h"
#include "k_status.h"
#include "k_alloc.h"
#include "k_bug_check.h"
#include "k_pmm.h"
typedef struct
@ -9,12 +9,12 @@ typedef struct
k_linked_list_node_t free_list_node;
k_avl_tree_node_t avl_tree_node;
k_physical_addr_t base;
//k_physical_page_attr_t attr;
int32_t attr;
} k_physical_page_descriptor_t;
static k_avl_tree_t active_tree;
static k_linked_list_t free_list;
static k_spin_lock_t lock;
static k_rwwlock_t lock;
static _Bool initialized;
/*
@ -24,7 +24,7 @@ static _Bool initialized;
* = 0 if tree_node == your_node
* > 0 if tree_node > your_node
*/
static int32_t base_addr_compare(k_avl_tree_node_t *tree_node, k_avl_tree_node_t *my_node)
static int32_t base_addr_compare(void *tree_node, void *my_node)
{
k_physical_addr_t tree_base = OBTAIN_STRUCT_ADDR(tree_node,
k_physical_page_descriptor_t,
@ -40,24 +40,27 @@ static int32_t base_addr_compare(k_avl_tree_node_t *tree_node, k_avl_tree_node_t
return 0;
}
k_status_t KAPI ke_pmm_init(k_pmm_info_t *info)
k_status_t KAPI sx_pmm_init(k_pmm_info_t *info)
{
if (info == NULL || desc == NULL || desc->initialized)
if (info == NULL)
{
return PMM_STATUS_INVALID_ARGUMENTS;
}
ke_linked_list_init(&desc->free_list);
ke_avl_tree_init(&desc->active_tree, base_addr_compare);
if (initialized)
{
return STATUS_SUCCESS;
}
ke_rwwlock_init(&lock);
ke_linked_list_init(&free_list);
ke_avl_tree_init(&active_tree, base_addr_compare);
for (uint32_t i = 0; i < info->num_of_nodes; i++)
{
k_pmm_node_t *each_node = &info->nodes[i];
if (each_node->base % K_PAGE_SIZE != 0)
{
// if not aligned, bug check
return PMM_STATUS_INIT_UNALIGNED;
}
ke_assert (each_node->base % K_PAGE_SIZE != 0);
for (uint64_t j = 0; j <= each_node->size; j++)
{
@ -65,19 +68,19 @@ k_status_t KAPI ke_pmm_init(k_pmm_info_t *info)
// however it's fine as long as we don't touch linked list just yet
// it will use the pages that are already on file to enlarge the kernel heap
// don't worry, be happy :)
k_physical_page_descriptor_t *page_info = k_alloc(sizeof(k_physical_page_descriptor_t));
k_physical_page_descriptor_t *page_info = ke_alloc(sizeof(k_physical_page_descriptor_t));
if (page_info == NULL)
{
return PMM_STATUS_CANNOT_ALLOC_NODE;
return PMM_STATUS_ALLOCATION_FAILED;
}
page_info->base = each_node->base;
ke_linked_list_push_back(&desc->free_list, &page_info->free_list_node);
ke_linked_list_push_back(&free_list, &page_info->free_list_node);
}
}
desc->initialized = true;
return PMM_STATUS_SUCCESS;
initialized = true;
return STATUS_SUCCESS;
}
// free lists can only be updated at IRQL == DISABLED
@ -85,89 +88,101 @@ k_status_t KAPI ke_pmm_init(k_pmm_info_t *info)
// potential callers of these, since timer/interrupts queue DPC, which might trigger
// page fault (kernel heap), therefore, it must set IRQL to DISABLED
k_status_t KAPI ke_alloc_page(k_pmm_descriptor_t *desc, k_physical_addr_t *out)
k_status_t KAPI ke_alloc_page(k_physical_addr_t *out)
{
if (desc == NULL || !desc->initialized)
return PMM_STATUS_INVALID_ARGUMENTS;
if (!initialized)
{
return PMM_STATUS_UNINITIALIZED;
}
k_irql_t irql = ke_spin_lock_raise_irql(&desc->lock, K_IRQL_DISABLED_LEVEL);
int32_t result = PMM_STATUS_SUCCESS;
if (out == NULL)
{
return PMM_STATUS_INVALID_ARGUMENTS;
}
k_irql_t irql = ke_rwwlock_writer_lock_raise_irql(&lock, K_IRQL_DISABLED_LEVEL);
k_status_t result = STATUS_SUCCESS;
k_linked_list_node_t *node = NULL;
k_physical_page_descriptor_t *page_info = NULL;
node = ke_linked_list_pop_front(&desc->free_list);
node = ke_linked_list_pop_front(&free_list);
if (node != NULL)
{
page_info = OBTAIN_STRUCT_ADDR(node,
k_physical_page_descriptor_t,
free_list_node);
ke_avl_tree_insert(&desc->active_tree, &page_info->avl_tree_node);
ke_avl_tree_insert(&active_tree, &page_info->avl_tree_node);
*out = page_info->base;
} else
{
result = PMM_STATUS_NOT_ENOUGH_PAGE;
}
ke_spin_unlock_lower_irql(&desc->lock, irql);
ke_rwwlock_writer_unlock_lower_irql(&lock, irql);
return result;
}
/*
 * Looks up the attribute of the active physical page whose base address is
 * 'base' and stores it in *out.
 *
 * Returns STATUS_SUCCESS on success, PMM_STATUS_UNINITIALIZED if the PMM has
 * not been initialized, PMM_STATUS_INVALID_ARGUMENTS if out is NULL or the
 * page is not in the active tree.
 *
 * BUG FIX: the previous version called ke_avl_tree_delete under the READER
 * lock and never re-inserted the node — a query permanently removed the page
 * descriptor from the active tree, and mutated the tree while holding only a
 * shared lock. Since only delete is available here, we take the writer lock
 * and re-insert the node after reading its attribute.
 * TODO(review): replace the delete/insert pair with a non-mutating search
 * (and drop back to the reader lock) if an ke_avl_tree_search API exists.
 */
k_status_t KAPI ke_query_page_attr(k_physical_addr_t base,
                                   int32_t *out)
{
    if (!initialized)
    {
        return PMM_STATUS_UNINITIALIZED;
    }
    if (out == NULL)
    {
        return PMM_STATUS_INVALID_ARGUMENTS;
    }
    // writer lock: ke_avl_tree_delete/insert mutate the tree
    k_irql_t irql = ke_rwwlock_writer_lock_raise_irql(&lock, K_IRQL_DISABLED_LEVEL);
    k_status_t result = STATUS_SUCCESS;
    k_avl_tree_node_t *node = NULL;
    // stack dummy carries the search key (base) for the comparator
    k_physical_page_descriptor_t dummy, *page_info = NULL;
    dummy.base = base;
    node = ke_avl_tree_delete(&active_tree, &dummy.avl_tree_node);
    if (node != NULL)
    {
        page_info = OBTAIN_STRUCT_ADDR(node, k_physical_page_descriptor_t, avl_tree_node);
        *out = page_info->attr;
        // put the descriptor back — this function must not change the tree's contents
        ke_avl_tree_insert(&active_tree, node);
    } else
    {
        result = PMM_STATUS_INVALID_ARGUMENTS;
    }
    ke_rwwlock_writer_unlock_lower_irql(&lock, irql);
    return result;
}
//int32_t KAPI k_query_page(k_pmm_descriptor_t *desc,
// k_physical_addr_t base,
// k_physical_page_attr_t *out)
//{
//
// if (desc == NULL || !desc->initialized)
// return PMM_STATUS_INVALID_ARGUMENTS;
//
// k_irql_t irql = k_spin_lock_irql_set(&desc->lock, K_IRQL_DISABLED_LEVEL);
// int32_t result = PMM_STATUS_SUCCESS;
// avl_tree_node_t *node = NULL;
// // search for dummy
// k_physical_page_descriptor_t dummy, *page_info = NULL;
// dummy.base = base;
//
// node = avl_tree_delete(&desc->pages_tree, &dummy.avl_tree_node);
//
// if (node != NULL)
// {
// page_info = OBTAIN_STRUCT_ADDR(node, k_physical_page_descriptor_t, avl_tree_node);
// *out = page_info->attr;
// }
// else
// {
// result = PMM_STATUS_PAGE_NOT_FOUND;
// }
//
// k_spin_unlock_irql_restore(&desc->lock, irql);
//
// return result;
//}
k_status_t KAPI ke_free_page(k_pmm_descriptor_t *desc, k_physical_addr_t base)
k_status_t KAPI ke_free_page(k_physical_addr_t base)
{
if (desc == NULL || !desc->initialized)
return PMM_STATUS_INVALID_ARGUMENTS;
if (!initialized)
{
return PMM_STATUS_UNINITIALIZED;
}
// just lock since not sharing with anyone
k_irql_t irql = ke_spin_lock_raise_irql(&desc->lock, K_IRQL_DISABLED_LEVEL);
int32_t result = PMM_STATUS_SUCCESS;
k_irql_t irql = ke_rwwlock_writer_lock_raise_irql(&lock, K_IRQL_DISABLED_LEVEL);
k_status_t result = STATUS_SUCCESS;
k_avl_tree_node_t *node = NULL;
// search for dummy
k_physical_page_descriptor_t dummy, *page_info;
dummy.base = base;
node = ke_avl_tree_delete(&desc->active_tree, &dummy.avl_tree_node);
node = ke_avl_tree_delete(&active_tree, &dummy.avl_tree_node);
if (node != NULL)
{
page_info = OBTAIN_STRUCT_ADDR(node, k_physical_page_descriptor_t, avl_tree_node);
ke_linked_list_push_back(&desc->free_list, &page_info->free_list_node);
ke_linked_list_push_back(&free_list, &page_info->free_list_node);
} else
{
result = PMM_STATUS_PAGE_NOT_FOUND;
result = PMM_STATUS_INVALID_ARGUMENTS;
}
ke_spin_unlock_lower_irql(&desc->lock, irql);
ke_rwwlock_writer_unlock_lower_irql(&lock, irql);
return result;
}

View File

@ -83,7 +83,7 @@ k_status_t KAPI ke_reference_obj(k_ref_node_t *ref_node)
if (ref_node == NULL)
return REF_STATUS_INVALID_ARGUMENTS;
int32_t old_ref_count = ke_interlocked_increment(&ref_node->ref_count, 1);
int32_t old_ref_count = ke_interlocked_increment_32(&ref_node->ref_count, 1);
ke_assert(old_ref_count >= 1);
@ -99,7 +99,7 @@ k_status_t KAPI ke_dereference_obj(k_ref_node_t *ref_node)
k_status_t result = STATUS_SUCCESS;
int32_t old_ref_count = ke_interlocked_increment(&ref_node->ref_count, -1);
int32_t old_ref_count = ke_interlocked_increment_32(&ref_node->ref_count, -1);
ke_assert(old_ref_count >= 1);
@ -174,7 +174,7 @@ static k_status_t KAPI ke_create_handle(k_ref_node_t *ref,
if (SX_SUCCESS(result))
{
// TODO: CHECK OVERFLOW
node->handle = (k_handle_t) ke_interlocked_increment(&_handle_base, 1);
node->handle = (k_handle_t) ke_interlocked_increment_32(&_handle_base, 1);
node->ref = ref;
irql = ke_spin_lock_raise_irql(&_handle_tree_lock, K_IRQL_DPC_LEVEL);
k_handle_node_t *existing_node = search_handle_node(node->handle);
@ -254,7 +254,7 @@ k_status_t KAPI sx_create_handle(k_ref_node_t *ref, k_handle_t *out)
return REF_STATUS_UNINITIALIZED;
k_handle_node_t *node;
node = (k_handle_node_t *) k_alloc(sizeof(k_handle_node_t));
node = (k_handle_node_t *) ke_alloc(sizeof(k_handle_node_t));
if (node == NULL)
{
return REF_STATUS_ALLOCATION_FAILED;

View File

@ -1,9 +1,8 @@
#include "bifrost_rwlock.h"
#include "bifrost_intr.h"
#include "k_rwwlock.h"
void ke_rwlock_init(hw_rwlock_t *lock)
void ke_rwwlock_init(k_rwwlock_t *lock)
{
if(lock != NULL)
if (lock != NULL)
{
ke_spin_lock_init(&lock->w_mutex);
ke_spin_lock_init(&lock->r_mutex);
@ -15,14 +14,14 @@ void ke_rwlock_init(hw_rwlock_t *lock)
return;
}
void ke_reader_lock(hw_rwlock_t *lock)
void ke_rwwlock_reader_lock(k_rwwlock_t *lock)
{
if(lock != NULL)
if (lock != NULL)
{
ke_spin_lock(&lock->r_try);
ke_spin_lock(&lock->r_mutex);
lock->reader_ct++;
if(lock->reader_ct == 1)
if (lock->reader_ct == 1)
{
ke_spin_lock(&lock->res_lock);
}
@ -32,13 +31,13 @@ void ke_reader_lock(hw_rwlock_t *lock)
return;
}
void ke_reader_unlock(hw_rwlock_t *lock)
void ke_rwwlock_reader_unlock(k_rwwlock_t *lock)
{
if(lock != NULL)
if (lock != NULL)
{
ke_spin_lock(&lock->r_mutex);
lock->reader_ct--;
if(lock->reader_ct == 0)
if (lock->reader_ct == 0)
{
ke_spin_unlock(&lock->res_lock);
}
@ -47,31 +46,11 @@ void ke_reader_unlock(hw_rwlock_t *lock)
return;
}
hw_irql_t ke_reader_lock_raise_irql(hw_rwlock_t *lock, hw_irql_t irql)
{
hw_irql_t msk = ke_raise_irql(irql);
if(lock != NULL)
{
ke_reader_lock(lock);
}
return msk;
}
void ke_reader_unlock_lower_irql(hw_rwlock_t *lock, hw_irql_t irq)
{
if(lock != NULL)
{
ke_reader_unlock(lock);
}
ke_lower_irql(irq);
return;
}
void ke_writer_lock(hw_rwlock_t *lock)
void ke_rwwlock_writer_lock(k_rwwlock_t *lock)
{
ke_spin_lock(&lock->w_mutex);
lock->writer_ct++;
if(lock->writer_ct == 1)
if (lock->writer_ct == 1)
{
ke_spin_lock(&lock->r_try);
}
@ -79,34 +58,42 @@ void ke_writer_lock(hw_rwlock_t *lock)
ke_spin_lock(&lock->res_lock);
}
void ke_writer_unlock(hw_rwlock_t *lock)
void ke_rwwlock_writer_unlock(k_rwwlock_t *lock)
{
ke_spin_unlock(&lock->res_lock);
ke_spin_lock(&lock->w_mutex);
lock->writer_ct--;
if(lock->writer_ct == 0)
if (lock->writer_ct == 0)
{
ke_spin_unlock(&lock->r_try);
}
ke_spin_unlock(&lock->w_mutex);
}
hw_irql_t ke_writer_lock_raise_irql(hw_rwlock_t *lock, hw_irql_t irql)
k_irql_t ke_rwwlock_reader_lock_raise_irql(k_rwwlock_t *lock, k_irql_t irql)
{
hw_irql_t msk = ke_raise_irql(irql);
if(lock != NULL)
{
ke_reader_lock(lock);
}
return msk;
k_irql_t old_irql = ke_raise_irql(irql);
ke_rwwlock_reader_lock(lock);
return old_irql;
}
void ke_writer_unlock_lower_irql(hw_rwlock_t *lock, hw_irql_t irq)
void ke_rwwlock_reader_unlock_lower_irql(k_rwwlock_t *lock, k_irql_t irql)
{
if(lock != NULL)
{
ke_reader_unlock(lock);
}
ke_lower_irql(irq);
ke_rwwlock_reader_unlock(lock);
ke_lower_irql(irql);
return;
}
// Raises IRQL to the requested level, then takes the writer lock.
// Returns the previous IRQL so the caller can hand it to the matching
// ke_rwwlock_writer_unlock_lower_irql.
k_irql_t ke_rwwlock_writer_lock_raise_irql(k_rwwlock_t *lock, k_irql_t irql)
{
    k_irql_t prev_irql = ke_raise_irql(irql);
    ke_rwwlock_writer_lock(lock);
    return prev_irql;
}
// Drops the writer lock, then lowers IRQL back to the level returned by
// the matching ke_rwwlock_writer_lock_raise_irql call.
void ke_rwwlock_writer_unlock_lower_irql(k_rwwlock_t *lock, k_irql_t irql)
{
    ke_rwwlock_writer_unlock(lock);
    ke_lower_irql(irql);
}

View File

@ -5,7 +5,7 @@
#include "g_abi.h"
#include "g_type.h"
#include "k_bit_ops.h"
#include "k_stdlib.h"
typedef union
{

View File

@ -14,7 +14,7 @@ void KAPI ke_spin_lock(k_spin_lock_t *lock)
{
if (lock != NULL)
{
while (ke_interlocked_compare_exchange(&lock->val, 0, 1) != 0);
while (ke_interlocked_compare_exchange_32(&lock->val, 0, 1) != 0);
}
return;
}

View File

@ -16,7 +16,7 @@ typedef struct
* = 0 if tree_node == your_node
* > 0 if tree_node > your_node
*/
static int32_t _avl_compare(k_avl_tree_node_t *tree_node, k_avl_tree_node_t *my_node)
static int32_t base_addr_compare(void *tree_node, void *my_node)
{
k_virtual_addr_descriptor_t *that = OBTAIN_STRUCT_ADDR(tree_node,
k_virtual_addr_descriptor_t,
@ -54,7 +54,7 @@ int32_t KAPI k_alloc_virtual_address(k_vmm_descriptor_t *desc,
{
return VMM_STATUS_INVALID_ARGUMENTS;
}
k_virtual_addr_descriptor_t* node = k_alloc(sizeof(k_virtual_addr_descriptor_t));
k_virtual_addr_descriptor_t* node = ke_alloc(sizeof(k_virtual_addr_descriptor_t));
if(node == NULL)
{