This commit is contained in:
secXsQuared 2016-08-26 22:13:54 -07:00
parent 4032174150
commit 5a3d467dbd
254 changed files with 52535 additions and 347 deletions

View File

@ -0,0 +1,64 @@
#include "bifrost.h"
static int count = 0;
void timer_proc(void *kp, void *up)
{
hw_printf("SYS_TICK: %d . COUNT: %d.\n", ke_get_system_tick(), ke_interlocked_increment(&count, 1));
}
hw_handle_t event;
void driver2(void* par)
{
hw_printf("Sleeping...Thread2\n");
hw_thread_sleep(5);
hw_printf("Signaling event..\n");
hw_event_signal(event);
hw_printf("Sleeping2...Thread2\n");
hw_thread_sleep(5);
hw_printf("Exiting..\n");
hw_thread_exit(10);
}
void driver(void *par)
{
hw_handle_t timer;
hw_timer_create(&timer, TIMER_TYPE_AUTO_RESET);
hw_timer_set(timer, 2, true);
while (count < 20)
{
timer_proc(NULL, NULL);
hw_timer_wait(timer);
}
hw_timer_cancel(timer);
hw_close_handle(timer);
hw_printf("Sleeping for some cycles..\n");
hw_thread_sleep(10);
hw_printf("Hmmm... sound sleep...\n");
if(ke_get_current_core() == 1)
{
hw_printf("Core1 finished...\n");
}
else
{
hw_event_create(&event, EVENT_TYPE_MANUAL);
hw_handle_t thread2_handle;
hw_thread_create(driver2, NULL, PRIORITY_DEFAULT, THREAD_DEFAULT_STACK_SIZE, &thread2_handle);
hw_thread_start(thread2_handle);
hw_printf("Waiting for event...\n");
hw_event_wait(event);
hw_printf("Waiting for driver2 exit...\n");
hw_wait_for_thread_exit(thread2_handle);
int32_t exit;
hw_thread_get_exit_code(thread2_handle, &exit);
hw_printf("Thread2 exited with %d\n", exit);
hw_close_handle(thread2_handle);
hw_close_handle(event);
}
hw_thread_exit(0);
}

View File

@ -0,0 +1,277 @@
#ifndef _BIFROST_H
#define _BIFROST_H
// =======================
// BIFROST User API header
// =======================
// types
#include <stdint.h>
#include <stddef.h>
#include <stdarg.h>
#include <stdbool.h>
typedef void (*hw_callback_func_t)(void *kernel_args, void *user_args);
typedef uint32_t hw_handle_t;
#define TRUE (true)
#define FALSE (false)
// =======================
// Status Codes
// =======================
#define HW_RESULT_SEVERITY_SUCCESS 0
#define HW_RESULT_SEVERITY_ERROR 1
#define HW_SUCCESS(hr) (((uint16_t)(hr)) >> 15 == 0)
#define HW_RESULT_CODE(hr) ((hr) & 0x7F)
#define HW_RESULT_FACILITY(hr) (((hr) & 0x7F80) >> 7)
#define HW_RESULT_SEVERITY(hr) (((hr) >> 15) & 0x1)
#define MAKE_HW_RESULT(sev, fac, code) \
(((uint16_t)(sev)<<15) | ((uint16_t)(fac)<<7) | ((uint16_t)(code)))
#define HW_RESULT_FACILITY_THREAD 6
#define HW_RESULT_FACILITY_DPC 1
#define HW_RESULT_FACILITY_SEM 2
#define HW_RESULT_FACILITY_REF 3
#define HW_RESULT_FACILITY_APC 4
#define HW_RESULT_FACILITY_EVENT 4
#define HW_RESULT_FACILITY_TIMER 5
#define HW_RESULT_NO_FACILITY 0
typedef enum
{
STATUS_SUCCESS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_SUCCESS,
HW_RESULT_NO_FACILITY, 0),
THREAD_STATUS_INVALID_ARGUMENT = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_THREAD, 1),
THREAD_STATUS_INVALID_STATE = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_THREAD, 2),
THREAD_STATUS_UNINITIALIZED = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_THREAD, 3),
THREAD_STATUS_OUT_OF_MEMORY = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_THREAD, 4),
THREAD_STATUS_ID_OVERFLOW = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_THREAD, 5),
DPC_STATUS_NOT_ENOUGH_MEM = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_DPC, 1),
DPC_STATUS_INVALID_ARGUMENTS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_DPC, 2),
DPC_STATUS_NOT_INITIALIZED = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_DPC, 3),
SEM_STATUS_CANNOT_ALLOCATE_MEM = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_SEM, 1),
SEM_STATUS_OCCUPIED = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_SEM, 2),
SEM_STATUS_INVALID_ARGUMENTS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_SEM, 3),
SEM_STATUS_INVALID_CONTEXT = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_SEM, 4),
REF_STATUS_CANNOT_ALLOCATE_MEM = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_REF, 1),
REF_STATUS_HANDLE_NOT_FOUND = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_REF, 2),
REF_STATUS_INVALID_ARGUMENTS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_REF, 3),
REF_STATUS_HANDLE_DUPLICATE = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_REF, 4),
REF_STATUS_UNINITIALIZED = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_REF, 5),
REF_STATUS_REF_FREED = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_REF, 6),
REF_STATUS_NO_EFFECT = MAKE_HW_RESULT(HW_RESULT_SEVERITY_SUCCESS,
HW_RESULT_FACILITY_REF, 7),
APC_STATUS_CANNOT_ALLOCATE_MEM = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_APC, 1),
APC_STATUS_INVALID_ARGUMENTS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_APC, 2),
APC_STATUS_NOT_INITIALIZED = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_APC, 3),
EVENT_STATUS_CANNOT_ALLOCATE_MEM = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_EVENT, 1),
EVENT_STATUS_INVALID_ARGUMENTS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_EVENT, 2),
TIMER_STATUS_SUCCESS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_SUCCESS,
HW_RESULT_FACILITY_TIMER, 0),
TIMER_STATUS_CANNOT_ALLOCATE_MEM = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_TIMER, 1),
TIMER_STATUS_INVALID_ARGUMENTS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_TIMER, 2),
TIMER_STATUS_NOT_INITIALIZED = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_TIMER, 3),
} hw_result_t;
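// Example (illustrative): TIMER_STATUS_INVALID_ARGUMENTS above decodes with
// the macros as follows:
//   hw_result_t r = TIMER_STATUS_INVALID_ARGUMENTS;
//   HW_SUCCESS(r)           // false: severity bit 15 is set
//   HW_RESULT_FACILITY(r)   // HW_RESULT_FACILITY_TIMER (bits 7..14)
//   HW_RESULT_CODE(r)       // 2 (bits 0..6)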
// =======================
// Memory Allocation
// =======================
extern void *hw_alloc(size_t size);
extern void hw_free(void *ptr);
// =======================
// Events
// =======================
typedef enum
{
EVENT_TYPE_MANUAL,
EVENT_TYPE_AUTO
} hw_event_type_t;
extern hw_result_t hw_event_wait(hw_handle_t event);
extern hw_result_t hw_event_reset(hw_handle_t event);
extern hw_result_t hw_event_signal(hw_handle_t handle);
extern hw_result_t hw_event_create(hw_handle_t *out, hw_event_type_t event_type);
// =======================
// HAL
// =======================
#define HW_CACHELINE_SIZE (64)
extern uint32_t ke_get_system_tick();
extern void ke_flush_addr(void *addr, uint32_t num_of_cacheline);
extern uint32_t ke_get_current_core();
extern int32_t ke_interlocked_exchange(int32_t *addr, int32_t val);
extern int32_t ke_interlocked_compare_exchange(int32_t *addr, int32_t compare, int32_t val);
extern int32_t ke_interlocked_increment(int32_t *addr, int32_t val);
// =======================
// Print
// =======================
extern void hw_printf(const char *format, ...);
// =======================
// Handles
// =======================
extern hw_result_t hw_close_handle(hw_handle_t handle);
// =======================
// Semaphores
// =======================
extern hw_result_t hw_sem_create(hw_handle_t *out, int32_t count);
extern hw_result_t hw_sem_wait(hw_handle_t handle, int32_t quota);
extern hw_result_t hw_sem_signal(hw_handle_t handle, int32_t quota);
extern hw_result_t hw_sem_trywait(hw_handle_t handle, int32_t quota);
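// Usage sketch (illustrative only; assumes count and quota are unit counts):
//   hw_handle_t sem;
//   if (HW_SUCCESS(hw_sem_create(&sem, 1)))   // one unit -> binary semaphore
//   {
//       hw_sem_wait(sem, 1);                  // acquire one unit
//       /* ...exclusive work... */
//       hw_sem_signal(sem, 1);                // release one unit
//       hw_close_handle(sem);
//   }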
// =======================
// stdlib
// =======================
extern int32_t hw_memcmp(const void *ptr1, const void *ptr2, const size_t len);
extern void hw_memset(void *ptr, uint8_t value, size_t len);
// =======================
// threads
// =======================
#define THREAD_DEFAULT_STACK_SIZE 0x4000
#define THREAD_EXIT_CODE_TERMINATED 0xDEADDEAD
typedef enum
{
PRIORITY_HIGHEST = 0,
PRIORITY_HIGH,
PRIORITY_DEFAULT,
PRIORITY_LOW,
PRIORITY_LOWEST,
PRIORITY_LEVEL_NUM
} hw_thread_priority_t;
extern int32_t hw_current_thread_id();
extern hw_handle_t hw_current_thread();
extern hw_result_t hw_wait_for_thread_exit(hw_handle_t handle);
extern hw_result_t hw_thread_sleep(uint32_t millis);
extern void hw_thread_exit(int32_t exit_code);
extern hw_result_t hw_thread_create(void (*proc)(void *),
void *args,
hw_thread_priority_t priority,
uint32_t stack_size,
hw_handle_t *thread_handle);
extern hw_result_t hw_thread_start(hw_handle_t thread_handle);
extern hw_result_t hw_thread_terminate(hw_handle_t thread_handle);
extern hw_result_t hw_thread_get_exit_code(hw_handle_t thread_handle, int32_t *exit_code);
extern hw_result_t hw_thread_open(int32_t thread_id, hw_handle_t *out);
// =======================
// Timers
// =======================
typedef enum
{
TIMER_TYPE_MANUAL_RESET,
TIMER_TYPE_AUTO_RESET
} hw_timer_type_t;
extern hw_result_t hw_timer_create(hw_handle_t *out,
hw_timer_type_t type);
extern hw_result_t hw_timer_wait(hw_handle_t timer_handle);
extern hw_result_t hw_timer_set(hw_handle_t timer_handle, uint32_t tick, bool periodic);
extern hw_result_t hw_timer_cancel(hw_handle_t timer_handle);
#endif

View File

@ -0,0 +1,23 @@
#pragma once
#define OBTAIN_STRUCT_ADDR(member_addr, member_name, struct_name) ((struct_name*)((char*)(member_addr)-(char*)(&(((struct_name*)0)->member_name))))
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <stdbool.h>
#include <bifrost_types.h>
typedef void HW_FUNC();
typedef void HW_RUN_FUNC(uint32_t iteration);
typedef struct {
HW_FUNC* pinit;
HW_RUN_FUNC* prunTest;
HW_FUNC* pcleanup;
const char* testPathName;
uint32_t testPathHash;
} HW_XLIST;
#define HW_XLIST_TERMINATION (0xDEADBEEFul)
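// Illustrative sketch of OBTAIN_STRUCT_ADDR (the classic container_of idiom);
// the struct and variable names below are hypothetical:
//   typedef struct { uint32_t id; HW_XLIST entry; } demo_test_t;
//   demo_test_t t;
//   HW_XLIST* pentry = &t.entry;
//   demo_test_t* owner = OBTAIN_STRUCT_ADDR(pentry, entry, demo_test_t);  // owner == &t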

View File

@ -0,0 +1,33 @@
/*-------------------------------------------------------
|
| atomic.c
|
| Atomic operations that use instructions specific to
| the x86 ISA
|
|--------------------------------------------------------
|
| Copyright ( C ) 2013 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#include "bifrost_private.h"
void hw_lock(hw_lock_t* lock, const bool yield)
{
const HW_TESTID id = hw_getMyInstanceID();
while (HW_TESTID(-1) != hw_storeConditional(&lock->owner, HW_TESTID(-1), id)) // spin until the free marker is swapped for our ID
{
// TODO: yield?
}
}
void hw_unlock(hw_lock_t* lock, const bool yield)
{
hw_assert(hw_getMyInstanceID() == lock->owner);
const_cast<volatile HW_TESTID*>(&lock->owner)[0] = HW_TESTID(-1);
}
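// Usage sketch (illustrative only; assumes the lock is initialized elsewhere
// with owner = HW_TESTID(-1), i.e. unowned):
//   static hw_lock_t g_lock;
//   hw_lock(&g_lock, false);     // spin until ownership is acquired
//   /* ...critical section... */
//   hw_unlock(&g_lock, false);   // release; asserts we are the owner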

View File

@ -0,0 +1,84 @@
/*-------------------------------------------------------
|
| context.c
|
| Thread switching functions for 'x86' architecture.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2013 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#include "bifrost_private.h"
EXTERN_C UINT32 ts_arch_startNextIteration_ASM(VOID* kernelRegs, VOID* testStackPtr, HW_RUN_FUNC* prunTest, UINT32 iteration);
EXTERN_C UINT32 ts_arch_runIdleThread_ASM(VOID* kernelRegs, VOID (*prunTest)());
EXTERN_C UINT32 ts_arch_enter_init_cleanup_ASM(VOID* kernelRegs, VOID* testStackPtr, HW_FUNC* prunTest);
EXTERN_C UINT32 ts_arch_resume_test_ASM(VOID* curTestRegs, VOID *targetTestRegs);
EXTERN_C VOID ts_arch_resume_kernel_ASM(VOID* testRegs, VOID *kernelRegs);
UINT32 ts_arch_enter_init_cleanup_test(HW_FUNC* prunTest, UINT32 testIdx)
{
// Get the test's stack pointer
HW_TS_TESTDATA* testData = (HW_TS_TESTDATA*)hw_readptr(&testSlaveVars->testData);
VOID *testStackPtr = (VOID*)hw_readptr(&testData[testIdx].testStackPtr);
// Transfer control
ts_arch_enter_init_cleanup_ASM(&testSlaveVars->kernelRegs, testStackPtr, prunTest);
return 0; // We never actually hit this return, so the 0 is kind of bogus. Return is done from the _ASM
}
UINT32 ts_arch_startNextIteration(HW_RUN_FUNC* prunTest, UINT32 iteration, UINT32 testIdx, VOID *curTestRegs)
{
// Get the test's stack pointer
HW_TS_TESTDATA* testData = (HW_TS_TESTDATA*)hw_readptr(&testSlaveVars->testData);
VOID *testStackPtr = (VOID*)hw_readptr(&testData[testIdx].testStackPtr);
//Enable preemptive tasking timer
if(hw_pTestConfigs->bfinit.PREEMPTION_ON)
{
arch_int_startPreemptionTimer();
}
hw_int_enable(1 << 0);
// Transfer control
hw_errmsg( "%s: Error: ts_arch_startNextIteration was run and is not yet implemented.\n", __func__ );
ts_arch_startNextIteration_ASM(curTestRegs, testStackPtr, prunTest, iteration);
return 0; // We never actually hit this return, so the 0 is kind of bogus. Return is done from the _ASM
}
UINT32 ts_arch_runIdleThread(VOID (*prunTest)(), VOID *curTestRegs)
{
// Transfer control
hw_errmsg( "%s: Error: ts_arch_runIdleThread was run and is not yet implemented.\n", __func__ );
ts_arch_runIdleThread_ASM(curTestRegs, prunTest);
return 0;
}
UINT32 ts_arch_resume_test(VOID* curTestRegs, VOID *targetTestRegs)
{
ts_arch_resume_test_ASM(targetTestRegs, curTestRegs);
return 0; // We never actually hit this return, so the 0 is kind of bogus. Return is done from the _ASM
}
VOID ts_arch_resume_kernel(VOID* testRegs, VOID *kernelRegs)
{
//Disable preemptive tasking timer
if(hw_pTestConfigs->bfinit.PREEMPTION_ON)
{
arch_int_stopPreemptionTimer();
}
// Transfer control to assembly handler
ts_arch_resume_kernel_ASM(testRegs, kernelRegs);
}

View File

@ -0,0 +1,119 @@
.text
.align 4
.global ts_arch_startNextIteration_ASM
.global ts_arch_enter_init_cleanup_ASM
.global ts_arch_resume_test_ASM
.global ts_arch_resume_kernel_ASM
.global ts_arch_runIdleThread_ASM
// Possibly needs to save off program state and processor status for
// context switches
// Inputs: RDI = Pointer to the register save space
.macro TS_ARCH_SAVE_REGS_ASM
mov %rax, 0x00(%rdi)
mov %rbx, 0x08(%rdi)
mov %rcx, 0x10(%rdi)
mov %rdx, 0x18(%rdi)
mov %rsp, 0x20(%rdi)
mov %rbp, 0x28(%rdi)
mov %rsi, 0x30(%rdi)
mov %rdi, 0x38(%rdi)
mov %r8, 0x40(%rdi)
mov %r9, 0x48(%rdi)
mov %r10, 0x50(%rdi)
mov %r11, 0x58(%rdi)
mov %r12, 0x60(%rdi)
mov %r13, 0x68(%rdi)
mov %r14, 0x70(%rdi)
mov %r15, 0x78(%rdi)
.endm
// Inputs: RDI = Pointer to the register save space
.macro TS_ARCH_RESTORE_REGS_ASM
mov 0x00(%rdi), %rax
mov 0x08(%rdi), %rbx
mov 0x10(%rdi), %rcx
mov 0x18(%rdi), %rdx
mov 0x20(%rdi), %rsp
mov 0x28(%rdi), %rbp
mov 0x30(%rdi), %rsi
mov 0x40(%rdi), %r8
mov 0x48(%rdi), %r9
mov 0x50(%rdi), %r10
mov 0x58(%rdi), %r11
mov 0x60(%rdi), %r12
mov 0x68(%rdi), %r13
mov 0x70(%rdi), %r14
mov 0x78(%rdi), %r15
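// Restore RDI itself last, since it is the base pointer for all of these loads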
mov 0x38(%rdi), %rdi
.endm
// Inputs: RDI = Pointer to the register save space of the current test/kernel
// RSI = Pointer to the register save space of the test/kernel to restore
ts_arch_resume_test_ASM:
TS_ARCH_SAVE_REGS_ASM
mov %rsi, %rdi
TS_ARCH_RESTORE_REGS_ASM
mov $0, %rax
ret
// Inputs: RDI = Pointer to the register save space of the current test
// RSI = Pointer to the register save space of the kernel to restore
ts_arch_resume_kernel_ASM:
TS_ARCH_SAVE_REGS_ASM
mov %rsi, %rdi
TS_ARCH_RESTORE_REGS_ASM
mov $0, %rax
ret
// Inputs: RDI = Pointer to the register save space of the current test
// RSI = Pointer to the ts_poll function
ts_arch_runIdleThread_ASM:
//Return -- Not yet implemented in X86
ret
// Inputs: RDI = Pointer to the register save space of the kernel
// RSI = Pointer to the stack space of the test
// RDX = Pointer to the runTest_* function
// RCX = Iteration argument for runTest_*
ts_arch_enter_init_cleanup_ASM:
// Save all the registers
TS_ARCH_SAVE_REGS_ASM
// Temporarily save the kernel stack pointer we'll return to
mov %rsp, %rax
// Change the stack
mov %rsi, %rsp
// Save the kernel register storage space pointer so we can restore registers on the return
push %rdi
// Load the iteration argument
mov %rcx, %rdi
// Call runTest_*
call *%rdx
// Pop the kernelRegs pointer
pop %rdi
// Restore all the registers
TS_ARCH_RESTORE_REGS_ASM
// Return 1 to indicate that this is a return from runTest_ instead of a resume kernel
mov $1, %rax
ret
// Inputs: RDI = Pointer to the register save space of current test
// RSI = Pointer to the stack space of the next test
// RDX = Pointer to the runTest_* function
// RCX = Iteration argument for runTest_*
ts_arch_startNextIteration_ASM:
//Not yet implemented
ret

View File

@ -0,0 +1,32 @@
/*-------------------------------------------------------
|
| exception.c
|
| Functions for exception handling on the 'x86' architecture.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2013 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#include "bifrost_private.h"
void arch_exc_globalInit()
{
}
HW_EXC_HANDLER_FUNC* arch_exc_registerHandler(HW_EXC_VECTOR exception, HW_EXC_HANDLER_FUNC* handler)
{
return NULL;
}
HW_EXC_VECTOR arch_exc_queryExceptionCause(void)
{
return (HW_EXC_VECTOR)0;
}

View File

@ -0,0 +1,69 @@
/*-------------------------------------------------------
|
| interrupt.c
|
| Functions for interrupt handling on the 'x86' architecture.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2013 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#include "bifrost_private.h"
void arch_int_globalInit()
{
}
void arch_int_init()
{
}
HW_INT_HANDLER_FUNC* arch_int_registerHandler(HW_INT_VECTOR interrupt, HW_INT_HANDLER_FUNC* handler)
{
return NULL;
}
UINT32 arch_int_enable(UINT32 mask)
{
return mask;
}
UINT32 arch_int_disable(UINT32 mask)
{
return mask;
}
void arch_int_set(UINT32 mask)
{
}
void arch_int_clear(UINT32 mask)
{
}
HW_RESULT arch_int_timerSetTimeout(HW_INT_VECTOR timer, UINT32 timeout)
{
return HW_E_NOTIMPL;
}
HW_RESULT arch_int_timerSetCompare(HW_INT_VECTOR timer, UINT32 compare)
{
return HW_E_NOTIMPL;
}
UINT32 arch_int_queryInterrupt(void)
{
return 0;
}
UINT32 arch_int_queryEnable(void)
{
return 0;
}

View File

@ -0,0 +1,44 @@
/*-------------------------------------------------------
|
| main.c
|
| Architecture specific entry point for Bifrost
|
|--------------------------------------------------------
|
| Copyright ( C ) 2013 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#include "bifrost_private.h"
//extern int platform_init();
int main(int argc, char* argv[])
{
//// Only the linux platform is supported for x86 right now, so call the platform specific init and nothing else
//platform_init();
//initGlobals();
UINT32 dwpid = hw_getCoreNum();
// Check if this core is active
if(hw_activeCoreList[dwpid])
{
if(dwpid == bifrostCachedGlobals.tpid)
{
testDriver(dwpid);
}
else
{
testSlave(dwpid);
}
}
for(;;){
asm volatile("hlt");
}
}

View File

@ -0,0 +1,34 @@
/*-----------------------------------------------------
|
| preemption.c
|
| Contains functions used in preemptive tasking
| configuration and setup.
|
|------------------------------------------------------
|
| Copyright (C) 2011 Microsoft Corporation
| All Rights Reserved
| Confidential and Proprietary
|
|------------------------------------------------------
*/
#include "bifrost_private.h"
HW_INT_VECTOR_MASK hw_criticalSectionBegin()
{
return 0;
}
void hw_criticalSectionEnd(HW_INT_VECTOR_MASK intMask)
{
}
// Debug function. This is not public code currently.
void hw_resetPreemptionTimer(UINT32 ticks)
{
}

View File

@ -0,0 +1,104 @@
######################################################
#
# sources.imk
#
# Contains all C source information for the
# Bifrost build for x86 architecture.
#
# Copyright (C) 2016 Microsoft Corporation
# All Rights Reserved
# Confidential and Proprietary
#
######################################################
ARCHTYPE := x86
#
# Kernel CFLAGS
#
X86_KERNEL_CFLAGS = $(DEFAULT_CFLAGS) $(WARNING_CFLAGS)
#
# Kernel binary selection--if CUSTOM_KERNEL_CFLAGS
# is specified, build a testsuite-specific
# version of the kernel under BIN_DIR--otherwise,
# use the shared kernel.
#
X86_KERNEL_BINARY = $(if $(CUSTOM_KERNEL_CFLAGS),$(BIN_DIR)/obj/x86/kernel/bifrost_kernel.so,$(OBJ_ROOT)/x86/$(BFCOMP_MODE)/kernel/bifrost_kernel.so)
# These files will be compiled as Assembly instead of C++.
# Apply the appropriate COMPILE_LANGUAGE override for them
X86_ASM_SOURCES := $(BF_SRC_ROOT)/arch/x86/context.s
X86_O3OPT_SOURCES := $(SHARED_KERNEL_O3OPT_SOURCES) \
$(DRIVER_O3OPT_SOURCES)
X86_SOURCES := $(SHARED_KERNEL_SOURCES) \
$(DRIVER_SOURCES) \
$(X86_ASM_SOURCES) \
$(X86_O3OPT_SOURCES) \
$(X86_SYS_SOURCES) \
$(BF_SRC_ROOT)/arch/x86/atomic.c \
$(BF_SRC_ROOT)/arch/x86/context.c \
$(BF_SRC_ROOT)/arch/x86/exception.c \
$(BF_SRC_ROOT)/arch/x86/interrupt.c \
$(BF_SRC_ROOT)/arch/x86/main.c \
$(BF_SRC_ROOT)/arch/x86/preemption.c \
$(BF_SRC_ROOT)/arch/x86/suspend.c \
$(BF_SRC_ROOT)/arch/x86/terminate.c \
$(BF_SRC_ROOT)/arch/x86/time.c
X86_HEADERS := $(SHARED_HEADERS) \
$(BF_SRC_ROOT)/system/include/bifrost_system_api_x86.h \
$(BF_SRC_ROOT)/system/include/bifrost_system_constants_x86.h
X86_IDIRS := $(BF_SRC_ROOT)/arch/x86/include
# Add library IDIRS
ARCHTYPE := x86
$(eval $(kernel_lib_template))
define kernel_objs_template
FLAG := $$(call TOLOWER,$$(FLAG_UPPER))
#
# x86 kernel objects and rules
#
X86_KERNEL_OBJS := $$(X86_SOURCES:$(BF_SRC_ROOT)/%=$(OBJ_ROOT)/x86/$$(FLAG)/kernel/%.o)
X86_O3OPT_OBJS := $$(X86_O3OPT_SOURCES:$(BF_SRC_ROOT)/%=$(OBJ_ROOT)/x86/$$(FLAG)/kernel/%.o)
$$(filter %.s.o %.S.o,$$(X86_KERNEL_OBJS)): COMPILE_LANGUAGE := assembler-with-cpp
# Specify -O3 optimization on a per-file basis
$$(X86_O3OPT_OBJS): X86_KERNEL_CFLAGS += -O3
$$(X86_KERNEL_OBJS) $$(X86_PLAT_$(FLAG_UPPER)_OBJS): $(OBJ_ROOT)/x86/$$(FLAG)/kernel/%.o : $(BF_SRC_ROOT)/% $(X86_HEADERS)
@echo Compiling $$< to $$@...
@mkdir -p $$(@D)
@$(COMPILE) -c -static -shared \
-x $$(COMPILE_LANGUAGE) \
$$(X86_KERNEL_CFLAGS) $(FLAG_UPPER:%=-D%) \
-D__X86__ \
$(BIFROST_IDIRS:%=-I%) \
$(X86_IDIRS:%=-I%) \
$(X86_SYS_IDIRS:%=-I%) \
$$< -o $$@
#
# Bifrost kernel binary for this architecture
#
$(OBJ_ROOT)/x86/$$(FLAG)/kernel/bifrost_kernel.so: $$(X86_KERNEL_OBJS)
@echo Linking $$@...
@mkdir -p $$(@D)
ld -r $$^ -o $$@
endef
$(foreach FLAG_UPPER,$(BFCOMP_FLAGS_UPPER),$(eval $(kernel_objs_template)))

View File

@ -0,0 +1,21 @@
/*-------------------------------------------------------
|
| suspend.c
|
| Functions for suspending execution on a core for the 'x86' architecture.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2014 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#include "bifrost_private.h"
void arch_suspend()
{
}

View File

@ -0,0 +1,19 @@
/*-------------------------------------------------------
|
| terminate.c
|
| Functions for terminating execution on a core for the 'x86' architecture.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2014 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#include "bifrost_private.h"
void hw_terminate()
{
}

View File

@ -0,0 +1,40 @@
/*-----------------------------------------------------
|
| time.c
|
| Contains functions for obtaining the current time
| on the x86 architecture.
|
|------------------------------------------------------
|
| Copyright (C) 2011 Microsoft Corporation
| All Rights Reserved
| Confidential and Proprietary
|
|------------------------------------------------------
*/
#include "bifrost_private.h"
HW_RESULT arch_int_startPreemptionTimer()
{
return S_OK;
}
HW_RESULT arch_int_stopPreemptionTimer()
{
return S_OK;
}
UINT64 hw_getCycleCount()
{
UINT64 currentTime = RDTSC();
//
// The TSC register increments at a constant rate regardless
// of power state so we can generally count on it for a realtime
// reading.
//
return currentTime - bifrostCachedGlobals.hw_beginning_cycle_count;
}

View File

@ -0,0 +1,19 @@
/*
* Assembly functions to support interlocked exchange
*/
.text
// a2 = addr (int ptr)
// a3 = compare value
// a4 = new value
.global arch_interlocked_compare_exchange
//uint32_t arch_interlocked_compare_exchange(uint32_t* addr, uint32_t compare, uint32_t val)
.align 4
arch_interlocked_compare_exchange:
entry a1, 16
wsr.scompare1 a3
s32c1i a4, a2, 0
mov a2, a4
retw

View File

@ -0,0 +1,129 @@
#include "bifrost_hs_intr.h"
#include "bifrost_hs_mem.h"
#include "bifrost_hs_boot.h"
#include "bifrost_hs_context.h"
#include "bifrost_types.h"
#include "bifrost_system_constants_xtensa.h"
#include "interrupt.h"
#include "context.h"
#include "atomic.h"
#include "mem.h"
int32_t ke_hal_setup(hw_arch_bootinfo_t *bootinfo)
{
int32_t result = 0;
if(bootinfo == NULL)
return -1;
result = arch_interrupt_init(&bootinfo->int_info);
if(result != 0)
return result;
result = arch_mem_init(bootinfo);
return result;
}
void ke_set_timer_timeout(uint32_t ms)
{
uint32_t timeout = ms * (PROC_FREQUENCY_MHZ * 1000);
arch_set_timer_timeout(timeout);
return;
}
void ke_trigger_intr(uint32_t core, uint32_t vec)
{
arch_trigger_interrupt(core, vec);
}
// IRQL on Xtensa has an identical mapping between the kernel-defined and arch-specific masks
hw_irql_t ke_set_irql(hw_irql_t irql)
{
return arch_set_irql(irql);
}
hw_irql_t ke_get_irql()
{
return arch_get_irql();
}
void ke_context_switch(void *intr_context, void *old_context, void *new_context)
{
return arch_context_switch((UserFrame*)intr_context, (UserFrame*)old_context, (UserFrame*)new_context);
}
void ke_create_context(void *context, void *pc, void *sp, hw_irql_t irql, void *arg)
{
arch_create_context((UserFrame*)context, pc, sp, (uint32_t)irql, arg);
}
uint32_t ke_get_current_core()
{
return xthal_get_prid();
}
void ke_flush_addr(void *addr, uint32_t num_of_cacheline)
{
xthal_dcache_region_writeback_inv(addr, num_of_cacheline * HW_CACHELINE_SIZE);
}
hw_intr_handler_t ke_register_intr_handler(uint32_t vec, hw_intr_handler_t handler, void *context)
{
return arch_register_intr_handler(vec, handler, context);
}
int32_t ke_interlocked_exchange(int32_t *addr, int32_t data)
{
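// No native atomic exchange here: retry compare-exchange until the new value
// is swapped in atomically; returns the previously observed value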
int32_t orig = *addr;
while(arch_interlocked_compare_exchange(addr, orig, data) != orig)
{
orig = *addr;
}
return orig;
}
int32_t ke_interlocked_compare_exchange(int32_t *addr, int32_t compare, int32_t val)
{
return arch_interlocked_compare_exchange(addr, compare, val);
}
int32_t ke_interlocked_increment(int32_t *addr, int32_t val)
{
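// Atomic add emulated with a compare-exchange retry loop; returns the value
// observed before the addition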
int32_t orig = *addr;
while(arch_interlocked_compare_exchange(addr, orig, orig + val) != orig)
{
orig = *addr;
}
return orig;
}
void ke_register_exc_handler(hw_exc_type_t type, hw_exc_handler_t handler)
{
switch(type)
{
case invalid_op_exc:
arch_register_exc_handler(HW_EXC_ILLEGAL, handler);
break;
case debug_exc:
break;
case div_by_zero_exc:
arch_register_exc_handler(HW_EXC_DIVIDE_BY_ZERO, handler);
break;
case unrecoverable_exc:
break;
case unsupported_thr_fatal_exc:
break;
case unsupported_thr_nonfatal_exc:
break;
case page_fault_exc:
arch_register_exc_handler(HW_EXC_INSTR_ADDR_ERROR, handler);
arch_register_exc_handler(HW_EXC_INSTR_DATA_ERROR, handler);
arch_register_exc_handler(HW_EXC_LOAD_STORE_ADDR_ERROR, handler);
arch_register_exc_handler(HW_EXC_LOAD_STORE_DATA_ERROR, handler);
break;
case general_protection_exc:
break;
}
}

View File

@ -0,0 +1,99 @@
/*-------------------------------------------------------
|
| 1BL.S
|
| 1BL for 'xtensa' architecture.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2014 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
.equiv PCIE_RESET_RELEASE_ADDR, 0x04080090
.equiv PCIE_RESET_RELEASE_DATA, 0x00000003
.equiv INTC_P0_0_CLEAR_ADDR, 0x04000008
.equiv INTC_P0_0_CLEAR_BIT, 0
.equiv INTC_P0_0_CLEAR_BIT_MASK, 0x1
.equiv INTC_P1_0_CLEAR_ADDR, 0x04000088
.equiv INTC_P1_0_CLEAR_BIT, 1
.equiv DRAM_RESET_TABLE_BASE, 0xA1000000
#ifdef USE_ALT_VECTOR
.equiv SRAM_RESET_VECTOR, 0x00100020
#else
.equiv SRAM_RESET_VECTOR, 0x00100000
#endif
.equiv SBOOT_RESET_VECTOR, 0x07108000
.equiv SCRPAD0_ADDR, 0x02000030
.equiv SCRPAD0_DATA_BIT, 1
.begin no-absolute-literals
.section .SharedResetVector.text, "ax"
.align 4
.global _SharedResetVector
_SharedResetVector:
j .SharedResetHandler
.align 4
.literal_position
.align 4
.SharedResetHandler:
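// a0 = this core's processor ID; cores 0 and 1 continue as control nodes,
// all other cores jump through the DRAM reset table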
rsr.prid a0
bltui a0, 2, .ControlNode
.CoreN:
movi a1, DRAM_RESET_TABLE_BASE
addx4 a1, a0, a1
l32i a1, a1, 0
jx a1
.ControlNode:
beqi a0, 0, .Core0 // dispatch for core0
.Core1:
#ifndef L2BOOT
movi a1, INTC_P1_0_CLEAR_ADDR
.Poll:
l32ai a2, a1, 0
bbci a2, INTC_P1_0_CLEAR_BIT, .Poll
s32i a2, a1, 0
#endif
j .CoreN
.Core0:
#ifndef L2BOOT
movi a0, PCIE_RESET_RELEASE_ADDR
movi a1, PCIE_RESET_RELEASE_DATA
l32i a2, a0, 0
beq a1, a2, .PCIE_Initialized
s32i a1, a0, 0
.PCIE_Initialized:
#ifdef PCIE_SYNC
movi a1, INTC_P0_0_CLEAR_ADDR
.Poll_2bl:
l32ai a2, a1, 0
bbci a2, INTC_P0_0_CLEAR_BIT, .Poll_2bl
s32i a2, a1, 0
#endif
#ifdef SBOOT
movi a0, SBOOT_RESET_VECTOR
#else
movi a0, SRAM_RESET_VECTOR
l32i a0, a0, 0
#endif
jx a0
#else // L2BOOT
movi a0, DRAM_RESET_TABLE_BASE
l32i a0, a0, 0
jx a0
#endif
.size _SharedResetVector, . - _SharedResetVector
.end no-absolute-literals

View File

@ -0,0 +1,368 @@
/*-------------------------------------------------------
|
| 2bl.c
|
| Main of the boot loader, called from the reset handler.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2013 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#include "init_helper.h"
#include "HuPPCIeATU.h"
#include "HupreBootDefines.h"
#include "hup_chip.h"
#define BOOT_STACK_SIZE (0x400)
extern int __stack;
BOOT_DIAG* gBootDiagPtr = (BOOT_DIAG*)BOOT_DIAG_BASE_ADDR;
#ifndef PCIE_SYNC
INIT_OPTION gInitOption __attribute__ ((section(".sram_init_opt.data")));
INIT_OPTION* gInitOptionPtr = (INIT_OPTION*)&gInitOption;
#else
INIT_OPTION* gInitOptionPtr = (INIT_OPTION*)INIT_OPTION_BASE_ADDR;
#endif
extern void dramc_init(int dpd);
void
StatusUpdate(
HUPRE_BOOT_STATUS Status
)
/*++
Routine Description:
Perform a status update the SoC can see to trace our progress.
Arguments:
Status - Status to write
Return value:
None.
--*/
{
reg_write32(HUP_CN_MISC_INTC_SCRATCH_REG0, Status);
}
void
DoDramInit()
/*++
Routine Description:
Setup DDR.
Arguments:
None.
Return value:
None.
--*/
{
dramc_init(0);
}
void
MapAtu()
/*++
Routine Description:
Temporary ATU setup.
ISSUE-2014/04/10-jloeser: Remove once HupDriver.sys ATU setup has been
verified.
Arguments:
None.
Return value:
None.
--*/
{
UINT32 msiSocHigh;
UINT32 msiSocLow;
volatile PCIE_ATU_REGISTERS* regs;
regs = (volatile PCIE_ATU_REGISTERS *)(HUP_ADDRESS_pcie + PCIE_ATU_STARTING_BYTE_OFFSET);
//
// prepare for writing outbound ATU entry 31
//
regs->VIEWPORT.Bits.REGION_DIR = PCIE_ATU_REGION_DIR_OUTBOUND; //0;
regs->VIEWPORT.Bits.REGION_INDEX = 31;
regs->LWR_BASE.Raw = XTENSA_PCIE_END - 4096;
regs->UPPER_BASE = 0;
regs->LIMIT_ADDR.Raw = XTENSA_PCIE_END - 1;
//
// Obtain MSI address: Found at offsets 0x54 (low), 0x58 (high)
// from PCI config space.
//
msiSocLow = *(PUINT32)(HUP_ADDRESS_pcie + 0x54);
msiSocHigh = *(PUINT32)(HUP_ADDRESS_pcie + 0x58);
regs->LWR_TARGET_ADDR.Raw = msiSocLow & ~0xFFFUL;
regs->UPPER_TARGET_ADDR = msiSocHigh;
regs->REGION_CTRL1.Raw = 0;
regs->REGION_CTRL3.Raw = 0;
regs->REGION_CTRL2.Raw = 0;
regs->REGION_CTRL2.OutboundBits.REGION_EN = 1;
}
void
DoGetMsiDetails(
PUINT32* MsiAddress,
PUINT32 MsiData0
)
/*++
Routine Description:
Obtain MSI address and data.
The last 4KB of the PCI range have been set up by the SoC to contain the
MSI page.
The 12-bit offset into the PCI range and the data base word are found
in the PCIe config space.
Arguments:
MsiAddress - Upon return, will contain the address to write to to trigger
an MSI.
MsiData0 - Upon return, will contain the data to write to trigger MSI 0.
Return value:
None.
--*/
{
UINT32 msiSocLow;
//
// ISSUE-2014/04/10-jloeser: Remove once HupDriver.sys ATU setup has been
// verified.
//
//MapAtu();
//
// Obtain MSI address: Found at offsets 0x54 (low) from PCI config space.
//
msiSocLow = *(PUINT32)(HUP_ADDRESS_pcie + 0x54);
*MsiAddress = (PUINT32)(XTENSA_PCIE_END - 4096 + (msiSocLow & 0xFFCUL));
//
// Obtain MSI data: Found in lower 16 bits at offset 0x5C of PCI config
// space.
//
*MsiData0 = *(PUINT32)(HUP_ADDRESS_pcie + 0x5C) & 0xFFFF;
}
void
DoSendMsi(
PUINT32 MsiAddress,
UINT32 MsiData
)
/*++
Routine Description:
Signal DDR readiness to SoC by sending it an MSI.
Arguments:
MsiAddress - Address to write to to send an MSI.
MsiData - Data to write.
Return value:
None.
--*/
{
__asm__ __volatile__ ( "memw \n" );
*MsiAddress = MsiData;
}
void
DoWaitFor3BL()
/*++
Routine Description:
Wait for 3BL readiness.
Arguments:
None.
Return value:
None.
--*/
{
UINT32 bits;
do
{
bits = *(volatile UINT32*)(HUP_CN_MISC_INTC_P0_0_DATA);
} while ((bits & 0x2) == 0);
//
// Ack our interrupt at the interrupt controller.
//
*(UINT32*)(HUP_CN_MISC_INTC_P0_0_DATA) = bits;
}
#ifdef BASRAM_ONLY
// Can't declare ANY variables in the function
// in which we switch the stack. Otherwise,
// they'll write above the top of our stack.
// So main will just switch the stack and then
// execute its other tasks in a second function
int main_body();
int main()
{
// Set up stacks first of all
asm volatile(
"mov a1, %[stack];"
:
: [stack] "r" ((int)&__stack - BOOT_STACK_SIZE * get_prid())
: "a1"
);
main_body();
// Bogus return, we should never get here
return 0;
}
int main_body()
{
if(get_prid() == 0)
{
#ifdef PCIE_SYNC
*(r32*)PCIE_RESET_RELEASE_ADDR = PCIE_RESET_RELEASE_DATA;
while(*(r32*)HUP_CN_MISC_INTC_P0_0_DATA == 0);
*(r32*)HUP_CN_MISC_INTC_P0_0_DATA = 0;
#endif
StatusUpdate(HUPRE_BOOT_STATUS_2BL_STARTED);
*(r32*)HUP_CHIP_POR_CLOCKGATING1_ADDRESS = 0x303FFFF;
*(r32*)HUP_CHIP_POR_CLOCKGATING2_ADDRESS = 0x03FFFFFF;
*(r32*)HUP_CHIP_POR_CLOCKGATING5_ADDRESS = 0xFFFFFFFF;
*(r32*)HUP_CHIP_POR_CLOCKGATING6_ADDRESS = 0xFFFFFFFF;
if(gInitOptionPtr->dramc_init_opt.std.DRAMC_INIT_FOR_BASRAM_MODE ||
get_platform_type() == PLAT_VEL ||
get_platform_type() == PLAT_PAL)
{
gBootDiagPtr->dramc.init_time.start = (unsigned long long )get_ccount() * 2;
DoDramInit();
gBootDiagPtr->dramc.init_time.end = (unsigned long long )get_ccount() * 2;
StatusUpdate(HUPRE_BOOT_STATUS_2BL_DDR_UP);
}
StatusUpdate(HUPRE_BOOT_STATUS_2BL_READY_FOR_3BL);
set_dramc_ready();
StatusUpdate(HUPRE_BOOT_STATUS_2BL_DONE);
}
else if(get_prid() == 1)
{
wait_for_dramc_ready();
}
asm volatile(
"movi a0, 0x20122000 \n"
"jx a0 \n"
);
// Bogus return, we should never get here
return 0;
}
#else //!BASRAM_ONLY
// No stack switching on this path
// since only one core executes it
int main()
{
PUINT32 msiAddress;
UINT32 msiData0;
StatusUpdate(HUPRE_BOOT_STATUS_2BL_STARTED);
*(r32*)HUP_CHIP_POR_CLOCKGATING1_ADDRESS = 0x303FFFF;
*(r32*)HUP_CHIP_POR_CLOCKGATING2_ADDRESS = 0x03FFFFFF;
*(r32*)HUP_CHIP_POR_CLOCKGATING5_ADDRESS = 0xFFFFFFFF;
*(r32*)HUP_CHIP_POR_CLOCKGATING6_ADDRESS = 0xFFFFFFFF;
if(gInitOptionPtr->dramc_init_opt.std.NO_DRAMC_INIT == 0)
{
gBootDiagPtr->dramc.init_time.start = (unsigned long long )get_ccount() * 2;
DoDramInit();
gBootDiagPtr->dramc.init_time.end = (unsigned long long )get_ccount() * 2;
}
#ifndef VALIDATION_MODE
set_dramc_ready();
#else
StatusUpdate(HUPRE_BOOT_STATUS_2BL_DDR_UP);
StatusUpdate(HUPRE_BOOT_STATUS_2BL_READY_FOR_3BL);
DoGetMsiDetails(&msiAddress, &msiData0);
StatusUpdate(HUPRE_BOOT_STATUS_2BL_MSI_MAPPED);
DoSendMsi(msiAddress, msiData0);
StatusUpdate(HUPRE_BOOT_STATUS_2BL_MSI_SENT);
DoWaitFor3BL();
StatusUpdate(HUPRE_BOOT_STATUS_2BL_SEEN_3BL);
StatusUpdate(HUPRE_BOOT_STATUS_2BL_DONE);
#endif //VALIDATION_MODE
// Invalidate cached stack
asm volatile(
"mov a2, %[addr]\n"
"loopnez %[size], .sram_stack_inv_end\n"
"dhi a2, 0\n"
"addi a2, a2, 64\n"
".sram_stack_inv_end:\n"
:
: [addr] "r" ((int)&__stack - BOOT_STACK_SIZE), [size] "r" (BOOT_STACK_SIZE / XCHAL_DCACHE_LINESIZE)
: "a2"
);
// Transfer control via jump table
asm volatile(
"movi a0, 0xA1000000 \n"
"l32i a0, a0, 0 \n"
"jx a0 \n"
);
// Bogus return, we should never get here
return 0;
}
#endif //BASRAM_ONLY

View File

@ -0,0 +1,25 @@
/*
* Alternate reset vector
*/
.begin literal_prefix .AltResetVector
.section .AltResetVector.text, "ax"
/* Minimal vector, just jump to the handler defined below */
.align 4
.global _AltResetVector
_AltResetVector:
j _AltResetHandler
.size _AltResetVector, . - _AltResetVector
/* Alternate reset vector handler, just jump to the normal boot vector */
.align 4
.literal_position // tells the assembler/linker to place literals here
.align 4
.global _AltResetHandler
_AltResetHandler:
j.l 0x7100000, a0
.end literal_prefix

View File

@ -0,0 +1,269 @@
//---------------------------------------------------------------------------
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//
// Abstract:
// Contains hardware data structure definitions of the internal
// address translation unit (iATU) of the PCIe endpoint on the HuP.
//
//---------------------------------------------------------------------------
#pragma once
static const UINT32 HUP_ADDRESS_pcie = 0x04400000;
static const UINT32 XTENSA_PCIE_BASE = 0x3E000000;
static const UINT32 XTENSA_PCIE_SIZE = 0x02000000;
static const UINT32 XTENSA_PCIE_END = 0x40000000;
// These constants are configured in silicon and cannot be read by
// software.
//
static const UINT32 PCIE_ATU_CX_INTERNAL_ATU_ENABLE = 1; // Indicates that the ATU is enabled
static const UINT32 PCIE_ATU_CX_ATU_NUM_OUTBOUND_REGIONS = 32; // Maximum number of outbound translation regions that can be defined
static const UINT32 PCIE_ATU_CX_ATU_NUM_INBOUND_REGIONS = 32; // Maximum number of inbound translation regions that can be defined
static const UINT32 PCIE_ATU_CX_ATU_MIN_REGION_SIZE = (UINT32)(4 * 1024); // Minimum size and alignment of a single translation region
// Starting byte and DWORD offsets of ATU registers in PCIe config space
static const UINT32 PCIE_ATU_STARTING_BYTE_OFFSET = 0x900;
static const UINT32 PCIE_ATU_STARTING_DWORD_OFFSET = 0x900 >> 2;
// Byte and DWORD offsets of the last ATU register in PCIe config space.
static const UINT32 PCIE_ATU_LAST_REGISTER_BYTE_OFFSET = 0x920;
static const UINT32 PCIE_ATU_LAST_REGISTER_DWORD_OFFSET = 0x920 >> 2;
//static const UINT32 PCIE_ATU_REGION_DIR_OUTBOUND = 0;
//static const UINT32 PCIE_ATU_REGION_DIR_INBOUND = 1;
#define PCIE_ATU_REGION_DIR_OUTBOUND 0
#define PCIE_ATU_REGION_DIR_INBOUND 1
//////////////////////////////////////////////////////
// ATU Register Definitions.
// Register byte offsets are in comments.
// Where the spec defines inbound and outbound registers at the same
// offset, if they are identical then only one version is defined here.
// If they are not identical then the register contains a union of 2
// bitfield structs (one for each direction).
//
// The viewport register selects a region and direction for subsequent
// register read/write operations.
//
//////////////////////////////////////
// Example 1:
// ASIC-side code to set outbound region 2 to map the first 4K
// of the HuP PCIe 1GB data window to a specific SoC address
/*
PPCIE_ATU_REGISTERS pRegisters = (PPCIE_ATU_REGISTERS)(0x4400000 + PCIE_ATU_STARTING_BYTE_OFFSET); // 0x4400000 is start of PCIe config space in HuP memory map
pRegisters->VIEWPORT.Bits.REGION_DIR = PCIE_ATU_REGION_DIR_OUTBOUND;
pRegisters->VIEWPORT.Bits.REGION_INDEX = 2;
pRegisters->LWR_BASE.Raw = 0x40000000; // 0x400000000 is start of 1GB PCIe data window in HuP memory map. This must be 4K-aligned.
pRegisters->UPPER_BASE = 0;
pRegisters->LIMIT_ADDR.Raw = 0x40000FFF; // create a 4K mapping. This is the lower 32-bits of the address of the last byte in the mapping. (Upper bits are defined by UPPER_BASE).
pRegisters->LWR_TARGET_ADDR.Raw = SocAddress.LowPart;
pRegisters->UPPER_TARGET_ADDR = SocAddress.HighPart;
pRegisters->REGION_CTRL1.Raw = 0;
pRegisters->REGION_CTRL3.Raw = 0;
pRegisters->REGION_CTRL2.Raw = 0;
pRegisters->REGION_CTRL2.OutboundBits.REGION_EN = 1; // Indicates that the region mapping should be used. Always set this register last after all other registers for the region have been programmed.
////////////////////////////
// Example 2:
// SoC-side code to set inbound region 3 to map the INTC register set
// on BAR 0.
PPCIE_ATU_REGISTERS pRegisters = (PPCIE_ATU_REGISTERS)(pBAR1 + PCIE_ATU_STARTING_BYTE_OFFSET); // Assumes that pBAR1 was set to the system virtual address of the start of BAR 1 from a prior call to MmMapIoSpace and that BAR 1 maps to the PCIe core register set.
pRegisters->VIEWPORT.Bits.REGION_DIR = PCIE_ATU_REGION_DIR_INBOUND;
pRegisters->VIEWPORT.Bits.REGION_INDEX = 3;
pRegisters->LWR_BASE.Raw = pBAR0Physical; // Assumes that pBAR0Physical was set to the SoC's 4K-aligned starting physical address for BAR 0 assigned by Windows and received during IRP_MN_START_DEVICE or PrepareHardware
pRegisters->UPPER_BASE = 0; // Assumes 32-bit Windows. If running on 64-bit set UPPER_BASE to upper DWORD of pBAR0Physical
pRegisters->LIMIT_ADDR.Raw = pBAR0Physical + 0xFFF; // Create a 4K mapping. This is the lower 32-bits of the address of the last byte in the mapping. (Upper bits are defined by UPPER_BASE).
pRegisters->LWR_TARGET_ADDR.Raw = 0x4000000; // Start of INTC registers in HuP memory map. Must be 4K-aligned.
pRegisters->UPPER_TARGET_ADDR = 0; // HuP uses 32-bit addressing so high word is zero
pRegisters->REGION_CTRL1.Raw = 0;
pRegisters->REGION_CTRL2.Raw = 0;
pRegisters->REGION_CTRL3.Raw = 0;
pRegisters->REGION_CTRL2.InboundBits.BAR_NUM = 0; // Shown for completeness. Setting Raw to zero earlier already set this bit to zero. For other BARs set this field to the BAR number.
pRegisters->REGION_CTRL2.InboundBits.REGION_EN = 1;
*/
typedef struct PCIeATUViewPortBits
{
UINT32 REGION_INDEX:5;
UINT32 VP_RSVD:26;
UINT32 REGION_DIR:1;
} PCIE_ATU_VIEWPORT_BITS;
typedef union PCIeATUViewport
{
PCIE_ATU_VIEWPORT_BITS Bits;
UINT32 Raw;
} PCIE_ATU_VIEWPORT;
// Per-direction, per-channel registers
//
typedef struct PCIeATURegionCtrl1Bits
{
UINT32 TYPE:5;
UINT32 TC:3;
UINT32 TD:1;
UINT32 ATTR:2;
UINT32 IDO:1;
UINT32 TH:1;
UINT32 RSVD_P_13:3;
UINT32 AT:2;
UINT32 PH:2;
UINT32 CTRL_1_FUNC_NUM:5;
UINT32 RSVDP_25:7;
} PCIE_ATU_REGION_CTRL1_BITS;
typedef union PCIeATURegionCtrl1
{
PCIE_ATU_REGION_CTRL1_BITS Bits;
UINT32 Raw;
} PCIE_ATU_REGION_CTRL1;
typedef struct PCIeATURegionCtrl2OutboundBits
{
UINT32 MSG_CODE:8;
UINT32 RSVDP_8:11;
UINT32 FUNC_BYPASS:1;
UINT32 RSVDP_20:7;
UINT32 DMA_BYPASS:1;
UINT32 CFG_SHIFT_MODE:1;
UINT32 INVERT_MODE:1;
UINT32 RSVDP_30:1;
UINT32 REGION_EN:1;
} PCIE_ATU_REGION_CTRL2_OUTBOUND_BITS;
typedef struct PCIeATURegionCtrl2InboundBits
{
UINT32 MSG_CODE:8;
UINT32 BAR_NUM:3;
UINT32 RSVDP_11:3;
UINT32 TC_MATCH_EN:1;
UINT32 TD_MATCH_EN:1;
UINT32 ATTR_MATCH_EN:1;
UINT32 TH_MATCH_EN:1;
UINT32 AT_MATCH_EN:1;
UINT32 FUNC_MATCH_EN:1;
UINT32 VF_MATCH_EN:1;
UINT32 MSG_CODE_MATCH_EN:1;
UINT32 PH_MATCH_EN:1;
UINT32 RSVDP_23:1;
UINT32 RESPONSE_CODE:2;
UINT32 RSVDP_26:1;
UINT32 FUZZY_TYPE_MATCH_CODE:1;
UINT32 CFG_SHIFT_MODE:1;
UINT32 INVERT_MODE:1;
UINT32 MATCH_MODE:1;
UINT32 REGION_EN:1;
} PCIE_ATU_REGION_CTRL2_INBOUND_BITS;
typedef union PCIeATURegionCtrl2
{
PCIE_ATU_REGION_CTRL2_OUTBOUND_BITS OutboundBits;
PCIE_ATU_REGION_CTRL2_INBOUND_BITS InboundBits;
UINT32 Raw;
} PCIE_ATU_REGION_CTRL2;
typedef struct PCIeATULwrBaseBits
{
UINT32 LWR_BASE_HW:12;
UINT32 LWR_BASE_RW:20;
} PCIE_ATU_LWR_BASE_BITS;
typedef union PCIeATULwrBase
{
PCIE_ATU_LWR_BASE_BITS Bits;
UINT32 Raw;
} PCIE_ATU_LWR_BASE;
typedef struct PCIeATULimitAddrBits
{
UINT32 LIMIT_ADDR_HW:12;
UINT32 LIMIT_ADDR_RW:20;
} PCIE_ATU_LIMIT_ADDR_BITS;
typedef union PCIeATULimitAddr
{
PCIE_ATU_LIMIT_ADDR_BITS Bits;
UINT32 Raw;
} PCIE_ATU_LIMIT_ADDR;
typedef struct PCIeATULwrTargetAddrBits
{
UINT32 LWR_TARGET_ADDR_HW:12;
UINT32 LWR_TARGET_ADDR_RW:20;
} PCIE_ATU_LWR_TARGET_ADDR_BITS;
typedef union PCIeATULwrTargetAddr
{
PCIE_ATU_LWR_TARGET_ADDR_BITS Bits;
UINT32 Raw;
} PCIE_ATU_LWR_TARGET_ADDR;
typedef struct PCIeATURegionCtrl3Bits
{
UINT32 VF_NUMBER:1;
UINT32 RSVDP_1:30;
UINT32 VF_ACTIVE:1;
} PCIE_ATU_REGION_CTRL3_BITS;
typedef union PCIeATURegionCtrl3
{
PCIE_ATU_REGION_CTRL3_BITS Bits;
UINT32 Raw;
} PCIE_ATU_REGION_CTRL3;
// Full register set definition
//
typedef struct PCIeATURegisters
{
PCIE_ATU_VIEWPORT VIEWPORT; // 0x900
PCIE_ATU_REGION_CTRL1 REGION_CTRL1; // 0x904
PCIE_ATU_REGION_CTRL2 REGION_CTRL2; // 0x908
PCIE_ATU_LWR_BASE LWR_BASE; // 0x90C
UINT32 UPPER_BASE; // 0x910
PCIE_ATU_LIMIT_ADDR LIMIT_ADDR; // 0x914
PCIE_ATU_LWR_TARGET_ADDR LWR_TARGET_ADDR; // 0x918
UINT32 UPPER_TARGET_ADDR; // 0x91C
PCIE_ATU_REGION_CTRL3 REGION_CTRL3; // 0x920
} PCIE_ATU_REGISTERS, *PPCIE_ATU_REGISTERS;
//C_ASSERT(sizeof(PCIE_ATU_VIEWPORT) == sizeof(UINT32));
//C_ASSERT(sizeof(PCIE_ATU_REGION_CTRL1) == sizeof(UINT32));
//C_ASSERT(sizeof(PCIE_ATU_REGION_CTRL2) == sizeof(UINT32));
//C_ASSERT(sizeof(PCIE_ATU_LWR_BASE) == sizeof(UINT32));
//C_ASSERT(sizeof(PCIE_ATU_LIMIT_ADDR) == sizeof(UINT32));
//C_ASSERT(sizeof(PCIE_ATU_LWR_TARGET_ADDR) == sizeof(UINT32));
//C_ASSERT(sizeof(PCIE_ATU_REGION_CTRL3) == sizeof(UINT32));
//C_ASSERT(sizeof(PCIE_ATU_REGISTERS) == (9 * sizeof(UINT32)));

View File

@ -0,0 +1,79 @@
/*++
Copyright (c) 2013 Microsoft Corporation. All Rights Reserved.
Module Name:
HupreBootDefines.h
Abstract:
HupRE boot status value definitions.
Author:
Jork Loeser (jloeser) 28-Oct-2013
--*/
#pragma once
//
// Status codes propagated from ASIC to SoC.
//
// While the upper 16 bits carry semantic meaning, the lower 16 bits do not
// and can change anytime.
//
// Upper 16 bits:
//
// 0x0000 - reset state
// 0x0001 - 2BL can be downloaded
// 0x0002 - 3BL can be downloaded
// 0x0003 - 3BL has been started and is ready to proceed with handshake.
// 0x8000 - (or'ed) Error. The remaining bits might contain more details.
//
typedef enum _HUPRE_BOOT_STATUS
{
HUPRE_BOOT_STATUS_RESET = 0, // Set by HW
HUPRE_BOOT_STATUS_1BL_CORE_UP,
HUPRE_BOOT_STATUS_1BL_READY_FOR_2BL = 0x00010000,
HUPRE_BOOT_STATUS_1BL_PCI_UP,
HUPRE_BOOT_STATUS_1BL_CACHE_CLEANED,
HUPRE_BOOT_STATUS_1BL_SEEN_2BL,
HUPRE_BOOT_STATUS_1BL_DONE,
HUPRE_BOOT_STATUS_2BL_STARTED,
HUPRE_BOOT_STATUS_2BL_CACHE_UP,
HUPRE_BOOT_STATUS_2BL_DDR_UP,
HUPRE_BOOT_STATUS_2BL_READY_FOR_3BL = 0x00020000,
HUPRE_BOOT_STATUS_2BL_MSI_MAPPED,
HUPRE_BOOT_STATUS_2BL_MSI_SENT,
HUPRE_BOOT_STATUS_2BL_SEEN_3BL,
HUPRE_BOOT_STATUS_2BL_FIREWALLS_SET,
HUPRE_BOOT_STATUS_2BL_DONE,
HUPRE_BOOT_STATUS_3BL_STARTED = 0x00030000,
HUPRE_BOOT_STATUS_3BL_TOPOLOGY_DONE,
HUPRE_BOOT_STATUS_3BL_INTC_LINES_ENABLED,
HUPRE_BOOT_STATUS_3BL_MSGPOOL_DONE,
HUPRE_BOOT_STATUS_3BL_INQUEUES_DONE,
HUPRE_BOOT_STATUS_3BL_PCIE_CONFIG_DONE = 0x00040000,
HUPRE_BOOT_STATUS_3BL_PCIE_HANDSHAKE_DONE,
HUPRE_BOOT_STATUS_ERROR = 0x80000000,
} HUPRE_BOOT_STATUS;
//
// INTC specifics to use for status communication from SoC to ASIC
//
enum
{
//
// Interrupt line to use. Same for core0/1.
//
HUPRE_BOOT_INTC_LINE = 0,
//
// Bits used to signal readiness of the various boot images.
//
HUPRE_BOOT_INTC_BIT_2BL = 0,
HUPRE_BOOT_INTC_BIT_3BL = 1,
};

View File

@ -0,0 +1,254 @@
//-----------------------------------------------------------------------------
// File: ddc_params.h
// Author: Matthew Pumar <matpumar@microsoft.com>
// Generated on: 2016-06-27 12:44:57.886442
//-----------------------------------------------------------------------------
// Description : AUTOGENERATED HEADER by ddc_timing.py DO NOT MODIFY
//
//-----------------------------------------------------------------------------
// Copyright (c) 2015 by Microsoft.
// This model is the confidential and proprietary property of Microsoft and the
// possession or use of this file requires a written license from Microsoft.
//-----------------------------------------------------------------------------
#ifndef _DDC_PARAMS_H_
#define _DDC_PARAMS_H_
#define DDC_MIN 0
#define DDC_TYP 1
#define DDC_MAX 2
#define DDC_REG 3
#define DDC_PROG 4
#define DDC_MICRON 5
#define DDC_RAND 6
typedef struct{
uint32_t dqsck[5];
uint32_t ppd[5];
uint32_t rrd[5];
uint32_t zqcs[5];
uint32_t rank[5];
uint32_t rfcab[5];
uint32_t refi[5];
uint32_t zqcl[5];
uint32_t rtp[5];
uint32_t dqsq[5];
uint32_t dqss[5];
uint32_t rfcpb[5];
uint32_t rpab[5];
uint32_t rcpb[5];
uint32_t xpd[5];
uint32_t rpst[5];
uint32_t rcd[5];
uint32_t xsr[5];
uint32_t zqreset[5];
uint32_t cke[5];
uint32_t wtr[5];
uint32_t wpre[5];
uint32_t bl[5];
uint32_t wl[5];
uint32_t faw[5];
uint32_t zqinit[5];
uint32_t rl[5];
uint32_t wr[5];
uint32_t rcab[5];
uint32_t rlat[5];
uint32_t rppb[5];
uint32_t sr[5];
uint32_t mrri[5];
uint32_t mrw[5];
uint32_t mrr[5];
uint32_t mrd[5];
uint32_t ccd[5];
uint32_t ras[5];
} lp3_ddc_params;
typedef struct{
uint32_t dqsck[6];
uint32_t ppd[6];
uint32_t rrd[6];
uint32_t dqs2dq[6];
uint32_t rank[6];
uint32_t rfcab[6];
uint32_t refi[6];
uint32_t rtw[6];
uint32_t rtp[6];
uint32_t dqss[6];
uint32_t rfcpb[6];
uint32_t rpab[6];
uint32_t zqlatch[6];
uint32_t rcpb[6];
uint32_t xpd[6];
uint32_t ckelpd[6];
uint32_t rpst[6];
uint32_t rcd[6];
uint32_t xsr[6];
uint32_t ccdmw[6];
uint32_t escke[6];
uint32_t zqreset[6];
uint32_t cke[6];
uint32_t wtr[6];
uint32_t rtrrd[6];
uint32_t wpre[6];
uint32_t bl[6];
uint32_t mrwckel[6];
uint32_t wl[6];
uint32_t faw[6];
uint32_t cmdcke[6];
uint32_t rl[6];
uint32_t wr[6];
uint32_t ckesr[6];
uint32_t zqinit[6];
uint32_t rcab[6];
uint32_t rlat[6];
uint32_t rppb[6];
uint32_t wrwtr[6];
uint32_t sr[6];
uint32_t mrri[6];
uint32_t mrw[6];
uint32_t odtlon[6];
uint32_t odton[6];
uint32_t mrr[6];
uint32_t mrd[6];
uint32_t ccd[6];
uint32_t ras[6];
} lp4_ddc_params;
typedef enum ddc_c2py_mapping{
LP3_DQSCK = 21,
LP3_PPD = 2,
LP3_RRD = 8,
LP3_ZQCS = 38,
LP3_RANK = 34,
LP3_RFCAB = 14,
LP3_REFI = 35,
LP3_ZQCL = 37,
LP3_RTP = 11,
LP3_DQSQ = 22,
LP3_DQSS = 23,
LP3_RFCPB = 15,
LP3_RPAB = 5,
LP3_RCPB = 4,
LP3_XPD = 19,
LP3_RPST = 28,
LP3_RCD = 7,
LP3_XSR = 20,
LP3_ZQRESET = 39,
LP3_CKE = 17,
LP3_WTR = 16,
LP3_WPRE = 27,
LP3_BL = 33,
LP3_WL = 24,
LP3_FAW = 10,
LP3_ZQINIT = 36,
LP3_RL = 25,
LP3_WR = 13,
LP3_RCAB = 3,
LP3_RLAT = 26,
LP3_RPPB = 6,
LP3_SR = 18,
LP3_MRRI = 32,
LP3_MRW = 29,
LP3_MRR = 31,
LP3_MRD = 30,
LP3_CCD = 12,
LP3_RAS = 9,
LP4_DQSCK = 61,
LP4_PPD = 48,
LP4_RRD = 43,
LP4_DQS2DQ = 63,
LP4_RANK = 82,
LP4_RFCAB = 52,
LP4_REFI = 78,
LP4_RTW = 84,
LP4_RTP = 50,
LP4_DQSS = 62,
LP4_RFCPB = 53,
LP4_RPAB = 46,
LP4_ZQLATCH = 80,
LP4_RCPB = 42,
LP4_XPD = 59,
LP4_CKELPD = 75,
LP4_RPST = 68,
LP4_RCD = 44,
LP4_XSR = 60,
LP4_CCDMW = 55,
LP4_ESCKE = 77,
LP4_ZQRESET = 81,
LP4_CKE = 57,
LP4_WTR = 56,
LP4_RTRRD = 85,
LP4_WPRE = 67,
LP4_BL = 83,
LP4_MRWCKEL = 73,
LP4_WL = 64,
LP4_FAW = 49,
LP4_CMDCKE = 76,
LP4_RL = 65,
LP4_WR = 51,
LP4_CKESR = 74,
LP4_ZQINIT = 79,
LP4_RCAB = 41,
LP4_RLAT = 66,
LP4_RPPB = 47,
LP4_WRWTR = 86,
LP4_SR = 58,
LP4_MRRI = 72,
LP4_MRW = 69,
LP4_ODTLON = 87,
LP4_ODTON = 88,
LP4_MRR = 71,
LP4_MRD = 70,
LP4_CCD = 54,
LP4_RAS = 45,
LP4_DQSCK_NS = 110,
LP4_PPD_NS = 97,
LP4_RRD_NS = 92,
LP4_DQS2DQ_NS = 112,
LP4_RANK_NS = 131,
LP4_RFCAB_NS = 101,
LP4_REFI_NS = 127,
LP4_RTW_NS = 133,
LP4_RTP_NS = 99,
LP4_DQSS_NS = 111,
LP4_RFCPB_NS = 102,
LP4_RPAB_NS = 95,
LP4_ZQLATCH_NS = 129,
LP4_RCPB_NS = 91,
LP4_XPD_NS = 108,
LP4_CKELPD_NS = 124,
LP4_RPST_NS = 117,
LP4_RCD_NS = 93,
LP4_XSR_NS = 109,
LP4_CCDMW_NS = 104,
LP4_ESCKE_NS = 126,
LP4_ZQRESET_NS = 130,
LP4_CKE_NS = 106,
LP4_WTR_NS = 105,
LP4_RTRRD_NS = 134,
LP4_WPRE_NS = 116,
LP4_BL_NS = 132,
LP4_MRWCKEL_NS = 122,
LP4_WL_NS = 113,
LP4_FAW_NS = 98,
LP4_CMDCKE_NS = 125,
LP4_RL_NS = 114,
LP4_WR_NS = 100,
LP4_CKESR_NS = 123,
LP4_ZQINIT_NS = 128,
LP4_RCAB_NS = 90,
LP4_RLAT_NS = 115,
LP4_RPPB_NS = 96,
LP4_WRWTR_NS = 135,
LP4_SR_NS = 107,
LP4_MRRI_NS = 121,
LP4_MRW_NS = 118,
LP4_ODTLON_NS = 136,
LP4_ODTON_NS = 137,
LP4_MRR_NS = 120,
LP4_MRD_NS = 119,
LP4_CCD_NS = 103,
LP4_RAS_NS = 94
} ddc_c2py_map;
#endif

View File

@ -0,0 +1,320 @@
/*-------------------------------------------------------
|
| init_helper.h
|
| Helper function definitions.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2013 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#ifndef __INIT_HELPER_H__
#define __INIT_HELPER_H__
#include <xtensa/config/core.h>
#include <stdint.h>
#include "init_helper_common.h"
#define PCIE_RESET_RELEASE_ADDR (0x04080090)
#define PCIE_RESET_RELEASE_DATA (0x00000003)
#define HUP_CN_MISC_INTC_P0_0_DATA (0x04000000)
#define HUP_CN_MISC_INTC_P0_0_CLEAR (0x04000008)
#define HUP_CN_MISC_INTC_P1_0_SET (0x04000084)
#define HUP_CN_MISC_INTC_P1_0_CLEAR (0x04000088)
#define HUP_CN_MISC_INTC_SCRATCH_REG0 (0x04005000)
#define HUP_CN_MISC_INTC_SCRATCH_REG1 (0x04005010)
#define HUP_CN_MISC_POR_BITS4CONTORLCORE (0x04080800)
#define DRAMC_READY (0x00000002)
//
// C-portable version of extern "C"
//
#ifdef __cplusplus
#define EXTERN_C extern "C"
#else
#define EXTERN_C extern
#endif
#ifndef MC_INIT_BFTEST
// Bifrost compatibility typedefs and memory access functions
typedef uint8_t UINT8, *PUINT8;
typedef uint32_t UINT32, *PUINT32;
static inline UINT32 hw_read32(volatile UINT32* addr)
{
return *addr;
}
static inline void hw_write32(volatile UINT32* addr, UINT32 data)
{
*addr = data;
}
#endif //MC_INIT_BFTEST
typedef volatile unsigned int r32;
typedef volatile unsigned long long r64;
typedef volatile unsigned short r16;
typedef volatile unsigned char r8;
typedef unsigned long long u64;
typedef unsigned int u32;
typedef unsigned short u16;
typedef unsigned char u8;
typedef int s32;
typedef short s16;
typedef char s8;
typedef enum{
PLAT_CHIP = 0,
PLAT_MVP = 1,
PLAT_PAL = 2,
PLAT_RPP = 3,
PLAT_ISS = 4,
PLAT_SIM = 5,
PLAT_VEL = 6,
PLAT_ZEBU = 7,
}PLATFORM_TYPE;
typedef enum{
OPT_MASK_NO_DRAMC_RESET = 0x00000001,
OPT_MASK_NO_DRAMC_INIT = 0x00000002,
OPT_MASK_DRAMC_INIT_FOR_BASRAM_MODE = 0x00000004,
OPT_MASK_SIM_SILICON_MODE = 0x00000008,
OPT_MASK_DDR_MODE = 0x00000030,
OPT_MASK_FREQUENCY = 0x00000F00,
OPT_MASK_UPCTL = 0x0000F000,
OPT_MASK_SCHEDULER = 0x000F0000,
OPT_MASK_PHY = 0x3FF00000,
OPT_MASK_PHY_MR3 = 0x00C00000,
OPT_MASK_PHY_ZQ0PR = 0x03000000,
OPT_MASK_PHY_ZQ1PR = 0x0C000000,
OPT_MASK_PHY_PIR = 0x10000000,
OPT_MASK_PHY_DTCR = 0x20000000,
}DRAMC_INIT_OPTION_MASK;
typedef enum{
OPT_MASK_LSB_NO_DRAMC_RESET = 0,
OPT_MASK_LSB_NO_DRAMC_INIT = 1,
OPT_MASK_LSB_DRAMC_INIT_FOR_BASRAM_MODE = 2,
OPT_MASK_LSB_SIM_SILICON_MODE = 3,
OPT_MASK_LSB_DDR_MODE = 4,
OPT_MASK_LSB_FREQUENCY = 8,
OPT_MASK_LSB_UPCTL = 12,
OPT_MASK_LSB_SCHEDULER = 16,
OPT_MASK_LSB_PHY = 20,
OPT_MASK_LSB_PHY_MR3 = 22,
OPT_MASK_LSB_PHY_ZQ0PR = 24,
OPT_MASK_LSB_PHY_ZQ1PR = 26,
OPT_MASK_LSB_PHY_PIR = 28,
OPT_MASK_LSB_PHY_DTCR = 29,
}DRAMC_INIT_OPTION_MASK_LSB;
typedef enum{
LPDDR_DEFAULT = 0,
LPDDR_2 = 2,
LPDDR_3 = 3,
}DRAMC_INIT_OPTION_DDR_MODE;
typedef enum{
CHANNEL_BIT9_DEFAULT = 1,
CHANNEL_BIT10 = 2,
CHANNEL_BIT11 = 4,
CHANNEL_BIT26 = 8,
}POR_INIT_DRAMC_INTERLEAVE_MODE;
typedef enum{
LPDDR_SIZE_DEFAULT = 0,
LPDDR_SIZE_4Gb = 4,
LPDDR_SIZE_8Gb = 8,
}DRAMC_INIT_OPTION_DDR_SIZE;
typedef enum{
LPDDR_RANKS_DEFAULT = 0,
LPDDR_RANKS_SINGLE = 1,
LPDDR_RANKS_DOUBLE = 2,
}DRAMC_INIT_OPTION_DDR_RANKS;
typedef enum{
FREQ_DEFAULT = 1600,
FREQ_400MHZ = 400,
FREQ_800MHZ = 800,
FREQ_1066MHZ = 1066,
FREQ_1333MHZ = 1333,
}DRAMC_INIT_OPTION_FREQUENCY;
typedef enum{
UPCTL_MASK_TREFI = 0x00000001,
}DRAMC_INIT_OPTION_UPCTL_MASK;
typedef enum{
DRAMC_INIT_BEGIN = 0x00050000,
DRAMC_INIT_POR = 0x00051000,
DRAMC_INIT_SCHEDULER = 0x00052000,
DRAMC_INIT_FREQUENCY = 0x00053000,
DRAMC_INIT_PHY_STARTED = 0x00054000,
DRAMC_INIT_PHY_BEFORE_TRAINING = 0x00054001,
DRAMC_INIT_PHY_AFTER_TRAINING = 0x00054002,
DRAMC_INIT_DFI = 0x00055000,
DRAMC_INIT_POWER_UP = 0x00056000,
DRAMC_INIT_TIMING_REGS = 0x00057000,
DRAMC_INIT_DFI_TIMING_REGS = 0x00058000,
DRAMC_INIT_MEMORY_STARTED = 0x00059000,
DRAMC_INIT_MEMORY_MRW_RESET = 0x00059001,
DRAMC_INIT_MEMORY_MRW_ZQ = 0x00059002,
DRAMC_INIT_MEMORY_MRW_MR2 = 0x00059003,
DRAMC_INIT_MEMORY_MRW_MR1 = 0x00059004,
DRAMC_INIT_MEMORY_MRW_MR3 = 0x00059005,
DRAMC_INIT_MEMORY_MRW_REF = 0x00059006,
DRAMC_INIT_DFICTRLUPD = 0x0005A000,
DRAMC_INIT_MOVE_STATE_CONFIG = 0x0005B000,
DRAMC_INIT_OVERRIDE_SEQ_STARTED = 0x0005C000,
DRAMC_INIT_OVERRIDE_SEQ_CONFIG = 0x0005C001,
DRAMC_INIT_OVERRIDE_SEQ_MCMD = 0x0005C002,
DRAMC_INIT_MOVE_STATE_ACCESS = 0x0005D000,
}DRAMC_INIT_STATUS;
#define reg_write32(addr, val) ((*(volatile unsigned int*)(addr)) = val)
#define reg_read32(addr, val) (val = (*(volatile unsigned int*)addr))
#define get_prid() xthal_get_prid()
#define get_ccount() xthal_get_ccount()
static inline void delay_cycles(unsigned int cycles)
{
unsigned int start = get_ccount();
while((unsigned int)(get_ccount() - start) < cycles);
}
#define delay_ns(ns) delay_cycles(ns / 2)
#define delay_us(us) delay_cycles(us * 1000 / 2)
#define delay_ms(ms) delay_cycles(ms * 1000000 / 2)
static inline PLATFORM_TYPE get_platform_type()
{
#ifdef SIM_PLAT
return PLAT_SIM;
#else
unsigned int reg_data = 0;
reg_data = *(volatile unsigned int*)(HUP_CN_MISC_POR_BITS4CONTORLCORE);
return (PLATFORM_TYPE)((reg_data & 0xFF000000) >> 24);
#endif
}
static inline unsigned int get_dramc_init_option(DRAMC_INIT_OPTION_MASK option_mask)
{
unsigned int reg_data = *(volatile unsigned int*)(HUP_CN_MISC_INTC_SCRATCH_REG1);
unsigned int ret = 0;
PLATFORM_TYPE plat = get_platform_type();
switch(option_mask)
{
case OPT_MASK_DRAMC_INIT_FOR_BASRAM_MODE:
ret = (reg_data & OPT_MASK_DRAMC_INIT_FOR_BASRAM_MODE) >> OPT_MASK_LSB_DRAMC_INIT_FOR_BASRAM_MODE;
break;
case OPT_MASK_NO_DRAMC_INIT:
ret = (reg_data & OPT_MASK_NO_DRAMC_INIT) >> OPT_MASK_LSB_NO_DRAMC_INIT;
break;
case OPT_MASK_NO_DRAMC_RESET:
ret = (reg_data & OPT_MASK_NO_DRAMC_RESET) >> OPT_MASK_LSB_NO_DRAMC_RESET;
break;
case OPT_MASK_SIM_SILICON_MODE:
ret = (reg_data & OPT_MASK_SIM_SILICON_MODE) >> OPT_MASK_LSB_SIM_SILICON_MODE;
break;
case OPT_MASK_DDR_MODE:
if((plat == PLAT_PAL) || (plat == PLAT_RPP))
{
ret = LPDDR_3;
}
else
{
ret = (reg_data & OPT_MASK_DDR_MODE) >> OPT_MASK_LSB_DDR_MODE;
}
break;
case OPT_MASK_FREQUENCY:
if((plat == PLAT_PAL) || (plat == PLAT_RPP))
{
ret = FREQ_800MHZ;
}
else
{
ret = (reg_data & OPT_MASK_FREQUENCY) >> OPT_MASK_LSB_FREQUENCY;
}
break;
case OPT_MASK_UPCTL:
ret = (reg_data & OPT_MASK_UPCTL) >> OPT_MASK_LSB_UPCTL;
break;
case OPT_MASK_SCHEDULER:
ret = (reg_data & OPT_MASK_SCHEDULER) >> OPT_MASK_LSB_SCHEDULER;
break;
case OPT_MASK_PHY:
ret = (reg_data & OPT_MASK_PHY) >> OPT_MASK_LSB_PHY;
break;
case OPT_MASK_PHY_MR3:
ret = (reg_data & OPT_MASK_PHY_MR3) >> OPT_MASK_LSB_PHY_MR3;
break;
case OPT_MASK_PHY_ZQ0PR:
ret = (reg_data & OPT_MASK_PHY_ZQ0PR) >> OPT_MASK_LSB_PHY_ZQ0PR;
break;
case OPT_MASK_PHY_ZQ1PR:
ret = (reg_data & OPT_MASK_PHY_ZQ1PR) >> OPT_MASK_LSB_PHY_ZQ1PR;
break;
case OPT_MASK_PHY_PIR:
ret = (reg_data & OPT_MASK_PHY_PIR) >> OPT_MASK_LSB_PHY_PIR;
break;
case OPT_MASK_PHY_DTCR:
ret = (reg_data & OPT_MASK_PHY_DTCR) >> OPT_MASK_LSB_PHY_DTCR;
break;
default:
ret = 0;
break;
}
return ret;
}
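// Illustrative usage only (not part of the original sources): decode the boot
// straps once and key the init flow off them. On PAL/RPP platforms the DDR
// mode and frequency cases above return the enum values directly; on other
// platforms they return the raw strap field shifted down to bit 0.
//   unsigned int ddr_mode = get_dramc_init_option(OPT_MASK_DDR_MODE);
//   unsigned int no_init  = get_dramc_init_option(OPT_MASK_NO_DRAMC_INIT);
//   if (!no_init && (ddr_mode == LPDDR_3))
//   {
//       /* run the full LPDDR3 init sequence */
//   }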
static inline void set_dramc_ready()
{
delay_us(10);
*(volatile unsigned int*)(HUP_CN_MISC_INTC_P1_0_SET) = 0x2;
}
static inline void wait_for_dramc_ready()
{
unsigned int reg_data = 0;
do
{
reg_data = *(volatile unsigned int*)(HUP_CN_MISC_INTC_P1_0_CLEAR);
}while((reg_data & 0x2) == 0);
*(volatile unsigned int*)(HUP_CN_MISC_INTC_P1_0_CLEAR) = 0x2;
}
static inline void post_status(unsigned int status)
{
*(volatile unsigned int *)(HUP_CN_MISC_INTC_SCRATCH_REG0) = status;
}
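// Illustrative usage only (not part of the original sources): init code is
// expected to post a DRAMC_INIT_STATUS milestone before each major step so a
// stalled boot can be localized from the last value left in the scratch
// register.
//   post_status(DRAMC_INIT_BEGIN);
//   /* ... controller, PHY and memory bring-up ... */
//   post_status(DRAMC_INIT_MOVE_STATE_ACCESS);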
#endif // __INIT_HELPER_H__

View File

@ -0,0 +1,177 @@
/*-------------------------------------------------------
|
| init_helper_common.h
|
| Definitions and structs that are used
| by both Bifrost and 2BL
|
|--------------------------------------------------------
|
| Copyright ( C ) 2013 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#ifndef __INIT_HELPER_COMMON_H__
#define __INIT_HELPER_COMMON_H__
#define INIT_OPTION_BASE_ADDR (0x00100200)
#define BOOT_DIAG_BASE_ADDR (0x00100400)
#define CACHED_BOOT_DIAG_BASE_ADDR (0x20100400)
typedef struct
{
unsigned long long start;
unsigned long long end;
}BOOT_DIAG_DRAMC_INIT_TIME;
typedef struct
{
unsigned int pir;
unsigned int dtcr;
unsigned int mr3;
unsigned int zq0pr;
unsigned int zq1pr;
unsigned int aclcdlr;
unsigned int dtdr[2];
unsigned int dtedr[2];
unsigned int dx0lcdlr[2];
unsigned int dx1lcdlr[2];
unsigned int dx2lcdlr[2];
unsigned int dx3lcdlr[2];
unsigned int dx0bdlr[6];
unsigned int dx1bdlr[6];
unsigned int dx2bdlr[6];
unsigned int dx3bdlr[6];
unsigned int dx4bdlr[6];
unsigned int dx5bdlr[6];
unsigned int dx6bdlr[6];
unsigned int dx7bdlr[6];
unsigned int dx8bdlr[6];
unsigned int dx9bdlr[6];
}BOOT_DIAG_DRAMC_TRAINING_STAT;
typedef struct
{
unsigned int aclcdlr;
unsigned int dx0lcdlr1;
unsigned int dx1lcdlr1;
unsigned int dx2lcdlr1;
unsigned int dx3lcdlr1;
}BOOT_DIAG_DRAMC_SHMOO_OVERRIDE;
typedef struct
{
BOOT_DIAG_DRAMC_INIT_TIME init_time;
BOOT_DIAG_DRAMC_TRAINING_STAT training_stat;
BOOT_DIAG_DRAMC_SHMOO_OVERRIDE shmoo_override;
}BOOT_DIAG_DRAMC;
typedef struct
{
BOOT_DIAG_DRAMC dramc;
}BOOT_DIAG;
typedef struct
{
unsigned int NO_DRAMC_RESET :1;
unsigned int NO_DRAMC_INIT :1;
unsigned int DRAMC_INIT_FOR_BASRAM_MODE :1;
unsigned int FULL_DRAMC_INIT :1;
unsigned int DDR_MODE :2;
unsigned int DDR_SIZE :4;
unsigned int DDR_RANKS :2;
unsigned int FREQUENCY_OPT :11;
unsigned int UPCTL_OPT :1;
unsigned int DDR_CONF :2;
unsigned int PHY_OPT :2;
unsigned int DRAMC_INTERLEAVE_MODE :4;
}DRAMC_INIT_OPT_STD;
typedef struct
{
unsigned int mr3 :2;
unsigned int zq0pr :2;
unsigned int zq1pr :2;
unsigned int rsvd :26;
}DRAMC_INIT_OPT_SHMOO_DS;
typedef struct
{
unsigned int dx0lcdlr1 :8;
unsigned int dx1lcdlr1 :8;
unsigned int dx2lcdlr1 :8;
unsigned int dx3lcdlr1 :8;
}DRAMC_INIT_OPT_SHMOO_WDQD;
typedef struct
{
unsigned int dx0lcdlr1 :8;
unsigned int dx1lcdlr1 :8;
unsigned int dx2lcdlr1 :8;
unsigned int dx3lcdlr1 :8;
}DRAMC_INIT_OPT_SHMOO_RDQSD;
typedef struct
{
unsigned int dx0lcdlr1 :8;
unsigned int dx1lcdlr1 :8;
unsigned int dx2lcdlr1 :8;
unsigned int dx3lcdlr1 :8;
}DRAMC_INIT_OPT_SHMOO_RDQSND;
typedef struct
{
unsigned int acd :8;
unsigned int rsvd :24;
}DRAMC_INIT_OPT_SHMOO_AC;
typedef struct
{
unsigned int ds :1;
unsigned int wdqd :1;
unsigned int rdqsd :1;
unsigned int rdqsnd :1;
unsigned int ac :1;
unsigned int rsvd :27;
}DRAMC_INIT_OPT_SHMOO_ENABLE_MASK;
typedef struct
{
DRAMC_INIT_OPT_SHMOO_ENABLE_MASK enable;
DRAMC_INIT_OPT_SHMOO_DS ds;
DRAMC_INIT_OPT_SHMOO_WDQD wdqd;
DRAMC_INIT_OPT_SHMOO_RDQSD rdqsd;
DRAMC_INIT_OPT_SHMOO_RDQSND rdqsnd;
DRAMC_INIT_OPT_SHMOO_AC ac;
}DRAMC_INIT_OPT_SHMOO;
typedef struct
{
DRAMC_INIT_OPT_STD std;
DRAMC_INIT_OPT_SHMOO shmoo;
}DRAMC_INIT_OPT;
typedef struct
{
unsigned int skip_dram_heap_init;
unsigned int suite_rand_seed;
unsigned int verbosity;
unsigned int runtime;
unsigned int jtm_interval;
unsigned int jtm_slope;
unsigned int jtm_offset;
unsigned int bock_test;
}BF_INIT_OPT;
typedef struct
{
DRAMC_INIT_OPT dramc_init_opt;
BF_INIT_OPT bf_init_opt;
}INIT_OPTION;
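// Illustrative sketch only (not part of the original sources): 2BL and
// Bifrost presumably exchange these options through the fixed window at
// INIT_OPTION_BASE_ADDR, so either side can overlay the struct on that
// address. The field names are taken from this header; the surrounding usage
// is an assumption.
//   volatile INIT_OPTION *opt = (volatile INIT_OPTION *)INIT_OPTION_BASE_ADDR;
//   if (opt->dramc_init_opt.std.NO_DRAMC_INIT)
//   {
//       /* skip controller init; memory was already brought up elsewhere */
//   }
//   unsigned int seed = opt->bf_init_opt.suite_rand_seed;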
#endif // __INIT_HELPER_COMMON_H__

View File

@ -0,0 +1,222 @@
lp4_ddc_params lp4_400mhz_param_db = {
.dqsck={1,2,2,0,2,2},
.ppd={4,4,4,0,4,4},
.rrd={2,4,10,0,4,4},
.dqs2dq={1,1,1,0,1,1},
.rank={1,10,8,0,10,10},
.rfcab={110,112,118,0,112,112},
.refi={1561,1563,1569,0,1563,1562},
.rtw={21,23,29,0,23,23},
.rtp={8,8,8,0,8,8},
.dqss={0,2,8,0,2,1},
.rfcpb={54,56,62,0,56,56},
.rpab={7,9,15,0,9,9},
.zqlatch={10,12,18,0,12,12},
.rcpb={21,25,37,0,25,24},
.xpd={1,3,9,0,5,5},
.ckelpd={8,10,16,0,10,10},
.rpst={0,1,7,0,1,1},
.rcd={6,8,14,0,8,8},
.xsr={73,75,81,0,75,75},
.ccdmw={30,32,38,0,32,32},
.escke={2,2,2,0,2,3},
.zqreset={18,20,26,0,20,20},
.cke={1,3,9,0,3,4},
.wtr={6,8,14,0,8,8},
.rtrrd={35,37,43,0,37,37},
.wpre={2,2,2,0,2,1.8},
.bl={0,0,0,0,0,0},
.mrwckel={8,10,16,0,10,10},
.wl={6,6,6,0,6,6},
.faw={14,16,22,0,16,40},
.cmdcke={1,3,9,0,3,3},
.rl={12,12,12,0,12,12},
.wr={10,10,10,0,10,10},
.ckesr={8,10,16,0,10,10},
.zqinit={398,400,406,0,400,400},
.rcab={24,26,32,0,26,26},
.rlat={4,6,12,0,6,6},
.rppb={6,8,14,0,8,8},
.wrwtr={17,19,25,0,19,19},
.sr={4,6,12,0,6,6},
.mrri={8,10,16,0,10,0},
.mrw={10,10,14,0,10,10},
.odtlon={4,4,4,0,4,4},
.odton={2,2,2,0,2,2},
.mrr={2,8,10,0,8,8},
.mrd={8,10,16,0,10,10},
.ccd={8,8,16,0,8,8},
.ras={15,17,23,0,17,17}
};
lp4_ddc_params lp4_800mhz_param_db = {
.dqsck={1,3,3,0,3,3},
.ppd={4,4,4,0,4,4},
.rrd={4,8,20,0,8,8},
.dqs2dq={1,1,1,0,1,2},
.rank={1,10,15,0,10,10},
.rfcab={220,224,236,0,224,224},
.refi={3121,3125,3137,0,3125,3124},
.rtw={22,26,38,0,26,26},
.rtp={8,8,8,0,8,8},
.dqss={1,2,2,0,2,2},
.rfcpb={108,112,124,0,112,112},
.rpab={13,17,29,0,17,17},
.zqlatch={20,24,36,0,24,24},
.rcpb={40,97,72,0,97,97},
.xpd={6,6,12,0,6,6},
.ckelpd={6,10,22,0,10,10},
.rpst={1,1,13,0,1,1},
.rcd={11,15,27,0,15,15},
.xsr={226,230,242,0,230,230},
.ccdmw={28,32,44,0,32,32},
.escke={2,2,2,0,2,3},
.zqreset={36,40,52,0,40,40},
.cke={2,6,12,0,6,6},
.wtr={4,8,20,0,8,8},
.rtrrd={38,42,54,0,42,42},
.wpre={2,2,2,0,2,1.8},
.bl={0,0,0,0,0,0},
.mrwckel={8,12,24,0,12,12},
.wl={8,8,8,0,8,8},
.faw={28,32,44,0,32,40},
.cmdcke={0,3,7,0,3,3},
.rl={16,16,16,0,16,16},
.wr={16,16,16,0,16,16},
.ckesr={8,12,24,0,12,12},
.zqinit={796,800,812,0,800,800},
.rcab={43,51,75,0,51,51},
.rlat={2,6,18,0,6,6},
.rppb={11,15,27,0,15,15},
.wrwtr={18,22,34,0,22,22},
.sr={8,12,24,0,12,12},
.mrri={14,18,30,0,18,18},
.mrw={4,8,16,0,8,10},
.odtlon={0,0,0,0,0,0},
.odton={1,1,1,0,1,2},
.mrr={3,14,28,0,14,7},
.mrd={8,12,16,0,12,12},
.ccd={8,8,16,0,8,8},
.ras={30,34,46,0,34,34}
};
lp4_ddc_params lp4_1066mhz_param_db = {
.dqsck={2,4,4,0,4,4},
.ppd={4,4,4,0,4,4},
.rrd={6,11,27,0,11,11},
.dqs2dq={1,1,1,0,1,2},
.rank={2,10,20,0,10,10},
.rfcab={294,299,315,0,299,299},
.refi={4159,4165,4181,0,4165,4163},
.rtw={22,27,43,0,27,27},
.rtp={8,8,8,0,8,8},
.dqss={1,2,3,0,2,2},
.rfcpb={144,150,166,0,150,150},
.rpab={18,23,39,0,23,23},
.zqlatch={27,32,48,0,32,32},
.rcpb={54,97,96,0,97,97},
.xpd={8,8,16,0,8,8},
.ckelpd={5,10,26,0,10,10},
.rpst={1,1,17,0,1,1},
.rcd={14,20,36,0,20,20},
.xsr={302,307,323,0,307,307},
.ccdmw={27,32,48,0,32,32},
.escke={2,2,2,0,2,3},
.zqreset={48,54,70,0,54,54},
.cke={3,8,16,0,8,8},
.wtr={6,11,27,0,11,11},
.rtrrd={39,45,61,0,45,45},
.wpre={2,2,2,0,2,2},
.bl={0,0,0,0,0,0},
.mrwckel={10,15,31,0,15,15},
.wl={10,10,10,0,10,10},
.faw={38,43,59,0,43,43},
.cmdcke={0,3,10,0,3,3},
.rl={22,22,22,0,22,22},
.wr={20,20,20,0,20,20},
.ckesr={11,16,32,0,16,16},
.zqinit={1061,1067,1083,0,1067,1067},
.rcab={57,68,100,0,68,68},
.rlat={1,6,22,0,6,6},
.rppb={14,20,36,0,20,20},
.wrwtr={18,24,40,0,24,24},
.sr={11,16,32,0,16,16},
.mrri={17,23,39,0,23,23},
.mrw={6,11,21,0,11,11},
.odtlon={4,4,4,0,4,4},
.odton={1,1,1,0,1,2},
.mrr={4,14,28,0,14,9},
.mrd={10,15,21,0,15,15},
.ccd={8,8,16,0,8,8},
.ras={40,45,61,0,45,45}
};
lp4_ddc_params lp4_1333mhz_param_db = {
.dqsck={2,5,5,0,5,5},
.ppd={4,4,4,0,4,4},
.rrd={7,14,34,0,14,14},
.dqs2dq={2,2,2,0,2,1},
.rank={2,10,25,0,10,10},
.rfcab={367,374,394,0,374,374},
.refi={5202,5208,5228,0,5208,5206},
.rtw={22,29,49,0,29,29},
.rtp={10,10,10,0,10,10},
.dqss={0,2,22,0,2,2},
.rfcpb={180,187,207,0,187,187},
.rpab={22,28,48,0,28,28},
.zqlatch={34,40,60,0,40,40},
.rcpb={68,80,120,0,80,80},
.xpd={4,10,30,0,10,10},
.ckelpd={4,10,30,0,10,10},
.rpst={0,1,21,0,1,1},
.rcd={18,24,44,0,24,24},
.xsr={244,250,270,0,250,250},
.ccdmw={26,32,52,0,32,32},
.escke={2,2,2,0,2,3},
.zqreset={60,67,87,0,67,67},
.cke={4,10,30,0,10,10},
.wtr={7,14,34,0,14,14},
.rtrrd={41,48,68,0,48,48},
.wpre={2,2,2,0,2,3},
.bl={0,0,0,0,0,0},
.mrwckel={12,19,39,0,19,19},
.wl={12,12,12,0,12,12},
.faw={47,54,74,0,54,54},
.cmdcke={0,3,23,0,3,3},
.rl={28,28,28,0,28,28},
.wr={24,24,24,0,24,24},
.ckesr={14,20,40,0,20,20},
.zqinit={1327,1334,1354,0,1334,1334},
.rcab={78,84,104,0,84,84},
.rlat={0,6,26,0,6,6},
.rppb={18,24,44,0,24,24},
.wrwtr={19,26,46,0,26,26},
.sr={14,20,40,0,20,20},
.mrri={26,32,52,0,32,0},
.mrw={14,14,40,0,14,14},
.odtlon={4,4,4,0,4,4},
.odton={2,2,2,0,2,2},
.mrr={4,12,30,0,12,12},
.mrd={12,19,39,0,19,19},
.ccd={8,8,16,0,8,8},
.ras={50,56,76,0,56,56}
};
void update_ddc_params_frequency(int frequency){
if(frequency == 400){
lp4_param_db = lp4_400mhz_param_db;
}
if(frequency == 800){
lp4_param_db = lp4_800mhz_param_db;
}
if(frequency == 1066){
lp4_param_db = lp4_1066mhz_param_db;
}
if(frequency == 1333){
lp4_param_db = lp4_1333mhz_param_db;
}
}
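// Editorial note: only the four frequencies above are recognized; any other
// value leaves lp4_param_db unchanged, so callers are expected to pass one of
// 400, 800, 1066 or 1333. Illustrative usage (not part of the original
// sources):
//   update_ddc_params_frequency(800);
//   /* lp4_param_db.rl[DDC_TYP] etc. now reflect the 800 MHz database */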

View File

@ -0,0 +1,19 @@
#ifndef __KAHALU_GLOBAL_DEFINES_HEADER_INC
#define __KAHALU_GLOBAL_DEFINES_HEADER_INC
#ifdef MCU_IP_VERIF
#include <stdio.h>
#include <stdlib.h>
#include <svdpi.h>
#include <stddef.h>
#include <unistd.h>
#endif //MCU_IP_VERIF
//Global variables
typedef enum {
REGACC_RD = 0,
REGACC_WR = 1
} REGACC_TYPE;
#endif //__KAHALU_GLOBAL_DEFINES_HEADER_INC

View File

@ -0,0 +1,94 @@
#ifndef __KAHALU_SIM_INIT_HEADER_INC
#define __KAHALU_SIM_INIT_HEADER_INC
#include <kahalu_utils.h>
//DPI imports
//extern void bfm_read32(uint32_t* addr, uint32_t* data);
//extern void bfm_write32(uint32_t* addr, uint32_t data);
//extern void simulation_tb_wait(uint32_t cycles, char * clock);
//extern void simulation_wait_ns(uint32_t ns);
// MTC extern functions from mtc_train.c
extern void program_non_default_regs_for_mtc_write_training_init();
// DPI export
//extern int simulation_init_seq (const int dram_mode, const int active_ranks, const int dram_cfg, const int autoref_int, const int training_test_num, const int dfi_training_en, const int ddc_param_mode, const int wr_dbi, const int rd_dbi, const int use_real_init_seq, const int dram_frequency, int delay_model, const int enable_pmb, const int phy_init_train, const int dump_phy_regs);
extern int release_global_hold_seq(const int mc, const int cycles, char* clk);
//DDC timing routines
void update_ddc_fields(const int mc, const int dram_frequency);
void update_all_ddc_param(const int mc, int val, const int dram_frequency);
#ifdef MCU_IP_VERIF
void update_ddc_params_from_yml();
#endif //MCU_IP_VERIF
void update_ddc_params_frequency(int frequency);
//Init routines
void write_mr_reg(const int mc, uint32_t mr_addr, uint32_t rank, uint32_t data);
void pmb_enable_function(const int mc);
void configure_dram(const int mc, int config_num);
void mc_setup(const int mc, const int mode, const int active_ranks, const int dram_cfg);
void init_dram(const int mc, const int mode, const int active_ranks, const int dram_cfg);
void wait_for_dfi_init(const int mc);
uint32_t read_mr_reg(const int mc, uint32_t mr_addr, uint32_t rank);
void send_mr_commands(const int mc, const int dram_mode, const int active_ranks, const int wr_dbi_dis, const int rd_dbi_dis, const int ddc_mode, const int frequency);
int mcu_reset_init_seq
(
const int mc,
const int dram_mode,
const int active_ranks,
const int dram_cfg,
const int autoref_int,
const int ddc_param_mode,
const int wr_dbi_dis,
const int rd_dbi_dis,
const int dram_frequency,
const int enable_pmb,
const int skip_part_phy_init
);
void perf_regs_init(const int mc);
#ifdef MCU_IP_VERIF
extern int simulation_init_seq
#else
EXTERN_C int simulation_init_seq
#endif //MCU_IP_VERIF
(
const int mc,
const int dram_mode,
const int active_ranks,
const int dram_cfg,
const int autoref_int,
const int training_test_num,
const int phy_training_mode,
const int ddc_param_mode,
const int wr_dbi_dis,
const int rd_dbi_dis,
const int use_real_init_seq,
const int dram_frequency,
const int delay_model,
const int enable_pmb,
const int phy_init_train,
const int dump_phy_regs,
const int seed,
const int skip_mcu_init,
const int pmg_setup_en,
const int training_en,
const int skip_part_phy_init,
const int mov_window_size,
const int mtc_adjust_step,
const int mtc_use_read_methodB,
const int set_bdlr_val
);
void do_yml_reg_writes(char *fname);
void set_autorefresh(const int mc, int interval, int dram_mode, const int dram_frequency);
void program_ddc_phy_params(const int mc, const int rd_dbi_dis, const int dram_frequency);
#endif

View File

@ -0,0 +1,168 @@
#ifndef __KAHALU_UTILS_HEADER_INC
#define __KAHALU_UTILS_HEADER_INC
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <unistd.h>
#include <errno.h>
#ifdef MCU_IP_VERIF
#include <svdpi.h>
#include <bist.h>
#include "conf_defs.h"
#define hw_status(...) printf (__VA_ARGS__)
#else
#ifdef REAL_MCPHY
#define CONF_HAS___PHY_RTL
#endif //REAL_MCPHY
#include "init_helper.h"
#ifndef MC_INIT_BFTEST
#define hw_status(...)
#else
#include "bifrost.h"
#endif
#include "hup_chip.h"
#endif //MCU_IP_VERIF
#include <ddc_params.h>
#include <mcu_regs.h>
#include <kahalu_global_defines.h>
//Reg Model
static PTR_Mcu_regs chip_regs = (PTR_Mcu_regs)(MCU_REGS_MCP_REGS_ADDRESS);
extern uint32_t* g_last_regacc_addr;
extern REGACC_TYPE g_last_regacc_type;
extern int dram_type;
extern lp4_ddc_params lp4_param_db;
extern lp3_ddc_params lp3_param_db;
extern void simulation_tb_wait(uint32_t cycles, const char * clock);
#ifdef MCU_IP_VERIF
//DPI imports
extern void bfm_read32(uint32_t* addr, uint32_t* data);
extern void bfm_write32(uint32_t* addr, uint32_t data);
extern void update_ddc_params_from_yml();
#define hw_sleep(ns) simulation_wait_ns(ns);
extern void simulation_wait_ns(uint32_t t_ns);
extern void print_sim_time();
extern char* get_sim_time();
extern int sv_get_wire_delay_reg_wr(uint32_t rank, uint32_t slice);
extern int sv_get_wire_delay_reg_rd(uint32_t rank, uint32_t slice);
#else
#ifndef MC_INIT_BFTEST
#define hw_sleep(ns) delay_ns(ns)
#endif
#endif //MCU_IP_VERIF
// DPI export
//extern int simulation_init_seq (const int dram_mode, const int active_ranks, const int dram_cfg, const int autoref_int, const int training_test_num, const int dfi_training_en, const int ddc_param_mode, const int wr_dbi, const int rd_dbi, const int use_real_init_seq, const int dram_frequency, int delay_model, const int enable_pmb, const int phy_init_train, const int dump_phy_regs);
//extern void release_global_hold_seq(const int cycles, char* clk);
static inline void hw_phy_regacc_post_wait(uint32_t is_read)
{
//The SNPS LPDDR4 PHY requires spacing between register accesses.
//An access to a non-LPDDR4-PHY register is assumed to take longer than
//the required spacing, so this delay is added only between consecutive
//accesses to LPDDR4-PHY registers from the single thread that manages
//the LPDDR4-PHY; it is also assumed that only one thread manages the
//LPDDR4-PHY at the SoC level.
//TODO: FIXME: The tb_wait routines should be replaced with appropriate
//wait routines for SoC level
if ( is_read )
{
//Command Spacing from Read to a following Read/Write: 2 pclk cycles.
simulation_tb_wait(2, "apb");
}
else
{
//Command spacing from Write to a following Write/Read:
// 1 pclk + (`DWC_AFIFO_SYNC_STAGES+ 4) ctl_clk cycles
// DWC_AFIFO_SYNC_STAGES = 4; to be safe making it 10 ctl_clk cycles
simulation_tb_wait(10, "mem");
simulation_tb_wait(1, "apb");
}
}
static inline uint32_t hw_phy_read32(uint32_t* addr)
{
uint32_t ret;
//hw_phy_regacc_wait(addr);
#ifdef MCU_IP_VERIF
bfm_read32(addr, &ret);
#else
ret = hw_read32((uint32_t*)addr);
#endif //MCU_IP_VERIF
hw_phy_regacc_post_wait(1);
return ret;
}
static inline void hw_phy_write32(uint32_t* addr, uint32_t data)
{
//hw_phy_regacc_wait(addr);
#ifdef MCU_IP_VERIF
bfm_write32(addr, data);
#else
hw_write32((uint32_t*)addr, data);
#endif //MCU_IP_VERIF
hw_phy_regacc_post_wait(0);
}
#ifdef MCU_IP_VERIF
static inline uint32_t hw_read32(uint32_t* addr)
{
uint32_t ret;
//hw_phy_regacc_wait(addr);
bfm_read32(addr, &ret);
return ret;
}
static inline void hw_write32(uint32_t* addr, uint32_t data)
{
//hw_phy_regacc_wait(addr);
bfm_write32(addr, data);
}
#endif //MCU_IP_VERIF
static inline void print_with_time(const char *line)
{
#ifdef MCU_IP_VERIF
print_sim_time();
#endif //MCU_IP_VERIF
hw_status("%s", line);
}
static inline uint32_t get_mcu_baseaddr(const int mc)
{
uint32_t base_addr = 0;
#ifndef MCU_IP_VERIF
base_addr = HUP_CHIP_MCU0_ADDRESS + (HUP_CHIP_MCU1_ADDRESS-HUP_CHIP_MCU0_ADDRESS)*mc;
#endif //MCU_IP_VERIF
return base_addr;
}
//Helper functions
//extern int dram_type;
//extern lp4_ddc_params lp4_param_db;
//extern lp3_ddc_params lp3_param_db;
uint32_t getRandInterval(uint32_t begin, uint32_t end);
#ifdef MCU_IP_VERIF
void get_current_dir();
void print_addr(volatile uint32_t* address);
void do_yml_reg_writes(char *fname);
#endif //MCU_IP_VERIF
void set_reg(const int mc, uintptr_t addr, uint32_t field_mask, uint32_t data);
void set_reg_val(const int mc, uintptr_t addr, uint32_t field_mask, uint32_t data);
#endif

View File

@ -0,0 +1,97 @@
#ifndef MTC_INIT_H
#define MTC_INIT_H
#include "conf_defs.h"
#include <math.h>
#include <assert.h>
#define WDLY_MASK 0x1ff
#define NUM_SLICES 2
#define MAX_SLICES 4
#define NUM_RANKS 2
#define NUM_READ_DELAYS 3
#define MOV_WINDOW_MAX 63
#define DQSL 3
#define TPRD 0x3f
#define DQS_CYCLE_TIME 625
#define DQS_HALF_CYCLE_TIME (DQS_CYCLE_TIME / 2)
// ----- Top level init functions -----
void program_non_default_regs_for_mtc_read_and_write_training_init();
void program_non_default_regs_for_mtc_write_training_init();
void program_mtc_training_window_size(int val);
void program_mtc_training_adj_step(int val);
void read_methodA_init();
int get_WDQPRD(int dqs_no,int rankid);
// ----- Register Access functions ------
void set_rank_id(int rankid);
void set_dxngcr3_read_training_disable_drift_compensation(int dqs_no,int rankid);
int get_phy_delay_write_reg(int dqs_no,int rankid);
int get_mtc_delay_write_reg(int dqs_no,int rankid);
int get_pub_gcr3_gateacctlclk();
void set_pub_gcr3_gateacctlclk(int value);
void set_dxngcr3_wdlvt(int dqs_no,int rankid,int value);
void set_pubgcr3_pure(int value);
void set_crb_active_ranks(int value);
int get_phy_dxngtr0_dgsl(int dqs_no,int rankid);
int get_phy_dxngtr0(int dqs_no,int rankid);
int get_phy_dxngtr0_wdqsl(int dqs_no,int rankid);
int get_dxnmdlr0_iprd(int dqs_no,int rankid);
void set_mtc_dxngsr0 (int dqs_no,int rankid,int value);
int get_mtc_dxngsr6(int dqs_no,int rankid);
void set_mtc_dxngtr0 (int dqs_no,int rankid,int value);
int get_mtc_dxngtr0_dgsl(int dqs_no,int rankid);
int get_phy_dxngsr0_gdqsprd(int dqs_no,int rankid);
int get_phy_dxngsr0(int dqs_no,int rankid);
// Helper functions
unsigned int rand_interval(unsigned int min, unsigned int max);
// --- External Global variables ----
int global_delay_model;
int global_dram_mode, global_dram_frequency;
int global_wdqdprd[NUM_SLICES];
// Positive number for global_wire_delay_reg_wr means Write DQ is
// delayed; thus the movement will be to the left;
// Negative number for global_wire_delay_reg_wr means Write DQS is
// delayed; thus the movement will be to the right.
int global_wire_delay_reg_wr[NUM_RANKS][NUM_SLICES];
// Positive number for global_wire_delay_reg_rd means Read DQS is
// delayed; thus the movement will be to the left;
// Negative number for global_wire_delay_reg_rd means Read DQ is
// delayed; thus the movement will be to the right.
int global_wire_delay_reg_rd[NUM_RANKS][NUM_SLICES];
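// Illustrative example of the sign convention above (editorial note, not part
// of the original sources): global_wire_delay_reg_wr[0][1] = +5 models extra
// wire delay on Write DQ of rank 0, slice 1, so the trained write window is
// expected to shift left; -5 models the same delay on Write DQS and shifts it
// right. The read-side array follows the analogous convention for DQS vs DQ.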
int global_drift_enabled_for_phy_based_training;
int global_wdqsl[NUM_RANKS][NUM_SLICES];
int global_golden_wdqsl[NUM_RANKS][NUM_SLICES];
int global_golden_wr_delay_val[NUM_RANKS][NUM_SLICES];
int global_golden_rd_lr2_val[NUM_RANKS][NUM_SLICES];
int global_golden_rd_lr3_val[NUM_RANKS][NUM_SLICES]; //Same is used for lr4 also
int program_single_rank;
int dxngtr0_dgsl[NUM_RANKS][NUM_SLICES];
int dxngsr0_gdqsprd[NUM_RANKS][NUM_SLICES];
int adjust_gate_delay[NUM_RANKS][NUM_SLICES];
int global_final_adj_values[NUM_RANKS][NUM_SLICES];
int global_final_gate_adj_values[NUM_RANKS][NUM_SLICES];
int random_write_data ;
int dont_program_training_regs_for_write ;
int dont_program_training_regs_for_read ;
int trn_frq;
// read configuration parameters
int gate_adj_disable;
int phy_based_delay_adj;
int wdlvt_enable;
// Global default MTC test control variables
int global_sample_cnt;
int global_zqcal_break;
#endif

View File

@ -0,0 +1,40 @@
#ifndef __MTC_TRAIN_HEADER_INC_
#define __MTC_TRAIN_HEADER_INC_
#include "conf_defs.h"
#include <kahalu_sim_init.h>
// prototypes
void mtc_phy_update_training_test();
void mtc_training_tests(int mtc_test_num,int dram_mode,int autoref_int,int delay_model,int dram_frequency, int active_ranks);
void create_write_training_delay_testcase(int mov_window);
extern void set_autorefresh(const int mc, int interval,int dram_mode, const int dram_frequency);
#define PHY_REGS_PIR_OFFSET 0x1
#define PHY_REGS_PGSR0_OFFSET 0x12
#define STATUS_IDONE_COMPARE 0x1
#define POLLING_LIMIT 100
#ifdef CONF_HAS___PHY_RTL
#define GOLDEN_RD_DELAY_VAL_1600 0x1f
#define GOLDEN_WR_DELAY_VAL_1600 0x5
#define GOLDEN_WDQSL_VAL_1600 0x3
#define GOLDEN_RDGSL_VAL_1600 0x1
#define GOLDEN_RD_DELAY_VAL_1333 0x26
#define GOLDEN_WR_DELAY_VAL_1333 0x3d
#define GOLDEN_RD_DELAY_VAL_400 0x7d
#define GOLDEN_WR_DELAY_VAL_400 0xdf
#define MVM_DELTA 20
#define MVM_DELTA_RD 20
#else
#define GOLDEN_RD_DELAY_VAL_1600 0
#define GOLDEN_WR_DELAY_VAL_1600 1
#define GOLDEN_RD_DELAY_VAL_1333 0
#define GOLDEN_WR_DELAY_VAL_1333 1
#define GOLDEN_RD_DELAY_VAL_400 0
#define GOLDEN_WR_DELAY_VAL_400 1
#define MVM_DELTA 5
#define MVM_DELTA_RD 5
#endif
#endif

View File

@ -0,0 +1,17 @@
//
// Autogenerated by h2inc flow. This file MUST be kept in sync with hup_chip.h!
//
.equiv HUP_CHIP_GPIO15_GPIOAFSEL_ADDRESS, 0x444b420
.equiv HUP_GPIO_GPIOAFSEL_AFSEL_FIELD_MASK, 0xff
.equiv HUP_CHIP_POR_GPIO_IE_GP127_GP96_ADDRESS, 0x40805ac
.equiv HUP_CHIP_POR_GPIO_REG_OVERRIDE_ADDRESS, 0x40805e8
.equiv HUP_POR_GPIO_REG_OVERRIDE_ENABLE_FIELD_MASK, 0x1
.equiv HUP_CHIP_POR_SPI_DIV16_ADDRESS, 0x408050c
.equiv HUP_POR_SPI_DIV16_SEL_FIELD_MASK, 0x1f
.equiv HUP_CHIP_SPI5_CTRLR0_ADDRESS, 0x4465000
.equiv HUP_CHIP_SPI5_CTRLR1_ADDRESS, 0x4465004
.equiv HUP_CHIP_SPI5_SSIENR_ADDRESS, 0x4465008
.equiv HUP_CHIP_SPI5_SER_ADDRESS, 0x4465010
.equiv HUP_CHIP_SPI5_BAUDR_ADDRESS, 0x4465014
.equiv HUP_CHIP_SPI5_DR0_ADDRESS, 0x4465060
.equiv HUP_CHIP_SPI5_SR_ADDRESS, 0x4465028

View File

@ -0,0 +1,20 @@
#include "kahalu_sim_init.h"
#include "snps_lp4_phy_lib.h"
#ifdef CONF_HAS___PHY_RTL
void phy_init_synps_lp4
(
const int mc,
const int active_ranks,
const int ddc_mode,
const int wr_dbi_dis,
const int rd_dbi_dis,
const int dram_frequency,
const int phy_init_train,
const int dump_phy_regs,
const int set_bdlr_val
);
#endif

View File

@ -0,0 +1,26 @@
#include "kahalu_sim_init.h"
#include "snps_lp4_phy_lib.h"
#ifdef CONF_HAS___PHY_RTL
void phy_regs_initial_settings(const int mc, const int dram_frequency, const int ddc_mode, const int active_ranks, const int wr_dbi_dis, const int rd_dbi_dis);
void perform_cbt();
void wait_for_pgsr0_idone();
void set_pub_regs(const int mc, const int dram_mode, const int dram_frequency, const int ddc_mode, const int active_ranks, const int wr_dbi_dis, const int rd_dbi_dis);
int phy_real_init_synps_lp4
(
const int mc,
const int ddc_mode,
const int active_ranks,
const int dram_frequency,
const int wr_dbi_dis,
const int rd_dbi_dis,
const int skip_part_phy_init
);
#endif

View File

@ -0,0 +1,9 @@
#include "kahalu_sim_init.h"
#ifdef CONF_HAS___PHY_RTL
void phy_init_synps_lp4_reg_write(const int mc, const int dram_frequency);
#endif

View File

@ -0,0 +1,276 @@
/*-------------------------------------------------------
|
| spi.inc
|
| routines for SPI operation.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2015 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#include "sboot.inc"
// From hup_chip.h
// gpio_regs[15].gpioafsel.afsel
// Set GPIO 123-127 to hardware control
.equiv HUP_GPIO_15_GPIOAFSEL_DATA, 0xF8
// From hup_chip.h
// por_regs.GPIO_IE_GP127_GP96 GPIO_IE_GP127_GP96
// Enable receiver on GPIO125
.equiv HUP_POR_GPIO_IE3_DATA, 0x20000000
.equiv HUP_POR_GPIO_IE3_MASK, 0x20000000
// From hup_chip.h
// por_regs.GPIO_reg_override
// Enable register overrides
.equiv HUP_POR_GPIO_REG_OVERRIDE_DATA, 0x1
// From hup_chip.h
// por_regs.spi_div16
// set divider to 1/N
.equiv HUP_POR_SPI_DIV_DATA, 0xF
// SCPH: 0x1 (clock toggles at start of first data bit)
// SCPOL: 0x1 (inactive state is high)
// TMOD: 0x3 (EEPROM read)
// DFS_32: 0x7 (8 bit frame size)
.equiv SPI_REGS_CTRLR0_DATA, 0x000703C0
// Always use slave line 0
.equiv SPI_REGS_SER_DATA_LO, 0x00000000
.equiv SPI_REGS_SER_DATA_HI, 0x00000001
.equiv SPI_REGS_BAUD_DATA, 0x00000002
// From Synopsys databook: https://iebhwdev/sites/Silicon/Makena/Shared%20Documents/Vendor/Synopsys/Databooks/LowSpeedIO/dw_apb_ssi_db_4p00a.pdf
.equiv SPI_STS_BSY_BIT, 0
// Transmit FIFO Not Full
.equiv SPI_STS_TNF_BIT, 1
// Transmit FIFO Empty
.equiv SPI_STS_TFE_BIT, 2
// Receive FIFO Not Empty
.equiv SPI_STS_RNE_BIT, 3
// Receive FIFO Full
.equiv SPI_STS_RFF_BIT, 4
.equiv SPI_FLASH_READ_CMD, 0x03
.equiv SPI_FLASH_READ_HDR_CMD, (0x000000 << 8) | SPI_FLASH_READ_CMD
.equiv SPI_FLASH_HEADER_SIZE, 12
.equiv SPI_FRAME_MASK, 0x000000FF
// Shift loop counter. Starts at 32 and decrements
// because Tensilica left shifting instruction shifts
// by 32 - <reg value>
.equiv READ_SHIFT_LOOP_START, 32
.equiv READ_SHIFT_LOOP_END, 0
.equiv READ_SHIFT_LOOP_INCR, -8
// Stall until the SPI is idle.
// Arguments:
// pSr: Value will be clobbered. Used to point to the status register
// pDr: Value will be clobbered. Used to point to the data register
// SrData: Value will be clobbered. Used to store value read from status register
// DrData: Value will be clobbered. Used to store value read from data register
.macro SPI_IDLE pSr, pDr, SrData, DrData
LOCAL .WaitWhileBusy
LOCAL .RxFifoEmpty
movi \pSr, HUP_CHIP_SPI5_SR_ADDRESS
movi \pDr, HUP_CHIP_SPI5_DR0_ADDRESS
.WaitWhileBusy:
// Read status register
l32i \SrData, \pSr, 0
// Branch down if Receive FIFO Not Empty flag not set
bbci \SrData, SPI_STS_RNE_BIT, .RxFifoEmpty
// Otherwise read from the data register
l32i \DrData, \pDr, 0
.RxFifoEmpty:
// Branch back if Transmit FIFO Empty flag not set
bbci \SrData, SPI_STS_TFE_BIT, .WaitWhileBusy
// Branch back if Busy flag is set
bbsi \SrData, SPI_STS_BSY_BIT, .WaitWhileBusy
.endm
// Read/modify/write a register
// Arguments:
// Temp: Value will be clobbered. Used to store temporary values
// ReadValue: Value will be clobbered. Used to store the value read from the register
// AddrReg: Value will be clobbered. Used to store the address value
// MaskReg: Value will be clobbered. Used to store the mask value
// Addr: address of the register
// Data: data to write
// Mask: mask selecting the bits to modify
.macro REG_RMW_BITS Temp, ReadValue, AddrReg, MaskReg, Addr, Data, Mask
movi \MaskReg, \Mask
movi \AddrReg, \Addr
// Read register value
l32i \ReadValue, \AddrReg, 0
// Invert mask and use it to zero
// out the relevant bits in the reg value
movi \Temp, -1
xor \Temp, \Temp, \MaskReg
and \ReadValue, \Temp, \ReadValue
// Mask the data and OR it with the
// remaining reg value
movi \Temp, \Data
and \Temp, \Temp, \MaskReg
or \ReadValue, \Temp, \ReadValue
// Write the register
s32i \ReadValue, \AddrReg, 0
.endm
// Read 32 bits from the SPI data register
// Arguments:
// Ret: The register in which to store the return value
// pSr: Value will be clobbered. Used to point to the status register
// pDr: Value will be clobbered. Used to point to the data register
// RegData: Value will be clobbered. Used to store value read from registers
// Ctr: Value will be clobbered. Used as a counter for the read loop
.macro SPI_RECV32 Ret, pSr, pDr, RegData, Ctr
LOCAL .CheckRxAvail
movi \pSr, HUP_CHIP_SPI5_SR_ADDRESS
movi \pDr, HUP_CHIP_SPI5_DR0_ADDRESS
// Zero out the return register
movi \Ret, 0
// Shift loop counter.
movi \Ctr, READ_SHIFT_LOOP_START
.CheckRxAvail:
// Read status register
l32i \RegData, \pSr, 0
// Branch back if Receive FIFO Not Empty flag not set (meaning Receive FIFO is empty)
bbci \RegData, SPI_STS_RNE_BIT, .CheckRxAvail
// Read from Receive FIFO
l32i \RegData, \pDr, 0
// Shift left as necessary and OR
// into the return register
wsr.sar \Ctr
sll \RegData, \RegData
or \Ret, \RegData, \Ret
addi \Ctr, \Ctr, READ_SHIFT_LOOP_INCR
// If not done, branch back and read another byte
bnei \Ctr, READ_SHIFT_LOOP_END, .CheckRxAvail
.endm
// Read 8 bits from the SPI data register
// Arguments:
// Ret: The register in which to store the return value
// pSr: Value will be clobbered. Used to point to the status register
// pDr: Value will be clobbered. Used to point to the data register
// RegData: Value will be clobbered. Used to store value read from registers
// Ctr: Unused. Added to maintain same footprint as SPI_RECV32
.macro SPI_RECV8 Ret, pSr, pDr, RegData, Ctr
LOCAL .CheckRxAvail
movi \pSr, HUP_CHIP_SPI5_SR_ADDRESS
movi \pDr, HUP_CHIP_SPI5_DR0_ADDRESS
.CheckRxAvail:
// Read status register
l32i \RegData, \pSr, 0
// Branch back if Receive FIFO Not Empty flag not set (meaning Receive FIFO is empty)
bbci \RegData, SPI_STS_RNE_BIT, .CheckRxAvail
// Read from Receive FIFO
l32i \Ret, \pDr, 0
.endm
// Initiate a Flash transfer by writing a
// command over SPI.
// Arguments:
// Cmd: The register containing the command to write
// Cnt: The number of bytes to be transferred
// RegAddr: Value will be clobbered. Used to store addresses of various registers
// RegData: Value will be clobbered. Used to store data which is written to or read from registers
// TempCmd: Value will be clobbered. Stores intermediate values of the command as it is serialized
// Ctr: Value will be clobbered. Used as a counter for the write loop
.macro SPI_TRANSFER Cmd, Cnt, RegAddr, RegData, TempCmd, Ctr
LOCAL .WaitTxAvail
LOCAL .EndTx
// Barrier before disable
memw
// Disable while configuring CTRLR1
movi \RegAddr, HUP_CHIP_SPI5_SSIENR_ADDRESS
//movi \RegAddr, SPI_REGS_SSIENR_ADDR
movi \RegData, 0
s32i \RegData, \RegAddr, 0
// Barrier after disable
memw
// Set up CTRLR1 with the number of bytes to be written
movi \RegAddr, HUP_CHIP_SPI5_CTRLR1_ADDRESS
addi \RegData, \Cnt, -1
s32i \RegData, \RegAddr, 0
// Disable slave select until
// we've finished writing the command
// into the TXFIFO
movi \RegAddr, HUP_CHIP_SPI5_SER_ADDRESS
movi \RegData, SPI_REGS_SER_DATA_LO
s32i \RegData, \RegAddr, 0
// Barrier before enable
memw
// Re-enable
movi \RegAddr, HUP_CHIP_SPI5_SSIENR_ADDRESS
movi \RegData, 1
s32i \RegData, \RegAddr, 0
// Barrier after enable
memw
// Store off the command value to a temp register
mov \TempCmd, \Cmd
movi \Ctr, 0
.WaitTxAvail:
// Read status register into RegData
//movi \RegAddr, SPI_REGS_SSPSR_ADDR
movi \RegAddr, HUP_CHIP_SPI5_SR_ADDRESS
l32i \RegData, \RegAddr, 0
// If Transmit FIFO Not Full flag is not set (meaning FIFO is full), branch back
bbci \RegData, SPI_STS_TNF_BIT, .WaitTxAvail
// Mask data for first byte
movi \RegData, SPI_FRAME_MASK
and \RegData, \RegData, \TempCmd
// Write to data register
//movi \RegAddr, SPI_REGS_SSPDR_ADDR
movi \RegAddr, HUP_CHIP_SPI5_DR0_ADDRESS
s32i \RegData, \RegAddr, 0
// Barrier to ensure bytes arrive
// at the SPI in order
memw
// Shift data for next byte
srli \TempCmd, \TempCmd, 8
// As long as there is data left to send, branch back and transmit next byte
addi \Ctr, \Ctr, 8
blti \Ctr, 32, .WaitTxAvail
// Barrier before reenabling slave select
memw
// Reenable slave select to actually begin the transfer
movi \RegAddr, HUP_CHIP_SPI5_SER_ADDRESS
movi \RegData, SPI_REGS_SER_DATA_HI
s32i \RegData, \RegAddr, 0
// Barrier after reenabling slave select
memw
.endm

File diff suppressed because it is too large

View File

@ -0,0 +1,22 @@
#ifndef __KAHALU_GLOBALS_HEADER_INC
#define __KAHALU_GLOBALS_HEADER_INC
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <unistd.h>
#ifdef MCU_IP_VERIF
#include <svdpi.h>
#endif //MCU_IP_VERIF
#include <kahalu_global_defines.h>
//Global variables
static REGACC_TYPE g_last_regacc_type;
static uint32_t* g_last_regacc_addr;
#endif //__KAHALU_GLOBALS_HEADER_INC

File diff suppressed because it is too large

View File

@ -0,0 +1,169 @@
#include <kahalu_utils.h>
void simulation_tb_wait(uint32_t cycles, const char * clock) {
hw_sleep(cycles * 10); //Slowest clock for now.
}
uint32_t getRandInterval(uint32_t begin, uint32_t end) {
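// Rejection sampling: rand() results at or above 'limit' are discarded so
// that every residue modulo 'range' is equally likely (avoids modulo bias).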
uint32_t range = 1 + end - begin;
uint32_t limit = RAND_MAX - (RAND_MAX % range);
uint32_t randVal;
do {
randVal = rand();
} while (randVal >= limit);
return (randVal % range) + begin;
}
void print_addr(volatile uint32_t* address){
int print_val = 0;
print_val = (intptr_t)address;
hw_status("printval from function : %08x\n", print_val);
}
void set_reg(const int mc, uintptr_t addr, uint32_t field_mask, uint32_t data){
uint32_t i, prog_val;
uint32_t mc_base_addr = get_mcu_baseaddr(mc);
hw_status("Received addr = %08x, field_mask = %-8x, data = %08x\n", (addr + mc_base_addr), field_mask, data);
if(field_mask != 0){
//calculate how much we need to shift the data
for(i=0; (((field_mask>>i)&1)==0); i++);
}else{
hw_status("Write mask is 0, returning without programming\n");
return;
}
hw_status("Read Address = %08x\n", (addr + mc_base_addr));
//get the data in the register
prog_val = hw_read32((uint32_t *)(addr + mc_base_addr));
//clear the field we want to program
prog_val = prog_val & (~field_mask);
//put data in the field
prog_val |= ((data<<i)&field_mask);
hw_status("Writing Address = %08x With Value = %08x\n", (addr + mc_base_addr), prog_val);
//program the value
hw_write32((uint32_t *)(addr + mc_base_addr), prog_val);
}
void set_reg_val(const int mc, uintptr_t addr, uint32_t field_mask, uint32_t data){
uint32_t i, prog_val;
uint32_t mc_base_addr = get_mcu_baseaddr(mc);
hw_status("Set Reg Val Received addr = %08x, field_mask = %-8x, data = %08x\n", (addr + mc_base_addr), field_mask, data);
if(field_mask != 0){
//calculate how much we need to shift the data
//for(i=0; (((field_mask>>i)&1)==0); i++);
}else{
hw_status("Write mask is 0, returning without programming\n");
return;
}
hw_status("Read Address = %08x\n", (addr + mc_base_addr));
//get the data in the register
prog_val = hw_read32((uint32_t *)(addr + mc_base_addr));
hw_status("read back data = %08x",prog_val);
//clear the field we want to program
prog_val = prog_val & (~field_mask);
hw_status("cleared read data = %08x",prog_val);
//put data in the field
prog_val |= (data & field_mask);
hw_status("Writing Address = %08x With Value = %08x\n", (addr + mc_base_addr), prog_val);
//program the value
hw_write32((uint32_t *)(addr + mc_base_addr), prog_val);
}
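// Illustrative usage only (not part of the original sources): program a 4-bit
// field at bits [15:12] of a controller register on MC 0. set_reg() shifts the
// data up to the field's LSB; set_reg_val() expects the data already in
// position. The register offset below is a placeholder, not a real address.
//   set_reg(0, 0x00001234, 0x0000F000, 0x5);         // writes 0x5 into [15:12]
//   set_reg_val(0, 0x00001234, 0x0000F000, 0x5000);  // same effect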
void do_yml_reg_writes(char *fname){
uint32_t line_val, line_num, prog_addr, prog_data, prog_mask;
char line[80];
FILE *fr;
/*"rt" means open the file for reading text */
/* open the file for reading */
fr = fopen (fname, "rt");
if(fr==NULL){
hw_status("Cound not find %s Skipping do_yml_reg_writes()\n", fname);
return;
}else{
hw_status("Found %s, proceeding with do_yml_reg_writes()\n", fname);
}
line_num = 0;
while(fgets(line, 80, fr) != NULL)
{
line_val = (uint32_t)strtol(line, NULL, 2);
hw_status ("Read line from file: %08x ", line_val);
if(line_num == 0){
//do nothing
hw_status("num regs\n");
}else if((line_num%3)==1){
prog_addr = line_val;
hw_status("addr\n");
}else if((line_num%3)==2){
prog_mask = line_val;
hw_status("mask\n");
}else if((line_num%3)==0){
prog_data = line_val;
hw_status("data\n");
set_reg(0, prog_addr, prog_mask, prog_data);
}
line_num++;
}
fclose(fr); /* close the file prior to exiting the routine */
}
#ifdef MCU_IP_VERIF
void get_current_dir(){
char cwd[1024];
if (getcwd(cwd, sizeof(cwd)) != NULL)
fprintf(stdout, "Current working dir: %s\n", cwd);
else
perror("getcwd() error");
}
#endif //MCU_IP_VERIF
/*
UINT32 get_reg(volatile UINT32* addr, UINT32 field_mask){
UINT32 ret_val = (hw_read32(addr) & field_mask);
if(field_mask == 0){
return 0;
} else{
while((field_mask & 0x1)==0){
field_mask >>= 1;
ret_val >>= 1;
}
}
return ret_val;
}
*/

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,537 @@
#include "kahalu_sim_init.h"
#include "snps_lp4_phy_lib.h"
#ifdef CONF_HAS___PHY_RTL
void phy_init_synps_lp4(const int mc, const int active_ranks, const int ddc_mode, const int wr_dbi_dis, const int rd_dbi_dis, const int dram_frequency, const int phy_init_train, const int dump_phy_regs, const int set_bdlr_val) {
uint32_t rd_data = 0;
uint32_t wr_data = 0;
uint32_t rd_addr = 0;
uint32_t rl_val = 0;
uint32_t rtp_val = 0;
uint32_t tmp_set_bdlr_val;
uint32_t mc_base_addr = get_mcu_baseaddr(mc);
hw_status("KAHALU_SIM_INIT: SNPS PHY INIT frequency = %0d, ddc_mode = %0d, wr_dbi_dis= %0d, rd_dbi_dis = %0d\n", dram_frequency, ddc_mode, wr_dbi_dis, rd_dbi_dis);
program_ddc_phy_params(mc, rd_dbi_dis, dram_frequency);
hw_status("KAHALU_SIM_INIT: DEASSERTING RESET to PHY\n");
hw_write32((uint32_t*)(MCU_REGS_PHYCTRL_REGS_SNP_CTL_RST_N_ADDRESS + mc_base_addr), 0x01);
//Enable PUB mode
hw_status("KAHALU_SIM_INIT: ENABLING PUB\n");
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR1_ADDRESS + mc_base_addr), 0x02024640);
//for MTC coverage
if (set_bdlr_val != 0)
{
wr_data = PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_RANKWID_SET(0) | PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_RANKRID_SET(0);
hw_phy_write32((uint32_t*) MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_ADDRESS, wr_data);
tmp_set_bdlr_val = set_bdlr_val & 0xff;
wr_data = (tmp_set_bdlr_val << 24) | (tmp_set_bdlr_val << 16) | (tmp_set_bdlr_val << 8) | tmp_set_bdlr_val;
hw_phy_write32((uint32_t*) MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR3_ADDRESS, wr_data);
hw_phy_write32((uint32_t*) MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR4_ADDRESS, wr_data);
hw_phy_write32((uint32_t*) MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR5_ADDRESS, tmp_set_bdlr_val);
hw_phy_write32((uint32_t*) MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR3_ADDRESS, wr_data);
hw_phy_write32((uint32_t*) MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR4_ADDRESS, wr_data);
hw_phy_write32((uint32_t*) MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR5_ADDRESS, tmp_set_bdlr_val);
wr_data = PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_RANKWID_SET(1) | PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_RANKRID_SET(1);
hw_phy_write32((uint32_t*) MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_ADDRESS, wr_data);
wr_data = (tmp_set_bdlr_val << 24) | (tmp_set_bdlr_val << 16) | (tmp_set_bdlr_val << 8) | tmp_set_bdlr_val;
hw_phy_write32((uint32_t*) MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR3_ADDRESS, wr_data);
hw_phy_write32((uint32_t*) MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR4_ADDRESS, wr_data);
hw_phy_write32((uint32_t*) MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR5_ADDRESS, tmp_set_bdlr_val);
hw_phy_write32((uint32_t*) MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR3_ADDRESS, wr_data);
hw_phy_write32((uint32_t*) MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR4_ADDRESS, wr_data);
hw_phy_write32((uint32_t*) MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR5_ADDRESS, tmp_set_bdlr_val);
wr_data = PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_RANKWID_SET(0) | PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_RANKRID_SET(0);
hw_phy_write32((uint32_t*) MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_ADDRESS, wr_data);
}
//Reconfigure DQ/DM Mapping for DXnDQMAP;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0DQMAP0_ADDRESS + mc_base_addr), 0x00035678);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0DQMAP1_ADDRESS + mc_base_addr), 0x00004012);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1DQMAP0_ADDRESS + mc_base_addr), 0x00035678);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1DQMAP1_ADDRESS + mc_base_addr), 0x00004012);
//Programming DRAM Configuration (setting memory type to LP4)
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DCR_ADDRESS + mc_base_addr), 0x0000040d);
//Driving retention enable ports of PHY top
hw_write32((uint32_t*)(MCU_REGS_PHYCTRL_REGS_SNP_RET_EN_N_ADDRESS + mc_base_addr), 0x1);
hw_write32((uint32_t*)(MCU_REGS_PHYCTRL_REGS_SNP_RET_EN_I_ADDRESS + mc_base_addr), 0x2);
hw_write32((uint32_t*)(MCU_REGS_PHYCTRL_REGS_SNP_EXT_EN_I_ADDRESS + mc_base_addr), 0x01);
//PHY Timing registers
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PTR1_ADDRESS + mc_base_addr), 0x2e8112c0);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PTR3_ADDRESS + mc_base_addr), 0x00000014);
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PTR4_ADDRESS + mc_base_addr));
rd_data = rd_data & (uint32_t)(~PHY_REGS_DWC_DDRPHY_PUB_PTR4_TDINIT1_FIELD_MASK);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PTR4_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_PTR4_TDINIT1_SET(lp4_param_db.xpd[ddc_mode]+2)) );
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PTR5_ADDRESS + mc_base_addr), 0x00000014);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PTR6_ADDRESS + mc_base_addr), 0x03300640);
//FREQ VARIATION -- Frequency of clk_mem = 0.5 * DRAM clock frequency
//PLL Control Register 0 for 400MHz
if (dram_frequency == 400) {
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_PLLCR0_FRQSEL_SET(0x6)) );
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL0PLLCR0_FRQSEL_SET(0x6)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL1PLLCR0_FRQSEL_SET(0x6)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL2PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL2PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL2PLLCR0_FRQSEL_SET(0x6)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL3PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL3PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL3PLLCR0_FRQSEL_SET(0x6)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL4PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL4PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL4PLLCR0_FRQSEL_SET(0x6)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL5PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL5PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL5PLLCR0_FRQSEL_SET(0x6)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL6PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL6PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL6PLLCR0_FRQSEL_SET(0x6)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL7PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL7PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL7PLLCR0_FRQSEL_SET(0x6)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL8PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL8PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL8PLLCR0_FRQSEL_SET(0x6)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SLBPLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SLBPLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SLBPLLCR0_FRQSEL_SET(0x6)));
} else if (dram_frequency == 800) {
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_PLLCR0_FRQSEL_SET(0x2)) );
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL0PLLCR0_FRQSEL_SET(0x2)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL1PLLCR0_FRQSEL_SET(0x2)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL2PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL2PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL2PLLCR0_FRQSEL_SET(0x2)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL3PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL3PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL3PLLCR0_FRQSEL_SET(0x2)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL4PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL4PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL4PLLCR0_FRQSEL_SET(0x2)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL5PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL5PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL5PLLCR0_FRQSEL_SET(0x2)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL6PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL6PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL6PLLCR0_FRQSEL_SET(0x2)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL7PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL7PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL7PLLCR0_FRQSEL_SET(0x2)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL8PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL8PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL8PLLCR0_FRQSEL_SET(0x2)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SLBPLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SLBPLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SLBPLLCR0_FRQSEL_SET(0x2)));
} else if (dram_frequency == 1066) {
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_PLLCR0_FRQSEL_SET(0x1)) );
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL0PLLCR0_FRQSEL_SET(0x1)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL1PLLCR0_FRQSEL_SET(0x1)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL2PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL2PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL2PLLCR0_FRQSEL_SET(0x1)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL3PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL3PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL3PLLCR0_FRQSEL_SET(0x1)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL4PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL4PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL4PLLCR0_FRQSEL_SET(0x1)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL5PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL5PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL5PLLCR0_FRQSEL_SET(0x1)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL6PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL6PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL6PLLCR0_FRQSEL_SET(0x1)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL7PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL7PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL7PLLCR0_FRQSEL_SET(0x1)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL8PLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL8PLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SL8PLLCR0_FRQSEL_SET(0x1)));
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SLBPLLCR0_ADDRESS + mc_base_addr));
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SLBPLLCR0_ADDRESS + mc_base_addr), (uint32_t)(rd_data|PHY_REGS_DWC_DDRPHY_PUB_DX8SLBPLLCR0_FRQSEL_SET(0x1)));
}
//DRAM timing parameter
// DTPR0:
// [4:0] = RTP (MR2 [2:0])
// [14:8] = RP (maps to RPab typical cycles = 34)
// [22:16] = RAS
// [28:24] = RRD
wr_data = 0;
if(ddc_mode == DDC_MICRON){
hw_status("KAHALU_SIM_INIT: DDC_MODE is DDC_MICRON\n");
rl_val = lp4_param_db.rl[DDC_TYP];
rtp_val = lp4_param_db.rtp[DDC_TYP];
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.rl = %d\n",lp4_param_db.rl[DDC_TYP]);
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.rtp = %d\n",lp4_param_db.rtp[DDC_TYP]);
} else{
hw_status("KAHALU_SIM_INIT: DDC_MODE is not DDC_MICRON\n");
rl_val = lp4_param_db.rl[DDC_REG];
//rl_val = 28;
rtp_val = lp4_param_db.rtp[DDC_REG];
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.rl = %d\n",lp4_param_db.rl[DDC_TYP]);
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.rtp = %d\n",lp4_param_db.rtp[DDC_TYP]);
}
if((rl_val == 6) && (rtp_val == 8)){
wr_data = 0;
}else if (((rl_val == 10)||(rl_val == 12)) && (rtp_val == 8)){
wr_data = 1;
}else if((rl_val == 14) && (rtp_val == 8)){
wr_data = 2;
}else if((rl_val == 20) && (rtp_val == 8)){
wr_data = 3;
}else if(((rl_val == 24)||(rl_val == 28)) && (rtp_val == 10)){
wr_data = 4;
}else if(((rl_val == 28)||(rl_val == 32)) && (rtp_val == 12)){ //*
wr_data = 5;
}else if(((rl_val == 32) ||(rl_val == 36)) && (rtp_val == 14)){
wr_data = 6;
}else if(((rl_val == 36)||(rl_val == 40)) && (rtp_val == 16)){
wr_data = 7;
}else{
wr_data = 0;
}
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.rpab=%d\n",lp4_param_db.rpab[ddc_mode]);
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.ras=%d\n" ,lp4_param_db.ras[ddc_mode]);
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.rrd=%d\n",lp4_param_db.rrd[ddc_mode]);
//hw_status("ddc_mode=%d\n",ddc_mode);
//hw_status("RPAB: {%d,%d,%d,%d,%d,%d}",lp4_param_db.rpab[0],lp4_param_db.rpab[1],lp4_param_db.rpab[2],lp4_param_db.rpab[3],lp4_param_db.rpab[4],lp4_param_db.rpab[5]);
hw_status("DTPR0.rpab=%d\n",lp4_param_db.rpab[ddc_mode]);
hw_status("DTPR0.ras=%d\n" ,lp4_param_db.ras[ddc_mode]);
hw_status("DTPR0.rrd=%d\n",lp4_param_db.rrd[ddc_mode]);
wr_data = ((wr_data << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR0_TRTP_LSB) |
(lp4_param_db.rpab[ddc_mode] << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR0_TRP_LSB) |
(lp4_param_db.ras[ddc_mode] << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR0_TRAS_LSB) |
(lp4_param_db.rrd[ddc_mode] << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR0_TRRD_LSB) );
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTPR0_ADDRESS + mc_base_addr), wr_data);
// DTPR1:
//PHY_REGS_DWC_DDRPHY_PUB_DTPR1_TWLMRD_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR1_TFAW_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR1_TMOD_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR1_TMRD_LSB
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.faw=%d\n",lp4_param_db.faw[ddc_mode]);
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.mrd=%d\n" ,lp4_param_db.mrd[ddc_mode]);
hw_status("DTPR1.faw=%d\n",lp4_param_db.faw[ddc_mode]);
hw_status("DTPR1.mrd=%d\n",lp4_param_db.mrd[ddc_mode]);
wr_data = ((16 << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR1_TWLMRD_LSB) |
(lp4_param_db.faw[ddc_mode] << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR1_TFAW_LSB) |
(7 << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR1_TMOD_LSB) | //MOD is DDR3/DDR4 param. Don't care for LPDDR.
(lp4_param_db.mrd[ddc_mode] << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR1_TMRD_LSB) );
//Use database timing value to program timing registers
//hw_phy_write32((uint32_t)MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTPR1_ADDRESS, 0x1740071A);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTPR1_ADDRESS + mc_base_addr), wr_data);
// DTRP2:
//PHY_REGS_DWC_DDRPHY_PUB_DTPR2_TRTW_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR2_TRTODT_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR2_TCMDCKE_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR2_TCKE_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR2_TXS_LSB
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.cmdcke=%d\n",lp4_param_db.cmdcke[ddc_mode]);
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.cke=%d\n" ,lp4_param_db.cke[ddc_mode]);
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.xsr=%d\n" ,lp4_param_db.xsr[ddc_mode]);
hw_status("DTPR2.cmdcke=%d\n",lp4_param_db.cmdcke[ddc_mode]);
hw_status("DTPR2.cke=%d\n" ,lp4_param_db.cke[ddc_mode]);
hw_status("DTPR2.xsr=%d\n" ,lp4_param_db.xsr[ddc_mode]);
wr_data = ((0 << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR2_TRTW_LSB) | //Standard bus turn-around delay; no additional delay
(0 << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR2_TRTODT_LSB) | //tRTODT is a DDR3/DDR4 param. Don't care for LPDDR.
(lp4_param_db.cmdcke[ddc_mode]<< (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR2_TCMDCKE_LSB) |
(lp4_param_db.cke[ddc_mode] << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR2_TCKE_LSB) |
(7 << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR2_TVRCG_LSB) |
(lp4_param_db.xsr[ddc_mode] << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR2_TXS_LSB) );
//Use database timing value to program timing registers
//hw_phy_write32((uint32_t)MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTPR2_ADDRESS, 0x000c01d9);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTPR2_ADDRESS + mc_base_addr), wr_data);
// DTPR3:
//PHY_REGS_DWC_DDRPHY_PUB_DTPR3_TOFDX_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR3_TCCD_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR3_TDLLK_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR3_TDQSCKMAX_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR3_TDQSCK_LSB
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.dqsck=%d\n",lp4_param_db.dqsck[ddc_mode]);
hw_status("DTPR3.dqsckmax=%d\n",lp4_param_db.dqsck[ddc_mode]);
hw_status("DTPR3.dqsck=%d\n",lp4_param_db.dqsck[ddc_mode]);
wr_data = ((0 << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR3_TOFDX_LSB) | //ODT turn-off delay extension = 0
(0 << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR3_TCCD_LSB) | //CCD=BL/2
(384 << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR3_TDLLK_LSB) | //Don't care. DLL locking time. Use default in PUB.
(lp4_param_db.dqsck[ddc_mode]<< (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR3_TDQSCKMAX_LSB) |
(lp4_param_db.dqsck[ddc_mode]<< (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR3_TDQSCK_LSB) );
//1600MHz setting
//hw_phy_write32((uint32_t)MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTPR3_ADDRESS, 0x02000606);
//1333MHz setting
//hw_phy_write32((uint32_t)MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTPR3_ADDRESS, 0x02000505);
//Use database timing value to program timing registers
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTPR3_ADDRESS + mc_base_addr), wr_data);
// DTPR4:
//PHY_REGS_DWC_DDRPHY_PUB_DTPR4_TAOND_TAOFD_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR4_TRFC_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR4_TWLO_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR4_TXP_LSB
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.rfcab=%d\n",lp4_param_db.rfcab[ddc_mode]);
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.xpd=%d\n",lp4_param_db.xpd[ddc_mode]);
hw_status("DTPR4.rfcab=%d\n",lp4_param_db.rfcab[ddc_mode]);
hw_status("DTPR4.xpd=%d\n",lp4_param_db.xpd[ddc_mode]);
wr_data = ((0 << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR4_TAOND_TAOFD_LSB) | //DDR2 only. Don't care. Set to 0.
(lp4_param_db.rfcab[ddc_mode]<< (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR4_TRFC_LSB) |
(43 << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR4_TWLO_LSB) | //Not used by DDC; Use default
(lp4_param_db.xpd[ddc_mode] << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR4_TXP_LSB) );
//Use database timing value to program timing registers
//hw_phy_write32((uint32_t)MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTPR4_ADDRESS, 0x01202814);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTPR4_ADDRESS + mc_base_addr), wr_data);
// DTPR5:
//PHY_REGS_DWC_DDRPHY_PUB_DTPR5_TRC_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR5_TRCD_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR5_TWTR_LSB
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.rcpb=%d\n",lp4_param_db.rcpb[ddc_mode]);
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.rcd=%d\n",lp4_param_db.rcd[ddc_mode]);
//please don't delete. Needed for SoC porting - hw_status("lp4_param_db.wtr=%d\n",lp4_param_db.wtr[ddc_mode]);
hw_status("DTPR5.rcpb=%d\n",lp4_param_db.rcpb[ddc_mode]);
hw_status("DTPR5.rcd=%d\n",lp4_param_db.rcd[ddc_mode]);
hw_status("DTPR5.wtr=%d\n",lp4_param_db.wtr[ddc_mode]);
wr_data = ((lp4_param_db.rcpb[ddc_mode] << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR5_TRC_LSB) |
(lp4_param_db.rcd[ddc_mode] << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR5_TRCD_LSB) |
(lp4_param_db.wtr[ddc_mode] << (uint32_t)PHY_REGS_DWC_DDRPHY_PUB_DTPR5_TWTR_LSB) );
//Use database timing value to program timing registers
//hw_phy_write32((uint32_t)MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTPR5_ADDRESS, 0x60654410);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTPR5_ADDRESS + mc_base_addr), wr_data);
// DTPR6:
//PHY_REGS_DWC_DDRPHY_PUB_DTPR6_PUBWLEN_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR6_PUBRLEN_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR6_PUBWL_LSB
//PHY_REGS_DWC_DDRPHY_PUB_DTPR6_PUBRL_LSB
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTPR6_ADDRESS + mc_base_addr), 0x00000000); //RL/WL disabled in PUB. Will be calculated from MR settings.
//ZQCR Configuration register for ZQ cal
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_ZQCR_ADDRESS + mc_base_addr), 0x008a2c58);
// PIR. Initiates PLL initialization, impedance calibration, and delay-line calibration.
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PIR_ADDRESS + mc_base_addr), 0x00000073);
//FREQ VARIATION
//PGCR2 PHY general configuration register
if (dram_frequency == 400) {
//mem_clk_freq = 200MHz
//mem_clk_period = 5ns
//Adding 10% margin = 5.5ns
//tREFPRD = (9*3900/mem_clk_period)-600 = 5781 = 0x1695
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR2_ADDRESS + mc_base_addr), 0x10f01695);
} else if (dram_frequency == 800) {
//mem_clk_freq = 400MHz
//mem_clk_period = 2.5ns
//Adding 10% margin = 2.75ns
//tREFPRD = (9*3900/mem_clk_period)-600 = 12164 = 0x2f84
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR2_ADDRESS + mc_base_addr), 0x10f02f84);
} else if (dram_frequency == 1066) {
//mem_clk_freq = 533MHz
//mem_clk_period = 1.876ns
//Adding 10% margin = 2.06ns
//tREFPRD = (9*3900/mem_clk_period)-600 = 16439 = 0x4037
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR2_ADDRESS + mc_base_addr), 0x10f04037);
} else if (dram_frequency == 1333) {
//mem_clk_freq = 666.6666MHz
//mem_clk_period = 1.5ns
//Adding 10% margin = 1.65ns
//tREFPRD = (9*3900/mem_clk_period)-600 = 20672 = 0x50c0
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR2_ADDRESS + mc_base_addr), 0x10f050c0);
} else {
//mem_clk_freq = 800MHz
//mem_clk_period = 1.25ns
//Adding 10% margin = 1.375ns
//tREFPRD = (9*3900/mem_clk_period)-600 = 24927 = 0x615f
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR2_ADDRESS + mc_base_addr), 0x10f0615f);
}
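//Worked example of the tREFPRD formula above (800MHz data-rate case):
//  mem_clk_period with 10% margin = 2.5ns * 1.1 = 2.75ns
//  tREFPRD = (9 * 3900 / 2.75) - 600 = 12763 - 600 ~= 12164 = 0x2f84,
//  which is the low half of the 0x10f02f84 value written to PGCR2.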
//Waiting for PLL initialization, impedance calibration, and delay-line calibration to complete
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGSR0_ADDRESS + mc_base_addr));
rd_data = 0;
do{
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGSR0_ADDRESS + mc_base_addr));
}while((rd_data & 0x00000001) != 0x00000001);
hw_status("KAHALU_SIM_INIT: PLL INITIALIZATION COMPLETE\n");
//MR0
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR0_ADDRESS + mc_base_addr), 0x00000000);
//FREQ VARIATION
//MR1
if (dram_frequency == 400) {
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR1_ADDRESS + mc_base_addr), 0x00000016);
} else if (dram_frequency == 800) {
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR1_ADDRESS + mc_base_addr), 0x00000026); //nWR=16, wr-preamble = 2 nclk, BL=2'b10
} else if (dram_frequency == 1066) {
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR1_ADDRESS + mc_base_addr), 0x00000036); //nWR=20, wr-preamble = 2 nclk, BL=2'b10
} else if (dram_frequency == 1333) {
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR1_ADDRESS + mc_base_addr), 0x00000046); //nWR=24, wr-preamble = 2 nclk, BL=2'b10
} else {
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR1_ADDRESS + mc_base_addr), 0x00000056); //nWR=30, wr-preamble = 2 nclk, BL=2'b10
}
//MR3
//write MR3 to enable rd and wr dbi except when dbi is disabled by test
if( (wr_dbi_dis ==1) && (rd_dbi_dis == 1) )
wr_data = 0x31; //00110001 wr_dbi + rd_dbi cleared
else if(wr_dbi_dis)
wr_data = 0x71; //01110001 wr_dbi cleared
else if(rd_dbi_dis)
wr_data = 0xb1; //10110001 rd_dbi cleared
else
wr_data = 0xf1; //enable both
//hw_phy_write32((uint32_t)MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR3_ADDRESS, 0x000000f1);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR3_ADDRESS + mc_base_addr), wr_data);
//FREQ VARIATION
//MR2
if (dram_frequency == 400) {
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR2_ADDRESS + mc_base_addr), 0x00000009);
} else if (dram_frequency == 800) {
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR2_ADDRESS + mc_base_addr), 0x00000012);
} else if (dram_frequency == 1066) {
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR2_ADDRESS + mc_base_addr), 0x0000001b);
} else if (dram_frequency == 1333) {
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR2_ADDRESS + mc_base_addr), 0x00000024);
} else {
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR2_ADDRESS + mc_base_addr), 0x0000002D);
}
//MR4
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR4_ADDRESS + mc_base_addr), 0x00000003);
//MR12
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR12_ADDRESS + mc_base_addr), 0x0000004D);
//MR11
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_MR11_ADDRESS + mc_base_addr), 0x00000000);
// DX8SL0DXCTL2 Data slice control register. Value got from Synopsys
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_ADDRESS + mc_base_addr), 0x00281800);
// DX8SL1DXCTL2 Data slice control register. Value got from Synopsys
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_ADDRESS + mc_base_addr), 0x00281800);
// DX8SL2DXCTL2 Data slice control register. Value got from Synopsys
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL2DXCTL2_ADDRESS + mc_base_addr), 0x00281800);
// DX8SL3DXCTL2 Data slice control register. Value got from Synopsys
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL3DXCTL2_ADDRESS + mc_base_addr), 0x00281800);
// DX8SL0DQSCTL Data slice control register. Value got from Synopsys
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DQSCTL_ADDRESS + mc_base_addr), 0x012640c4);
// DX8SL1DQSCTL Data slice control register. Value got from Synopsys
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DQSCTL_ADDRESS + mc_base_addr), 0x012640c4);
// DX8SL2DQSCTL Data slice control register. Value got from Synopsys
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL2DQSCTL_ADDRESS + mc_base_addr), 0x012640c4);
// DX8SL3DQSCTL Data slice control register. Value got from Synopsys
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL3DQSCTL_ADDRESS + mc_base_addr), 0x012640c4);
// DSGCR PHY update request disabled
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DSGCR_ADDRESS + mc_base_addr), 0x02a04180);
// DTCR0 Training configuration register.
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTCR0_ADDRESS + mc_base_addr), 0x900051c7);
if (active_ranks == 1) {
//DTCR1 Training configuration register.
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTCR1_ADDRESS + mc_base_addr), 0x00010236);
} else {
//DTCR1 Training configuration register.
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTCR1_ADDRESS + mc_base_addr), 0x00030236);
}
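// The two DTCR1 values above differ only in bits [17:16] (0x1 vs 0x3), presumably the
// rank-enable mask: one rank enabled when active_ranks == 1, two ranks otherwise.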
// Starting DRAM initialization
hw_status("KAHALU_SIM_INIT: DRAM INITIALIZATION STARTED\n");
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PIR_ADDRESS + mc_base_addr), 0x00000181);
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGSR0_ADDRESS + mc_base_addr));
rd_data = 0;
do{
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGSR0_ADDRESS + mc_base_addr));
}while((rd_data & 0x00000001) != 0x00000001);
hw_status("KAHALU_SIM_INIT: DRAM INITIALIZATION COMPLETE\n");
if (dump_phy_regs) {
hw_status("DUMPING ALL REGISTERS BEFORE TRAINING\n");
rd_addr = MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_RIDR_ADDRESS;
do
{
hw_read32((uint32_t*)(rd_addr+mc_base_addr));
rd_addr += 4;
} while (rd_addr != MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX4SLBIOCR_ADDRESS);
}
if (!phy_init_train)
{
hw_status("KAHALU_SIM_INIT: PROGRAMMING REGISTERS WITH TRAINED VALUES for FREQUENCY %0d\n",dram_frequency);
phy_init_synps_lp4_reg_write(mc, dram_frequency);
}
else
{
hw_status("TRAININGS START\n");
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PIR_ADDRESS + mc_base_addr), 0x0012FE01);
rd_data = 0;
do
{
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGSR0_ADDRESS + mc_base_addr));
} while ((rd_data & 0x00000001) != 0x00000001);
hw_status("TRAININGS COMPLETE\n");
if (rd_data & 0x7ffc0000)
{
hw_status("ERROR: PHY_INIT_TRAIN failed; PGSR0 = 0x%0x\n", rd_data);
}
}
if (dump_phy_regs) {
hw_status("DUMPING ALL REGISTERS AFTER TRAINING\n");
rd_addr = MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_RIDR_ADDRESS;
do
{
hw_read32((uint32_t*)(rd_addr+mc_base_addr));
rd_addr += 4;
} while (rd_addr != MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX4SLBIOCR_ADDRESS);
}
hw_status("KAHALU_SIM_INIT: DISABLING PUB\n");
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR1_ADDRESS + mc_base_addr), 0x02004600);
hw_status("KAHALU_SIM_INIT: PHY INITIALIZATION COMPLETE\n");
}
#endif

File diff suppressed because it is too large

View File

@ -0,0 +1,452 @@
#include "kahalu_sim_init.h"
#ifdef CONF_HAS___PHY_RTL
void phy_init_synps_lp4_reg_write(const int mc, const int dram_frequency) {
uint32_t rd_data = 0;
uint32_t wr_data = 0;
uint32_t mc_base_addr = get_mcu_baseaddr(mc);
hw_status("KAHALU_SIM_INIT: PROGRAMMING TRAINING REGISTERS for frequency %0d\n",dram_frequency);
//Select backdoor values based on frequency
//400MHz
if (dram_frequency == 400) {
hw_status("KAHALU_SIM_INIT: PROGRAMMING REGISTERS WITH TRAINED VALUES FOR 400MHz\n");
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_ACMDLR1_ADDRESS + mc_base_addr), 0x00fc00fc); //0x00f000f0
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GCR5_ADDRESS + mc_base_addr), 0x09093f3f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GCR6_ADDRESS + mc_base_addr), 0x09091919);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR0_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR1_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR2_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR5_ADDRESS + mc_base_addr), 0x09093f3f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR6_ADDRESS + mc_base_addr), 0x09091919);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR0_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR1_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR2_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_ADDRESS + mc_base_addr), 0x00181800);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0MDLR1_ADDRESS + mc_base_addr), 0x000000e0);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_ADDRESS + mc_base_addr), 0x00000000);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR0_ADDRESS + mc_base_addr), 0x000000f9);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR1_ADDRESS + mc_base_addr), 0x000000df);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR2_ADDRESS + mc_base_addr), 0x0000003b);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR5_ADDRESS + mc_base_addr), 0x0000001f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GTR0_ADDRESS + mc_base_addr), 0x01010001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR0_ADDRESS + mc_base_addr), 0x000000f9);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR1_ADDRESS + mc_base_addr), 0x000000df);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR2_ADDRESS + mc_base_addr), 0x0000003b);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR5_ADDRESS + mc_base_addr), 0x0000001f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GTR0_ADDRESS + mc_base_addr), 0x01010001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR3_ADDRESS + mc_base_addr), 0x0000007d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR4_ADDRESS + mc_base_addr), 0x0000007d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR3_ADDRESS + mc_base_addr), 0x0000007d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR4_ADDRESS + mc_base_addr), 0x0000007d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR5_ADDRESS + mc_base_addr), 0x09093f3f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR6_ADDRESS + mc_base_addr), 0x09091919);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR0_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR1_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR2_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1MDLR1_ADDRESS + mc_base_addr), 0x000000fc);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR0_ADDRESS + mc_base_addr), 0x000000f9);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR1_ADDRESS + mc_base_addr), 0x000000df);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR2_ADDRESS + mc_base_addr), 0x0000003b);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR5_ADDRESS + mc_base_addr), 0x0000001f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GTR0_ADDRESS + mc_base_addr), 0x01010001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR0_ADDRESS + mc_base_addr), 0x000000f9);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR1_ADDRESS + mc_base_addr), 0x000000df);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR2_ADDRESS + mc_base_addr), 0x0000003b);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR5_ADDRESS + mc_base_addr), 0x0000001f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GTR0_ADDRESS + mc_base_addr), 0x01010001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR3_ADDRESS + mc_base_addr), 0x0000007d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR4_ADDRESS + mc_base_addr), 0x0000007d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR3_ADDRESS + mc_base_addr), 0x0000007d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR4_ADDRESS + mc_base_addr), 0x0000007d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR5_ADDRESS + mc_base_addr), 0x09093f3f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR6_ADDRESS + mc_base_addr), 0x09091919);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR0_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR1_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR2_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1MDLR1_ADDRESS + mc_base_addr), 0x000000fc);
//FIXME: Check if this is needed
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_ADDRESS + mc_base_addr), 0x00181800);
//set phy to do non-contiguous read response FING
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR3_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data | 0x2 << PHY_REGS_DWC_DDRPHY_PUB_PGCR3_RDMODE_LSB;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR3_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data | 0x2 << PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_RDMODE_LSB;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data | 0x2 << PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_RDMODE_LSB;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
//800MHz
} else if (dram_frequency == 800) {
hw_status("KAHALU_SIM_INIT: PROGRAMMING REGISTERS WITH TRAINED VALUES FOR 800MHz\n");
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_ACMDLR1_ADDRESS + mc_base_addr), 0x007c007c);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GCR5_ADDRESS + mc_base_addr), 0x09093f3f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GCR6_ADDRESS + mc_base_addr), 0x09091919);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR0_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR1_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR2_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_ZQ0SR_ADDRESS + mc_base_addr), 0x00000230);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_ZQ1SR_ADDRESS + mc_base_addr), 0x00000200);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_ADDRESS + mc_base_addr), 0x00000000);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR0_ADDRESS + mc_base_addr), 0x0000007c);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR1_ADDRESS + mc_base_addr), 0x00000024);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR2_ADDRESS + mc_base_addr), 0x00000005);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR3_ADDRESS + mc_base_addr), 0x0000003f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR4_ADDRESS + mc_base_addr), 0x0000003f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR5_ADDRESS + mc_base_addr), 0x0000000f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GTR0_ADDRESS + mc_base_addr), 0x02010002);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0MDLR1_ADDRESS+ mc_base_addr), 0x00000100);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR5_ADDRESS + mc_base_addr), 0x09093f3f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR6_ADDRESS + mc_base_addr), 0x09091919);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR0_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR1_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR2_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR0_ADDRESS + mc_base_addr), 0x0000007c);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR1_ADDRESS + mc_base_addr), 0x00000024);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR2_ADDRESS + mc_base_addr), 0x00000005);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR3_ADDRESS + mc_base_addr), 0x0000003f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR4_ADDRESS + mc_base_addr), 0x0000003f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR5_ADDRESS + mc_base_addr), 0x0000000f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GTR0_ADDRESS + mc_base_addr), 0x02010002);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1MDLR1_ADDRESS + mc_base_addr), 0x00000060);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR0_ADDRESS + mc_base_addr), 0x0000007c);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR1_ADDRESS + mc_base_addr), 0x00000024);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR2_ADDRESS + mc_base_addr), 0x00000005);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR3_ADDRESS + mc_base_addr), 0x0000003f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR4_ADDRESS + mc_base_addr), 0x0000003f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR5_ADDRESS + mc_base_addr), 0x0000000f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GTR0_ADDRESS + mc_base_addr), 0x02010002);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0MDLR1_ADDRESS+ mc_base_addr), 0x00000100);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR5_ADDRESS + mc_base_addr), 0x09093f3f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR6_ADDRESS + mc_base_addr), 0x09091919);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR0_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR1_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR2_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR0_ADDRESS + mc_base_addr), 0x0000007c);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR1_ADDRESS + mc_base_addr), 0x00000024);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR2_ADDRESS + mc_base_addr), 0x00000005);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR3_ADDRESS + mc_base_addr), 0x0000003f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR4_ADDRESS + mc_base_addr), 0x0000003f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR5_ADDRESS + mc_base_addr), 0x0000000f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GTR0_ADDRESS + mc_base_addr), 0x02010002);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1MDLR1_ADDRESS + mc_base_addr), 0x00000060);
//set phy to do non-contiguous read response FING
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR3_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data | 0x2 << PHY_REGS_DWC_DDRPHY_PUB_PGCR3_RDMODE_LSB;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR3_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data | 0x2 << PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_RDMODE_LSB;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data | 0x2 << PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_RDMODE_LSB;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
//1066MHz
}else if (dram_frequency == 1066) {
hw_status("KAHALU_SIM_INIT: PROGRAMMING REGISTERS WITH TRAINED VALUES FOR 1066MHz\n");
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_ACMDLR1_ADDRESS + mc_base_addr), 0x00500050);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_ZQ1SR_ADDRESS + mc_base_addr), 0x00000203);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GCR5_ADDRESS + mc_base_addr), 0x09093f3f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GCR6_ADDRESS + mc_base_addr), 0x09091919);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR0_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR1_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR2_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_ADDRESS + mc_base_addr), 0x00000000);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR0_ADDRESS + mc_base_addr), 0x0000005d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR1_ADDRESS + mc_base_addr), 0x00000033);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR2_ADDRESS + mc_base_addr), 0x00000055);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR3_ADDRESS + mc_base_addr), 0x0000002f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR4_ADDRESS + mc_base_addr), 0x0000002f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR5_ADDRESS + mc_base_addr), 0x0000000b);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0MDLR1_ADDRESS + mc_base_addr), 0x0000005e);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GTR0_ADDRESS + mc_base_addr), 0x02010001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR5_ADDRESS + mc_base_addr), 0x09093f3f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR6_ADDRESS + mc_base_addr), 0x09091919);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR0_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR1_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR2_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR0_ADDRESS + mc_base_addr), 0x0000005d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR1_ADDRESS + mc_base_addr), 0x00000033);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR2_ADDRESS + mc_base_addr), 0x00000055);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR3_ADDRESS + mc_base_addr), 0x0000002f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR4_ADDRESS + mc_base_addr), 0x0000002f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR5_ADDRESS + mc_base_addr), 0x0000000b);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1MDLR1_ADDRESS + mc_base_addr), 0x00000040);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GTR0_ADDRESS + mc_base_addr), 0x02010001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR0_ADDRESS + mc_base_addr), 0x0000005d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR1_ADDRESS + mc_base_addr), 0x00000033);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR2_ADDRESS + mc_base_addr), 0x00000055);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR3_ADDRESS + mc_base_addr), 0x0000002f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR4_ADDRESS + mc_base_addr), 0x0000002f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR5_ADDRESS + mc_base_addr), 0x0000000b);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0MDLR1_ADDRESS + mc_base_addr), 0x0000005e);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GTR0_ADDRESS + mc_base_addr), 0x02010001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR5_ADDRESS + mc_base_addr), 0x09093f3f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR6_ADDRESS + mc_base_addr), 0x09091919);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR0_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR1_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR2_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR0_ADDRESS + mc_base_addr), 0x0000005d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR1_ADDRESS + mc_base_addr), 0x00000033);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR2_ADDRESS + mc_base_addr), 0x00000055);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR3_ADDRESS + mc_base_addr), 0x0000002f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR4_ADDRESS + mc_base_addr), 0x0000002f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR5_ADDRESS + mc_base_addr), 0x0000000b);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1MDLR1_ADDRESS + mc_base_addr), 0x00000040);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GTR0_ADDRESS + mc_base_addr), 0x02010001);
//set phy to do non-contiguous read response FING
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR3_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data | 0x2 << PHY_REGS_DWC_DDRPHY_PUB_PGCR3_RDMODE_LSB;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR3_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data | 0x2 << PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_RDMODE_LSB;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data | 0x2 << PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_RDMODE_LSB;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
//1333MHz
} else if (dram_frequency == 1333) {
hw_status("KAHALU_SIM_INIT: PROGRAMMING REGISTERS WITH TRAINED VALUES FOR 1333MHz\n");
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_ACMDLR1_ADDRESS + mc_base_addr), 0x004a004a);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GCR5_ADDRESS + mc_base_addr), 0x09093f3f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GCR6_ADDRESS + mc_base_addr), 0x09091919);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR0_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR1_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR2_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_ADDRESS + mc_base_addr), 0x00000000);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR0_ADDRESS + mc_base_addr), 0x0000004a);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR1_ADDRESS + mc_base_addr), 0x0000003d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR2_ADDRESS + mc_base_addr), 0x0000003b);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR3_ADDRESS + mc_base_addr), 0x00000026);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR4_ADDRESS + mc_base_addr), 0x00000026);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR5_ADDRESS + mc_base_addr), 0x00000009);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GTR0_ADDRESS + mc_base_addr), 0x02010001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR5_ADDRESS + mc_base_addr), 0x09093f3f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR6_ADDRESS + mc_base_addr), 0x09091919);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR0_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR1_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR2_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR0_ADDRESS + mc_base_addr), 0x0000004a);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR1_ADDRESS + mc_base_addr), 0x0000003d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR2_ADDRESS + mc_base_addr), 0x0000003b);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR3_ADDRESS + mc_base_addr), 0x00000026);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR4_ADDRESS + mc_base_addr), 0x00000026);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR5_ADDRESS + mc_base_addr), 0x00000009);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GTR0_ADDRESS + mc_base_addr), 0x02010001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR0_ADDRESS + mc_base_addr), 0x0000004a);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR1_ADDRESS + mc_base_addr), 0x0000003d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR2_ADDRESS + mc_base_addr), 0x0000003b);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR3_ADDRESS + mc_base_addr), 0x00000026);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR4_ADDRESS + mc_base_addr), 0x00000026);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR5_ADDRESS + mc_base_addr), 0x00000009);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GTR0_ADDRESS + mc_base_addr), 0x02010001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR5_ADDRESS + mc_base_addr), 0x09093f3f);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR6_ADDRESS + mc_base_addr), 0x09091919);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR0_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR1_ADDRESS + mc_base_addr), 0x01010101);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR2_ADDRESS + mc_base_addr), 0x00000001);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR0_ADDRESS + mc_base_addr), 0x0000004a);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR1_ADDRESS + mc_base_addr), 0x0000003d);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR2_ADDRESS + mc_base_addr), 0x0000003b);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR3_ADDRESS + mc_base_addr), 0x00000026);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR4_ADDRESS + mc_base_addr), 0x00000026);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR5_ADDRESS + mc_base_addr), 0x00000009);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GTR0_ADDRESS + mc_base_addr), 0x02010001);
//set phy to do non-contiguous read response FING
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR3_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data | 0x2 << PHY_REGS_DWC_DDRPHY_PUB_PGCR3_RDMODE_LSB;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR3_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data | 0x2 << PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_RDMODE_LSB;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data | 0x2 << PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_RDMODE_LSB;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
//1600MHz as default
} else {
hw_status("KAHALU_SIM_INIT: PROGRAMMING REGISTERS WITH TRAINED VALUES FOR 1600MHz\n");
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_ACMDLR1_ADDRESS + mc_base_addr), 0x003c003c);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GCR5_ADDRESS + mc_base_addr), 0x09093f3f);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GCR6_ADDRESS + mc_base_addr), 0x09091919);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR0_ADDRESS + mc_base_addr), 0x01010101);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR1_ADDRESS + mc_base_addr), 0x01010101);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0BDLR2_ADDRESS + mc_base_addr), 0x00000001);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0MDLR1_ADDRESS + mc_base_addr), 0x00000040);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR5_ADDRESS + mc_base_addr), 0x09093f3f);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR6_ADDRESS + mc_base_addr), 0x09091919);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR0_ADDRESS + mc_base_addr), 0x01010101);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR1_ADDRESS + mc_base_addr), 0x01010101);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1BDLR2_ADDRESS + mc_base_addr), 0x00000001);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1MDLR1_ADDRESS + mc_base_addr), 0x00000020);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_ADDRESS + mc_base_addr), 0x00381800);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_ADDRESS + mc_base_addr), 0x00381800);
//hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTEDR0_ADDRESS + mc_base_addr), 0x0001c4a2);
//hw_sleep(15);
//hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTEDR1_ADDRESS + mc_base_addr), 0x00007e00);
//hw_sleep(15);
//hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DTEDR2_ADDRESS + mc_base_addr), 0x00007e00);
//hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_VTDR_ADDRESS + mc_base_addr), 0x7f003200);
hw_sleep(200);
//Ensure PGCR3[25]-WDLVT is programmed to 1'b0 to select MTC based WR training.
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GCR3_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data & (~PHY_REGS_DWC_DDRPHY_PUB_DX0GCR3_WDLVT_FIELD_MASK);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GCR3_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR3_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data & (~PHY_REGS_DWC_DDRPHY_PUB_DX1GCR3_WDLVT_FIELD_MASK);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GCR3_ADDRESS + mc_base_addr), wr_data);
//---------------------------------------------------------------------------------------------
//For RANK0
//---------------------------------------------------------------------------------------------
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_ADDRESS + mc_base_addr), 0x00000000);
hw_sleep(200);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR0_ADDRESS + mc_base_addr), 0x0000003e);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR1_ADDRESS + mc_base_addr), 0x00000005+(0x3F*0x3));
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR2_ADDRESS + mc_base_addr), 0x00000028);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR5_ADDRESS + mc_base_addr), 0x00000007);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GTR0_ADDRESS + mc_base_addr), 0x03010001);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR0_ADDRESS + mc_base_addr), 0x0000003e);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR1_ADDRESS + mc_base_addr), 0x00000005+(0x3F*0x3));
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR2_ADDRESS + mc_base_addr), 0x00000028);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR5_ADDRESS + mc_base_addr), 0x00000007);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GTR0_ADDRESS + mc_base_addr), 0x03010001);
hw_sleep(200);
//---------------------------------------------------------------------------------------------
//For RANK1
//---------------------------------------------------------------------------------------------
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_RANKIDR_ADDRESS + mc_base_addr), 0x00010001);
hw_sleep(200);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR0_ADDRESS + mc_base_addr), 0x0000003e);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR1_ADDRESS + mc_base_addr), 0x00000005+(0x3F*0x3));
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR2_ADDRESS + mc_base_addr), 0x00000028);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0LCDLR5_ADDRESS + mc_base_addr), 0x00000007);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX0GTR0_ADDRESS + mc_base_addr), 0x03010001);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR0_ADDRESS + mc_base_addr), 0x0000003e);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR1_ADDRESS + mc_base_addr), 0x00000005+(0x3F*0x3));
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR2_ADDRESS + mc_base_addr), 0x00000028);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1LCDLR5_ADDRESS + mc_base_addr), 0x00000007);
hw_sleep(15);
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX1GTR0_ADDRESS + mc_base_addr), 0x03010001);
hw_sleep(15);
//set phy to do non-contiguous read response FING
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR3_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data | 0x2 << PHY_REGS_DWC_DDRPHY_PUB_PGCR3_RDMODE_LSB;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_PGCR3_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data | 0x2 << PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_RDMODE_LSB;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL0DXCTL2_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
rd_data = hw_phy_read32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_ADDRESS + mc_base_addr));
hw_sleep(15);
wr_data = rd_data | 0x2 << PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_RDMODE_LSB;
hw_phy_write32((uint32_t*)(MCU_REGS_PHY_REGS_DWC_DDRPHY_PUB_DX8SL1DXCTL2_ADDRESS + mc_base_addr), wr_data);
hw_sleep(15);
}
}
#endif //CONF_HAS___PHY_RTL

View File

@ -0,0 +1,180 @@
/*-------------------------------------------------------
|
| SBOOT.S
|
| 1BL for standalone boot.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2015 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
.equiv SRAM_RESET_VECTOR, 0x00100000
.altmacro
#include "spi.inc"
.begin no-absolute-literals
.section .StandaloneBootVector.text, "ax"
.align 4
.global _StandaloneBootVector
_StandaloneBootVector:
j _StandaloneBootHandler
.size _StandaloneBootVector, . - _StandaloneBootVector
.align 4
.literal_position
.align 4
.global _StandaloneBootHandler
_StandaloneBootHandler:
// Set GPIOs to hardware control
REG_RMW_BITS a0, a1, a2, a3, HUP_CHIP_GPIO15_GPIOAFSEL_ADDRESS, HUP_GPIO_15_GPIOAFSEL_DATA, HUP_GPIO_GPIOAFSEL_AFSEL_FIELD_MASK
// Enable receiver on GPIO125
REG_RMW_BITS a0, a1, a2, a3, HUP_CHIP_POR_GPIO_IE_GP127_GP96_ADDRESS, HUP_POR_GPIO_IE3_DATA, HUP_POR_GPIO_IE3_MASK
// Enable GPIO register overrides
REG_RMW_BITS a0, a1, a2, a3, HUP_CHIP_POR_GPIO_REG_OVERRIDE_ADDRESS, HUP_POR_GPIO_REG_OVERRIDE_DATA, HUP_POR_GPIO_REG_OVERRIDE_ENABLE_FIELD_MASK
// Configure clock
REG_RMW_BITS a0, a1, a2, a3, HUP_CHIP_POR_SPI_DIV16_ADDRESS, HUP_POR_SPI_DIV_DATA, HUP_POR_SPI_DIV16_SEL_FIELD_MASK
// Disable while configuring
movi a0, HUP_CHIP_SPI5_SSIENR_ADDRESS
movi a1, 0
s32i a1, a0, 0
// Barrier after the disable
memw
// Configure CTRLR0
movi a0, HUP_CHIP_SPI5_CTRLR0_ADDRESS
movi a1, SPI_REGS_CTRLR0_DATA
s32i a1, a0, 0
// Set up BAUD register
movi a0, HUP_CHIP_SPI5_BAUDR_ADDRESS
movi a1, SPI_REGS_BAUD_DATA
s32i a1, a0, 0
// Read data till idle
SPI_IDLE a4, a5, a6, a7
// Barrier before the enable
memw
// Reenable
movi a0, HUP_CHIP_SPI5_SSIENR_ADDRESS
movi a1, 1
s32i a1, a0, 0
// Barrier after the enable
memw
// Read the header in flash (12 bytes)
movi a0, SPI_FLASH_READ_HDR_CMD
movi a1, SPI_FLASH_HEADER_SIZE
SPI_TRANSFER a0, a1, a2, a3, a4, a5
// Address in Flash where data is stored
SPI_RECV32 a0, a4, a5, a6, a7
// Address in memory to copy to
SPI_RECV32 a1, a4, a5, a6, a7
// Size of data (in bytes)
SPI_RECV32 a2, a4, a5, a6, a7
SPI_IDLE a4, a5, a6, a7
// Construct second read command.
// Bits 8-31 are the Flash address as read
// from the header. Need to reverse
// it as Flash protocol requires it in big-endian
srli a3, a0, 0
movi a8, SPI_FRAME_MASK
and a9, a3, a8
srli a3, a3, 8
movi a8, SPI_FRAME_MASK
and a8, a3, a8
slli a9, a9, 8
or a9, a8, a9
srli a3, a3, 8
movi a8, SPI_FRAME_MASK
and a8, a3, a8
slli a9, a9, 8
or a9, a8, a9
// a3 == <read cmd> | (<flash address> << 8)
slli a3, a9, 8
movi a4, SPI_FLASH_READ_CMD
or a3, a3, a4
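// Worked example (hypothetical values, assuming SPI_FRAME_MASK is a byte mask 0xff):
// if the header gave flash address 0x00123456 and SPI_FLASH_READ_CMD were 0x03, the
// sequence above builds
//   a9 = 0x563412   (address bytes reversed to big-endian wire order)
//   a3 = 0x56341203 (address in bits 31..8, read command in bits 7..0)
// which is the frame handed to SPI_TRANSFER below.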
// Do next transfer. Size is taken from
// the 'byte size' we read from Flash
SPI_TRANSFER a3, a2, a4, a5, a6, a7
// Convert size in bytes to size in DWORDs (round down).
// Store the remainder in a0 for later
movi a0, 0x3
and a0, a0, a2
srli a2, a2, 2
// Loop over the data, copying it from
// Flash to the target memory address.
.recv_loop:
SPI_RECV32 a3, a4, a5, a6, a7
s32i a3, a1, 0
addi a2, a2, -1
addi a1, a1, 4
bnez a2, .recv_loop
beqz a0, .recv_loop_end
// Copy any leftover, byte by byte
.recv_loop2:
SPI_RECV8 a3, a4, a5, a6, a7
s8i a3, a1, 0
addi a1, a1, 1
addi a0, a0, -1
bnez a0, .recv_loop2
.recv_loop_end:
// Undo register configuration that we did earlier
// before moving on to 2BL
// Barrier before disabling SPI
memw
// Disable SPI5
movi a0, HUP_CHIP_SPI5_SSIENR_ADDRESS
movi a1, 0
s32i a1, a0, 0
// Revert SPI clock divider
REG_RMW_BITS a0, a1, a2, a3, HUP_CHIP_POR_SPI_DIV16_ADDRESS, 0, HUP_POR_SPI_DIV16_SEL_FIELD_MASK
// Disable GPIO register overrides
REG_RMW_BITS a0, a1, a2, a3, HUP_CHIP_POR_GPIO_REG_OVERRIDE_ADDRESS, 0, HUP_POR_GPIO_REG_OVERRIDE_ENABLE_FIELD_MASK
// Disable receiver on GPIO125
REG_RMW_BITS a0, a1, a2, a3, HUP_CHIP_POR_GPIO_IE_GP127_GP96_ADDRESS, 0, HUP_POR_GPIO_IE3_MASK
// Revert GPIOs to software control
REG_RMW_BITS a0, a1, a2, a3, HUP_CHIP_GPIO15_GPIOAFSEL_ADDRESS, 0, HUP_GPIO_GPIOAFSEL_AFSEL_FIELD_MASK
// Jump to 2BL
movi a0, SRAM_RESET_VECTOR
l32i a0, a0, 0
jx a0
.size _StandaloneBootHandler, . - _StandaloneBootHandler
.end no-absolute-literals

View File

@ -0,0 +1,94 @@
/*
* Assembly functions to support context switching
*/
#include <xtensa/cacheasm.h>
.text
.align 4
//
// bss_clear_ASM
//
// Clears all BSS sections in cacheline-size chunks
//
// Arguments:
// [a2] table_start: address of the start of the BSS info table
//
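// Expected table layout (inferred from the loads below):
//   word 0    : entry count
//   words 1.. : per entry, {start address, end address}, both assumed 4-byte aligned
//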
.global bss_clear_ASM
bss_clear_ASM:
entry a1, 32
movi a5, 0
mov a6, a2 // table start
// Load entry count
l32i a7, a6, 0
addi a6, a6, 4
beqz a7, .L5zte
.L0zte:
l32i a8, a6, 0 // get start address, assumed multiple of 4
l32i a9, a6, 4 // get end address, assumed multiple of 4
addi a6, a6, 8 // next entry
sub a10, a9, a8 // a10 = length, assumed a multiple of 4
bbci.l a10, 2, .L1zte
s32i a5, a8, 0 // clear 4 bytes to make length multiple of 8
addi a8, a8, 4
.L1zte:
bbci.l a10, 3, .L2zte
s32i a5, a8, 0 // clear 8 bytes to make length multiple of 16
s32i a5, a8, 4
addi a8, a8, 8
.L2zte:
bbci.l a10, 4, .L3zte
s32i a5, a8, 0 // clear 16 bytes to make length multiple of 32
s32i a5, a8, 4
s32i a5, a8, 8
s32i a5, a8, 12
addi a8, a8, 16
.L3zte:
bbci.l a10, 5, .L4zte
s32i a5, a8, 0 // clear 32 bytes to make length multiple of 64
s32i a5, a8, 4
s32i a5, a8, 8
s32i a5, a8, 12
s32i a5, a8, 16
s32i a5, a8, 20
s32i a5, a8, 24
s32i a5, a8, 28
addi a8, a8, 32
.L4zte:
srli a10, a10, 6 // length is now multiple of 64, divide by 64
loopnez a10, .end_clearzte
dpfwo a8, 384
s32i a5, a8, 0 // clear 64 bytes at a time...
s32i a5, a8, 4
s32i a5, a8, 8
s32i a5, a8, 12
s32i a5, a8, 16
s32i a5, a8, 20
s32i a5, a8, 24
s32i a5, a8, 28
s32i a5, a8, 32
s32i a5, a8, 36
s32i a5, a8, 40
s32i a5, a8, 44
s32i a5, a8, 48
s32i a5, a8, 52
s32i a5, a8, 56
s32i a5, a8, 60
addi a8, a8, 64
.end_clearzte:
addi a7, a7, -1
bnez a7, .L0zte // loop until end of table of *.bss sections
.L5zte:
// Synchronize memory access
dcache_writeback_inv_all a11, a12
memw
retw

View File

@ -0,0 +1,61 @@
/*
* Assembly functions to support context switching
*/
#include "xtruntime-frames-custom.h"
#include <xtensa/coreasm.h>
.text
// a2 = context base
.global _arch_context_switch
_arch_context_switch:
// set 4th bit of ps, i.e. EXCM = 1
rsr.ps a15
movi a14, 1 << XCHAL_PS_EXCM_SHIFT
or a15, a15, a14
wsr.ps a15
// restore ps
l32i a15, a2, UEXC_ps
wsr.eps2 a15
// restore pc
l32i a15, a2, UEXC_pc
wsr.epc2 a15
// restore scompare
l32i a15, a2, UEXC_scompare1
wsr.scompare1 a15
// restore misc01
l32i a15, a2, UEXC_misc0
wsr.misc0 a15
l32i a15, a2, UEXC_misc1
wsr.misc1 a15
// restore sar
l32i a15, a2, UEXC_sar
wsr.sar a15
// restore GPs
l32i a0, a2, UEXC_a0
l32i a1, a2, UEXC_a1
l32i a3, a2, UEXC_a3
l32i a4, a2, UEXC_a4
l32i a5, a2, UEXC_a5
l32i a6, a2, UEXC_a6
l32i a7, a2, UEXC_a7
l32i a8, a2, UEXC_a8
l32i a9, a2, UEXC_a9
l32i a10, a2, UEXC_a10
l32i a11, a2, UEXC_a11
l32i a12, a2, UEXC_a12
l32i a13, a2, UEXC_a13
l32i a14, a2, UEXC_a14
l32i a15, a2, UEXC_a15
l32i a2, a2, UEXC_a2
rfi 2
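// rfi 2 restores PC/PS from EPC2/EPS2 (written above), so execution resumes in the
// next thread's context at its saved interrupt level.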

View File

@ -0,0 +1,46 @@
/*-------------------------------------------------------
|
| context.c
|
| Thread switching functions for 'xtensa' architecture.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2013 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#include "xtruntime-frames-custom.h"
#include "context.h"
UserFrame* switch_to_this[26];
void arch_context_switch(UserFrame* intr_context, UserFrame* prev_context, UserFrame* next_context)
{
// save context
*prev_context = *intr_context;
#ifdef BEMU
switch_to_this[xthal_get_prid()] = next_context;
#else
xthal_window_spill();
// hardware context switch; it should never come back
__asm volatile("mov a2, %0 \n" \
"j _arch_context_switch"
:: "a" (next_context)
: "a2");
#endif
}
void arch_create_context(UserFrame* context, void* pc, void* sp, uint32_t irql_mask, void* arg)
{
context->pc = (uint32_t)pc;
context->a1 = (uint32_t)sp;
context->ps = irql_mask | XCHAL_PS_WOE_MASK | XCHAL_PS_UM_MASK | (1 << XCHAL_PS_CALLINC_SHIFT);
context->a6 = (uint32_t)arg;
return;
}
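// Typical use (illustrative, names hypothetical): the scheduler would prepare a new thread with
//   arch_create_context(&tcb->frame, thread_entry, stack_top, irql_mask, thread_arg);
// and later enter it via arch_context_switch(), which restores a6 (the argument) and resumes at pc.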

View File

@ -0,0 +1,51 @@
/*
* Assembly functions to support context switching
*/
#include <xtensa/cacheasm.h>
.text
.align 4
//
// do_global_ctors_ASM
//
// Call global constructors
//
// Arguments:
// [a2] table_start: address of the start of the global constructor info table
//
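// Expected table layout (inferred from the loads below):
//   word 0    : entry count
//   words 1.. : per entry, a pointer; constructor entries start at (pointer - 8) and are
//               walked downward in 4-byte steps until a -1 terminator is read
//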
.global do_global_ctors_ASM
do_global_ctors_ASM:
entry a1, 32
mov a3, a2 // table start
// Load entry count
l32i a4, a3, 0
addi a3, a3, 4
beqz a4, .L1zte
.L0zte:
l32i a5, a3, 0 // get start address, assumed multiple of 4
addi a3, a3, 4 // next entry
addi a5, a5, -8 // First entry is at symbol - 8
.L1zte:
l32i a6, a5, 0 // Load constructor pointer
beqi a6, -1, .L2zte // Value == -1 signals end of table
callx8 a6 // Call constructor
addi a5, a5, -4 // Next entry
j .L1zte
.L2zte:
addi a4, a4, -1
bnez a4, .L0zte // loop until end of table of constructor sections
.L3zte:
// Synchronize memory access
dcache_writeback_inv_all a7, a8
memw
retw

View File

@ -0,0 +1,8 @@
#ifndef _ATOMIC_H_
#define _ATOMIC_H_
#include "bifrost_types.h"
extern "C" int32_t arch_interlocked_compare_exchange(int32_t* addr, int32_t compare, int32_t val);
#endif

View File

@ -0,0 +1,11 @@
#ifndef _CONTEXT_H_
#define _CONTEXT_H_
#include "xtruntime-frames-custom.h"
#include "bifrost_hs_context.h"
void arch_context_switch(UserFrame* intr_context, UserFrame* prev_context, UserFrame* next_context);
void arch_create_context(UserFrame* context, void* pc, void* sp, uint32_t irql_mask, void* arg);
#endif

View File

@ -0,0 +1,31 @@
#ifndef _INTERRUPT_H_
#define _INTERRUPT_H_
#include "xtruntime-frames-custom.h"
#include "bifrost_types.h"
#include "bifrost_hs_intr.h"
#define TIMER_INTR_VEC HW_INT_TIMER_2 // 2
#define APC_INTR_VEC HW_INT_SW_5 // 21
#define DPC_INTR_VEC HW_INT_SW_10 // 26
#define PROC_FREQUENCY_MHZ (50)
int32_t arch_interrupt_init(hw_arch_intr_info_t* intr_info);
extern "C" uint32_t arch_set_irql(uint32_t irql);
extern "C" uint32_t arch_get_irql();
void arch_set_timer_timeout(uint32_t timeout);
hw_intr_handler_t arch_register_intr_handler(uint32_t vec, hw_intr_handler_t handler, void* context);
void arch_register_exc_handler(uint32_t vec, hw_exc_handler_t handler);
void arch_context_switch(UserFrame* intr_context, UserFrame* prev_context, UserFrame* next_context);
void arch_trigger_interrupt(uint32_t core, uint32_t vec);
void arch_suspend();
#endif

View File

@ -0,0 +1,9 @@
#ifndef _MEM_H_
#define _MEM_H_
#include "bifrost_hs_boot.h"
#include "bifrost_types.h"
int32_t arch_mem_init(hw_arch_bootinfo_t* boot_info);
#endif

View File

@ -0,0 +1,42 @@
/*
* Assembly functions for reading and setting the IRQL (PS.INTLEVEL)
*/
#include <xtensa/coreasm.h>
.text
.global arch_set_irql
.global arch_get_irql
.align 4
arch_get_irql:
entry a1, 16
movi a8, XCHAL_PS_INTLEVEL_MASK
rsr.ps a2
and a2,a2,a8
retw
.align 4
// a2 = the IRQL you would like to set, only the low 4 bits are accepted
arch_set_irql:
entry a1, 16
// only keep low 4 bits of a2
// a8 = 0xF, a9 = ~a8
movi a8, XCHAL_PS_INTLEVEL_MASK
movi a9, ~(XCHAL_PS_INTLEVEL_MASK)
// keep lower 4 bits
and a2,a2,a8
// a10 = old ps, a11 = old irql
rsr.ps a10
and a11, a10, a8
// clear the lower 4 bits of a10
and a10, a10, a9
// add with a2, the new irql
add a10, a10, a2
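// return the previous IRQL to the caller in a2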
mov a2, a11
wsr.ps a10
retw

View File

@ -0,0 +1,202 @@
/*-------------------------------------------------------
|
| interrupt.c
|
| Functions for interrupt handling on the 'xtensa' architecture.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2013 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#include <stdint.h>
#include "hup_chip.h"
#include "interrupt.h"
#include "xtensa/xtruntime.h"
#include "bifrost_system_constants_xtensa.h"
//#include "intc.h"
//
// Interrupt handlers
//
static void *_intr_handler_context_table[HW_PROC_CNT][HW_INT_CNT];
static hw_intr_handler_t _intr_handler_table[HW_PROC_CNT][HW_INT_CNT];
static hw_exc_handler_t _exc_handler_table[HW_PROC_CNT][HW_EXC_CNT];
static uint32_t _timer_timeout;
// Assumes: caller runs at the dispatcher's IRQL
void arch_set_timer_timeout(uint32_t timeout)
{
_timer_timeout = timeout;
xthal_set_ccompare(TIMER_INTR_VEC, xthal_get_ccount() + _timer_timeout);
return;
}
void arch_intr_dispatch(uint32_t vec, UserFrame *intr_context)
{
uint32_t coreid = xthal_get_prid();
// validate the vector before it is used to index the handler tables
if (vec >= HW_INT_CNT)
{
// TODO PRINT INVALID VECTOR
return;
}
if (vec == TIMER_INTR_VEC)
{
xthal_set_ccompare(TIMER_INTR_VEC, xthal_get_ccount() + _timer_timeout);
}
if (_intr_handler_table[coreid][vec] == NULL)
{
// TODO PRINT UNHANDLED INTERRUPT
}
else
{
_intr_handler_table[coreid][vec](intr_context, _intr_handler_context_table[coreid][vec]);
}
return;
}
void arch_trigger_interrupt(uint32_t core, uint32_t vec)
{
if (core == xthal_get_prid())
{
xthal_set_intset(1u << vec);
}
else
{
// TODO:
// lib_intc::IntCtrl_SetInt(core, vec);
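// write the vector into the INTC interrupt-data register; the IPI handler
// below (arch_inter_proc_intr_handler) reads it back and raises the matching
// local interrupt on the receiving core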
HUP_chip *chip = (HUP_chip * )(0x0);
chip->intc.proc_ints_t[ke_get_current_core()].proc_ints._int[3].interrupt_data.data = vec;
}
}
void arch_inter_proc_intr_handler(void *context, void *ignored)
{
uint32_t vec;
HUP_chip *chip = (HUP_chip * )(0x0);
vec = chip->intc.proc_ints_t[ke_get_current_core()].proc_ints._int[3].interrupt_data.data;
chip->intc.proc_ints_t[ke_get_current_core()].proc_ints._int[3].interrupt_data.data = 0;
xthal_set_intset(1u << vec);
return;
}
void arch_exc_dispatch(uint32_t vec, UserFrame *context)
{
uint32_t coreid = xthal_get_prid();
if (vec >= HW_EXC_CNT)
{
// TODO PRINT INVALID VECTOR
return;
}
if (_exc_handler_table[coreid][vec] == NULL)
{
// TODO PRINT UNHANDLED EXCEPTION
}
else
{
_exc_handler_table[coreid][vec]((uint64_t) context->pc, (uint64_t) context->a1, (uint64_t) context->exccause);
}
return;
}
int32_t arch_interrupt_init(hw_arch_intr_info_t *intr_info)
{
if (intr_info == NULL)
return 1;
HUP_chip *chip = (HUP_chip * )(0x0);
arch_set_irql(HW_IRQL_DISABLED_LEVEL);
// Enable dispatch
_xtos_dispatch_level1_interrupts();
_xtos_dispatch_level2_interrupts();
_xtos_dispatch_level3_interrupts();
uint32_t coreid = xthal_get_prid();
for (int j = 0; j < HW_INT_CNT; j++)
{
_intr_handler_context_table[coreid][j] = NULL;
_intr_handler_table[coreid][j] = NULL;
}
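// hook every exception cause into the common dispatcher, except the causes
// presumably left to the default XTOS handlers (syscall, level-1 interrupt
// dispatch, alloca)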
for (int i = 0; i < HW_EXC_CNT; i++)
{
if (i != XCHAL_EXCCAUSE_SYSTEM_CALL &&
i != XCHAL_EXCCAUSE_LEVEL1_INTERRUPT &&
i != XCHAL_EXCCAUSE_ALLOCA)
{
_xtos_set_exception_handler(i, (_xtos_handler) arch_exc_dispatch);
}
}
for (int j = 0; j < HW_EXC_CNT; j++)
{
_exc_handler_table[coreid][j] = NULL;
}
for (int i = 0; i < HW_INT_CNT; i++)
{
_xtos_set_interrupt_handler(i, (_xtos_handler) arch_intr_dispatch);
}
// Enable IPI int vector at INTC
chip->intc.proc_ints_t[ke_get_current_core()].proc_ints.interrupt_line_mask.AsUINT32 = 1 << 3;
arch_register_intr_handler(HW_INT_INTC_3, arch_inter_proc_intr_handler, NULL);
// enable all interrupts but set IRQL = DISABLED
xthal_int_enable(1 << TIMER_INTR_VEC);
xthal_int_enable(1 << DPC_INTR_VEC);
xthal_int_enable(1 << APC_INTR_VEC);
xthal_int_enable(1 << HW_INT_INTC_3);
intr_info->timer_vec = TIMER_INTR_VEC;
intr_info->apc_vec = APC_INTR_VEC;
intr_info->dpc_vec = DPC_INTR_VEC;
xthal_set_ccompare(TIMER_INTR_VEC, 0xFFFFFFFF);
return 0;
}
hw_intr_handler_t arch_register_intr_handler(uint32_t vec, hw_intr_handler_t handler, void *context)
{
if (vec >= HW_INT_CNT)
return NULL;
uint32_t core = xthal_get_prid();
hw_intr_handler_t old = _intr_handler_table[core][vec];
_intr_handler_table[core][vec] = handler;
_intr_handler_context_table[core][vec] = context;
return old;
}
void arch_register_exc_handler(uint32_t vec, hw_exc_handler_t handler)
{
if (vec >= HW_EXC_CNT)
return;
uint32_t core = xthal_get_prid();
_exc_handler_table[core][vec] = handler;
return;
}
void arch_suspend()
{
__asm volatile("waiti 0");
}

View File

@ -0,0 +1,204 @@
/*-------------------------------------------------------
|
| main.c
|
| Entry point for 'xtensa' architecture.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2013 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#include "conf_defs.h"
#include <xtensa/config/core.h>
char kstack[HW_PROC_CNT][1024*1024*2];
extern int kmain(void);
int main(void)
{
// Set up stacks first of all
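// a1 is the stack pointer on xtensa; point it at the top of this core's
// kernel stack before any locals are touched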
__asm volatile(
"mov a1, %[stack]\n"
:
: [stack] "r" (kstack[xthal_get_prid()] + sizeof(kstack[0]))
: "a1"
);
if(xthal_get_prid() > 3)
{
__asm volatile("waiti 15");
}
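// crude per-core startup stagger: an arbitrary delay proportional to the core id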
unsigned int wait = 2986 * xthal_get_prid();
while(wait--);
return kmain();
}
#if 0
#include "bifrost_thread.h"
#include "bifrost_private.h"
extern void system_init();
extern void sys_init_memory();
extern void platform_init();
extern UINTPTR hw_bssInfo[];
extern UINTPTR hw_ctorInfo[];
// Set aside space for kernel stacks
// Align to cacheline boundaries to prevent false
// sharing issues.
UINT8 kernelStack[HW_PROC_CNT][HW_KERNEL_STACK_SIZE] __attribute__ ((aligned (HW_MAX_CACHELINE_SIZE),section(".ddr_wb.bss")));
//
// This variable tracks whether a core should
// start executing Bifrost code or not. The point
// at which the 'stall' occurs, as well as the
// definition of this variable, may change from
// platform to platform.
//
extern volatile UINT8 hw_tsStallFlag[HW_PROC_CNT];
//
// This variable tracks whether memory init is finished
//
MEM_DDR_UC volatile UINT32 hw_memInitFinished = 0;
MEM_DDR_UC volatile UINT32 hw_bssInitFinished = 0;
EXTERN_C void bss_clear_ASM(UINT32 table_start);
EXTERN_C void sram_stall_ASM();
EXTERN_C void do_global_ctors_ASM(UINT32 table_start);
extern HW_RESULT arch_int_startTimer0();
extern void arch_int_timer0_handler(...);
// Can't declare ANY variables in the function
// in which we switch the stack. Otherwise,
// they'll write above the top of our stack.
// So main will just switch the stack and then
// execute its other tasks in a second function
int main_body();
int main()
{
// Set up stacks first of all
asm volatile(
"mov a1, %[stack]\n"
:
: [stack] "r" (kernelStack[xthal_get_prid()] + sizeof(kernelStack[0]))
: "a1"
);
main_body();
// Should never reach here--this
// return will NOT work properly
}
extern void testDriver(void* dwpid);
extern void testSlave(void* dwpid);
int main_body()
{
// memory initialization
if(hw_getCoreNum() == 0)
{
sys_init_memory();
if(hw_pTestConfigs->bfinit.BSS_INIT)
{
bss_clear_ASM((UINT32)hw_bssInfo);
}
hw_bssInitFinished = 1;
plat_post(BOOT_STATUS_DONE_BSS_INIT, HW_INTERNAL_TESTID);
do_global_ctors_ASM((UINT32)hw_ctorInfo);
plat_post(BOOT_STATUS_DONE_MEM_INIT, HW_INTERNAL_TESTID);
hw_memInitFinished = 1;
}
else
{
while(hw_bssInitFinished == 0);
arch_int_startTimer0();
while(hw_memInitFinished == 0);
}
system_init();
UINT32 dwpid = hw_getCoreNum();
// Wait for the driver to setup the global IDT
if(dwpid != hw_read32(&hw_pTestConfigs->tpid))
{
while(!hw_bIntrInitFinished);
}
ts_exc_init();
ts_int_init();
hw_thread_init();
hw_bIntThreadInitFinished = TRUE;
// Only active cores should run anything
if(hw_activeCoreList[dwpid])
{
// If there are any PLATFORM SPECIFIC initialization steps,
// a file should be created under src/bifrost/platform/* that has
// the code to be executed. For all other platforms that support
// this architecture, a STUB function should be created that does
// the equivalent or nothing at all.
platform_init();
UINT32 tid;
if(dwpid == hw_read32(&hw_pTestConfigs->tpid))
{
hw_thread_create(testDriver,
(void*)dwpid,
PRIORITY_DEFAULT,
THREAD_DEFAULT_STACK_SIZE,
&tid);
}
else
{
while(!hw_bIntThreadInitFinished);
hw_thread_create(testSlave,
(void*)dwpid,
PRIORITY_DEFAULT,
THREAD_DEFAULT_STACK_SIZE,
&tid);
}
hw_thread_start(tid);
}
while(1);
// Should never reach here--this
// return will NOT work properly
}
void core_cleanup()
{
// When the driver finishes, perform a magic write
// that will end the simulation and communicate
// pass/fail status.
if(hw_getCoreNum() == bifrostCachedGlobals.tpid)
{
if(td_testDriverGlobalPass)
{
hw_write32(&hw_extcomSpace.eot_flag, 0xC001C0DE);
}
else
{
hw_write32(&hw_extcomSpace.eot_flag, 0xBAADC0DE);
}
}
hw_terminate();
}
#endif

View File

@ -0,0 +1,9 @@
#include "bifrost_hs_boot.h"
#include "mem.h"
int32_t arch_mem_init(hw_arch_bootinfo_t* mem_info)
{
mem_info->mem_count = 0;
return 0;
}

View File

@ -0,0 +1,142 @@
######################################################
#
# sources.imk
#
# Contains all C source information for the
# Bifrost build for xtensa architecture.
#
# Copyright (C) 2016 Microsoft Corporation
# All Rights Reserved
# Confidential and Proprietary
#
######################################################
ARCHTYPE := xtensa
#
# Kernel CFLAGS
#
XTENSA_KERNEL_CFLAGS = $(XTENSA_DEFAULT_CFLAGS) $(XTENSA_WARNING_CFLAGS)
#
# Xtensa architecture-specific sources
#
#
# Kernel binary selection--if CUSTOM_KERNEL_CFLAGS
# is specified, build a testsuite-specific
# version of the kernel under BIN_DIR--otherwise,
# use the shared kernel.
#
XTENSA_KERNEL_BINARY = $(if $(CUSTOM_KERNEL_CFLAGS),$(BIN_DIR)/obj/xtensa/kernel/bifrost_kernel.so,$(OBJ_ROOT)/xtensa/$(BFCOMP_MODE)/kernel/bifrost_kernel.so)
# These files will be compiled as Assembly instead of C++.
# Apply the appropriate COMPILE_LANGUAGE override for them
XTENSA_ASM_SOURCES := $(BF_SRC_ROOT)/core/hal/xtensa/bss.s \
$(BF_SRC_ROOT)/core/hal/xtensa/atomic.s \
$(BF_SRC_ROOT)/core/hal/xtensa/context.S \
$(BF_SRC_ROOT)/core/hal/xtensa/ctor.s \
$(BF_SRC_ROOT)/core/hal/xtensa/interrupt.S
XTENSA_O3OPT_SOURCES := $(SHARED_KERNEL_O3OPT_SOURCES) \
$(DRIVER_O3OPT_SOURCES) \
#$(BF_SRC_ROOT)/shared/driver/driver_helper/td_checkHeap.c \
$(BF_SRC_ROOT)/system/perfmon/bifrost_perfmon.c \
$(BF_SRC_ROOT)/system/perfmon/td_perfmon.c
$(foreach FILE,$(XTENSA_O3OPT_SOURCES),$(eval $(FILE)_FLAGS := -O3))
XTENSA_SOURCES := \
$(SHARED_KERNEL_SOURCES) \
$(DRIVER_SOURCES) \
$(XTENSA_O3OPT_SOURCES) \
$(XTENSA_SYS_SOURCES)
XTENSA_HAL_SOURCES := \
$(XTENSA_ASM_SOURCES) \
$(BF_SRC_ROOT)/core/hal/xtensa/bifrost_hs_impl.c \
$(BF_SRC_ROOT)/core/hal/xtensa/context.c \
$(BF_SRC_ROOT)/core/hal/xtensa/interrupt.c \
$(BF_SRC_ROOT)/core/hal/xtensa/main.c \
$(BF_SRC_ROOT)/core/hal/xtensa/mem.c
XTENSA_CORE_APP_SOURCES := \
$(SHARED_CORE_APP_SOURCES)
XTENSA_HEADERS := $(SHARED_HEADERS) \
$(BF_SRC_ROOT)/system/include/bifrost_system_api_xtensa.h \
$(BF_SRC_ROOT)/system/include/bifrost_system_constants_xtensa.h
XTENSA_IDIRS := \
$(BF_SRC_ROOT)/core/hal/xtensa/inc \
$(BF_TOOL_ROOT)/lsp/src
# Add library IDIRS
ARCHTYPE := xtensa
$(eval $(kernel_lib_template))
# For Xtensa compilation only, split out files which
# can't be compiled into bifrost_kernel.so--the symbols they
# need aren't available until later in the process.
# TODO: come up with a way that doesn't involve explicitly listing the names
XTENSA_SOURCES := $(filter-out $(BF_SRC_ROOT)/system/tables/ddr_sram_bak_table.c $(BF_SRC_ROOT)/system/tables/heap_table.c $(BF_SRC_ROOT)/system/tables/bss_table.c $(BF_SRC_ROOT)/system/ctor_table.c,$(XTENSA_SOURCES))
define kernel_objs_template
FLAG := $$(call TOLOWER,$$(FLAG_UPPER))
#
# Xtensa kernel objects and rules
#
XTENSA_KERNEL_OBJS := $$(patsubst $(BF_SRC_ROOT)/%,$(OBJ_ROOT)/xtensa/$$(FLAG)/kernel/%.o,$(XTENSA_SOURCES))
XTENSA_CORE_APP_OBJS := $$(patsubst $(BF_SRC_ROOT)/%,$(OBJ_ROOT)/xtensa/$$(FLAG)/kernel/%.o,$(XTENSA_CORE_APP_SOURCES))
XTENSA_HAL_OBJS := $$(patsubst $(BF_SRC_ROOT)/%,$(OBJ_ROOT)/xtensa/$$(FLAG)/kernel/%.o,$(XTENSA_HAL_SOURCES))
$$(filter %.s.o %.S.o,$$(XTENSA_KERNEL_OBJS) $$(XTENSA_HAL_OBJS)): COMPILE_LANGUAGE := assembler-with-cpp
$$(XTENSA_CORE_APP_OBJS): BIFROST_IDIRS += $(BIFROST_APP_IDIRS)
$$(XTENSA_KERNEL_OBJS) $$(XTENSA_CORE_APP_OBJS) $$(XTENSA_HAL_OBJS) $$(XTENSA_PLAT_$(FLAG_UPPER)_OBJS): $(OBJ_ROOT)/xtensa/$$(FLAG)/kernel/%.o : $(BF_SRC_ROOT)/% $(XTENSA_HEADERS)
@echo Compiling $$< to $$@...
@mkdir -p $$(@D)
@$(XT_COMPILE) -c \
-x $$(COMPILE_LANGUAGE) \
--xtensa-core=$(CMN_CORE) \
$$(XTENSA_KERNEL_CFLAGS) $(FLAG_UPPER:%=-D%) \
$$($$<_FLAGS) \
$$(BIFROST_IDIRS:%=-I%) \
$(XTENSA_IDIRS:%=-I%) \
$(XTENSA_SYS_IDIRS:%=-I%) \
$$< -o $$@
#
# Bifrost kernel binary for this architecture
#
$(OBJ_ROOT)/xtensa/$$(FLAG)/kernel/bifrost_kernel.so: $$(XTENSA_KERNEL_OBJS)
@echo Linking $$@...
@mkdir -p $$(@D)
$(XT_LD) --multilib-dir=nort \
--xtensa-core=$(CMN_CORE) \
-r $$^ -o $$@
$(OBJ_ROOT)/xtensa/$$(FLAG)/kernel/hal.so: $$(XTENSA_HAL_OBJS)
@echo Linking $$@...
@mkdir -p $$(@D)
$(XT_LD) --multilib-dir=nort \
--xtensa-core=$(CMN_CORE) \
-r $$^ -o $$@
$(OBJ_ROOT)/xtensa/$$(FLAG)/kernel/bifrost_testapp.so: $$(XTENSA_CORE_APP_OBJS)
@echo Linking $$@...
@mkdir -p $$(@D)
$(XT_LD) --multilib-dir=nort \
--xtensa-core=$(CMN_CORE) \
-r $$^ -o $$@
endef
$(foreach FLAG_UPPER,$(BFCOMP_FLAGS_UPPER),$(eval $(kernel_objs_template)))

View File

@ -0,0 +1,76 @@
/*-------------------------------------------------------
|
| terminate.c
|
| Functions to terminate execution on a core for the 'xtensa' architecture.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2014 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#include "bifrost_private.h"
void hw_terminate()
{
HW_PLATFORM platform = hw_get_platform();
UINT32 dwpid = hw_getCoreNum();
switch(platform)
{
case ISS_PLATFORM:
case VISS_PLATFORM:
if(dwpid == bifrostCachedGlobals.tpid || hw_read32(&hw_pTestConfigs->bfinit.VPA) || !hw_read32(&hw_pTestConfigs->bfinit.STANDALONE))
{
// In standalone, unconditionally print out a message
// to announce successful exit of test
if(dwpid == bifrostCachedGlobals.tpid && hw_read32(&hw_pTestConfigs->bfinit.STANDALONE))
{
if(td_testDriverGlobalPass)
{
td_printf("Successful run.\n");
}
else
{
td_printf("Unsuccessful run.\n");
}
}
// Terminate this core with a simcall
asm volatile(
"movi a2, %[code];"
"simcall;"
:
: [code] "i" (SYS_exit)
: "a2"
);
}
else
{
// Do continuous interrupt wait instructions
// to halt processor execution
while(1)
{
asm volatile("waiti 15");
}
}
break;
case SIM_PLATFORM:
case EMU_PLATFORM:
case CHIP_PLATFORM:
case RPP_PLATFORM:
// Do continuous interrupt wait instructions
// to halt processor execution
while(1)
{
asm volatile("waiti 15");
}
break;
default:
break;
}
}

View File

@ -0,0 +1,526 @@
/*-----------------------------------------------------
|
| hw_alloc.c
|
|------------------------------------------------------
|
| Revision History :
|
| #5 - 15 February 2005 J. Hanes
| Drop hw_api_trace_enable; use SHARED_ERRMSG, SHARED_TRACE
|
| #5 - 15 February 2005 J. Hanes
| Use SHARED_ERRMSG for print level control
| SHARED_TRACE for execution trace
|
| #5 - 13 January 2005 D. McDonnell
| changed rc var to type void * to support 64 bit targets
|
| #4 - 12 April J. Hanes
| Return (void *) 0 on fail, not HW_FAIL.
|
| #3 - 12 January 2004 J. Hanes
| Use hw_api_trace_enable to control interface trace.
| Use ctp_printf() instead of HW_printf()
|
| #2 - 8 January 2004 J. Hanes
| Use stop-on-error to control stop on error.
|
|------------------------------------------------------
|
| Copyright (C) 2003 Microsoft Corporation
| All Rights Reserved
| Confidential and Proprietary
|
|------------------------------------------------------
*/
#include <bifrost_mem.h>
#include "bifrost_alloc.h"
#include "bifrost_lock.h"
#include "bifrost_print.h"
int32_t allocs;
int32_t frees;
#define HW_HEAP_SIZE (1024*1024*16)
char heap[HW_HEAP_SIZE];
char *heap_ptr;
static hw_spin_lock_t _lock;
void hw_alloc_setup()
{
heap_ptr = &heap[0];
ke_spin_lock_init(&_lock);
}
void *hw_alloc(size_t size)
{
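// simple lock-protected bump allocator: no alignment guarantees and no reuse
// (hw_free below is a no-op)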
hw_irql_t irql;
irql = ke_spin_lock_raise_irql(&_lock, HW_IRQL_DISABLED_LEVEL);
void *result = NULL;
if (heap_ptr + size <= heap + HW_HEAP_SIZE)
{
result = heap_ptr;
heap_ptr += size;
}
ke_spin_unlock_lower_irql(&_lock, irql);
// Alloc profiling
//if(ke_get_current_core() == 0)
// hw_printf("Alloced %d: 0x%X\n", ke_interlocked_increment(&allocs,1), result);
return result;
}
void hw_free(void *ptr)
{
// Alloc profiling
// if(ke_get_current_core() == 0)
// hw_printf("Freed %d: 0x%X\n", ke_interlocked_increment(&frees,1), ptr);
return;
}
//
//
////
//// Hooks for preprocessing heap types and blocks,
//// and for translating addresses.
//// To be overridden by project-specific
//// functions, if necessary.
////
//HW_HEAP_TYPE sys_mapHeapType(HW_HEAP_TYPE type) __attribute__ ((weak));
//HW_HEAP_TYPE sys_mapHeapType(HW_HEAP_TYPE type)
//{
// return type;
//}
//void* sys_translateAllocAddr(HW_HEAP_TYPE type, void* addr) __attribute__ ((weak));
//void* sys_translateAllocAddr(HW_HEAP_TYPE type, void* addr)
//{
// return addr;
//}
//void* sys_untranslateAllocAddr(HW_HEAP_TYPE type, void* addr) __attribute__ ((weak));
//void* sys_untranslateAllocAddr(HW_HEAP_TYPE type, void* addr)
//{
// return addr;
//}
//
////
//// Modifies a size, pattern, and mask so that
//// they all match a certain align boundary.
//// This boundary must be a power of 2.
//// 'description' is printed in the resulting
//// message if a parameter is modified.
////
//void alignAllocationWithName(PTR_FIELD* size,
// PTR_FIELD* pattern,
// PTR_FIELD* mask,
// PTR_FIELD alignValue,
// const char* caller,
// const char* description)
//{
// if(*size % alignValue != 0)
// {
// hw_printf(HW_ALLOC_DEBUG, "%s: allocation request size 0x%llx not aligned to %s 0x%llx. Bifrost has corrected it, new value is: 0x%llx.\n",
// caller, *size, description, alignValue, *size + (alignValue - (*size % alignValue)));
// *size += (alignValue - (*size % alignValue));
// }
//
// if(*pattern % alignValue != 0)
// {
// hw_printf(HW_ALLOC_DEBUG, "%s: allocation request pattern 0x%llx not aligned to %s 0x%llx. Bifrost has corrected it, new value is: 0x%llx.\n",
// caller, *pattern, description, alignValue, *pattern - (*pattern % alignValue));
// *pattern -= (*pattern % alignValue);
// }
//
// //
// // This is guaranteed to produce the correct mask because
// // all allocation correction sizes are powers of 2.
// //
// UINTPTR alignMask = alignValue - 1;
//
// if((*mask & alignMask) != alignMask)
// {
// hw_printf(HW_ALLOC_DEBUG, "%s: allocation request mask 0x%llx does not mask sufficient bits for %s 0x%llx. Bifrost has corrected it, new value is: 0x%llx.\n",
// caller, *mask, description, alignValue, *mask | alignMask);
// *mask |= alignMask;
// }
//}
//
//static HW_RESULT
//find_next_unused_record( HW_ALLOC_TRACKER* allocTracker, HW_ALLOC_RECORD** returnRecord )
//{
// HW_RESULT rc = HW_E_NOT_FOUND;
// BOOL found = FALSE;
// UINT32 ctr = 0;
//
// HW_ALLOC_RECORD (*allocRecords)[HW_ALLOC_NUM_BLOCKS] = &allocTracker->records;
// UINT32 next_unused = hw_read32(&allocTracker->next_unused);
//
// HW_ALLOC_RECORD* currentRecord = NULL;
//
// while ( !found && ( ctr < HW_ALLOC_NUM_BLOCKS ) )
// {
// currentRecord = &(*allocRecords)[next_unused];
// if ( hw_readptr(&currentRecord->addr) == (UINTPTR)NULL )
// {
// found = TRUE;
// rc = HW_S_OK;
// }
//
// next_unused++;
//
// // Wrap around array if necessary
// if ( next_unused >= HW_ALLOC_NUM_BLOCKS )
// {
// next_unused = 0;
// }
//
// ctr++;
// }
//
// if(!found)
// {
// *returnRecord = NULL;
// hw_errmsg("%s: no unused record found (max number of allocs reached)!\n", __func__);
// rc = HW_E_OVERFLOW;
// }
// else
// {
// *returnRecord = currentRecord;
// hw_write32(&allocTracker->next_unused, next_unused);
// }
//
// return rc;
//}
//
//static HW_RESULT
//find_record_for_address( UINTPTR addr, HW_ALLOC_TRACKER* allocTracker, HW_ALLOC_RECORD** returnRecord )
//{
// HW_RESULT rc = HW_E_NOT_FOUND;
// BOOL found = FALSE;
// UINT32 ctr = 0;
//
// UINT32 entry_cnt = hw_read32(&allocTracker->count);
// UINT32 entries_found = 0;
// HW_ALLOC_RECORD (*allocRecords)[HW_ALLOC_NUM_BLOCKS] = &allocTracker->records;
//
// HW_ALLOC_RECORD* currentRecord = NULL;
//
// while ( !found && ctr < HW_ALLOC_NUM_BLOCKS && entries_found < entry_cnt )
// {
// currentRecord = &(*allocRecords)[ctr];
// UINTPTR rec_addr = hw_readptr(&currentRecord->addr);
//
// if( rec_addr != (UINTPTR)NULL )
// {
// if ( rec_addr == addr )
// {
// found = TRUE;
// rc = HW_S_OK;
// }
// entries_found++;
// }
//
// ctr++;
// }
//
// if(!found)
// {
// *returnRecord = NULL;
// hw_errmsg("%s: no record found for addr 0x%x!\n", __func__, addr);
// rc = HW_E_NOT_FOUND;
// }
// else
// {
// *returnRecord = currentRecord;
// }
//
// return rc;
//}
//
////
//// Function: hw_alloc
////
//// Allocates heap addresses. The addresses
//// returned by this function are already set
//// up in CPU pagetables and are available for
//// immediate use by the caller.
////
//// Parameters:
//// type - Heap type. See <HW_HEAP_TYPE>
//// size - Size of the allocation.
//// pattern, mask - This function guarantees that:
//// <returned address> & mask == pattern & mask
//// To request an exact address set mask to HW_MASK_EXACT_ADDR.
////
//// Returns:
//// The address of the allocated memory block.
////
//// See Also:
//// <hw_free>
////
////
//void* hw_allocAlignedWithType(HW_HEAP_TYPE type,
// SIZE_FIELD size,
// PTR_FIELD pattern,
// PTR_FIELD mask)
//{
// hw_log(HW_EVENT_CHECKPOINT, (UINTPTR)__func__, (UINTPTR)"enter", 0, 0);
// //
// // Get the test ID of the current test
// //
// HW_TESTID testID = hw_getMyInstanceID();
//
// type = sys_mapHeapType(type);
//
// HW_HEAP_BLOCK heapBlockID = sys_getHeapBlockID(type);
// if(heapBlockID == HW_HEAP_BLOCK_INVALID)
// {
// hw_errmsg("%s, invalid heap type (%d)\n", __func__, type);
// return NULL;
// }
//
// void* ret = NULL;
// HW_ALLOC_TRACKER* allocTracker = (HW_ALLOC_TRACKER*)hw_readptr(&testSlaveVars->allocTracker);
// HW_ALLOC_RECORD* allocRecord = NULL;
// HW_RESULT record_find_rc = HW_S_OK;
//
// if(hw_getCoreNum() == bifrostCachedGlobals.tpid)
// //
// // If this thread is the driver, use the
// // td_allocAligned function.
// //
// {
// ret = td_allocAligned(heapBlockID, size, pattern, mask, testID, bifrostCachedGlobals.tpid);
// // Find an appropriate alloc record
// if(1)
// {
// record_find_rc = find_next_unused_record(allocTracker, &allocRecord);
// }
// }
// else
// {
// //
// // Align all memory allocations to cacheline
// // boundaries.
// //
// alignAllocation(&size, &pattern, &mask, HW_CACHELINE_SIZE, "cacheline boundary");
//
// HW_MESSAGE message = {{0}};
// message.metadata.command = MSG_TS_CMD_ALLOC;
// message.metadata.size = 5 * sizeof(PTR_FIELD);
//
// PTR_FIELD* datafield = (PTR_FIELD*) message.data;
// datafield[0] = (PTR_FIELD)heapBlockID;
// datafield[1] = size;
// datafield[2] = pattern;
// datafield[3] = mask;
// datafield[4] = testID;
//
// hw_irql_t prev_irql = hw_raise_irql(HW_IRQL_DISABLED_LEVEL);
//
// placeMessage(&testSlaveVars->driverMailbox, &message);
//
// // While we wait for the driver to respond, find
// // an appropriate alloc record
// if(1)
// {
// record_find_rc = find_next_unused_record(allocTracker, &allocRecord);
// }
//
// while(!getMessage(&testSlaveVars->slaveMailbox, &message));
// ret = (void*)(UINTPTR)*(PTR_FIELD*) message.data;
//
// hw_lower_irql(prev_irql);
//
// //
// // Raise a fatal error if the allocation or
// // the alloc tracking failed.
// //
// if(NULL == ret || record_find_rc != HW_S_OK)
// {
// if(hw_bAllocFailuresAreFatal)
// {
// hw_errmsg("%s(type=0x%x, size=0x%llx, pattern=0x%llx, mask=0x%llx, testID=%d) failed\n",
// __func__, type, size, pattern, mask, testID);
// hw_fatal();
// }
// else
// {
// hw_critical("WARNING: %s(type=0x%x, size=0x%llx, pattern=0x%llx, mask=0x%llx, testID=%d) failed\n",
// __func__, type, size, pattern, mask, testID);
// }
// }
// }
//
// // If the allocation succeeded, record it
// if(ret != NULL)
// {
// // Record allocation
// hw_printf(HW_ALLOC_DEBUG, "%s: recording addr 0x%x size 0x%llx type %d -> 0x%x\n", __func__, ret, size, type, allocRecord);
//
// hw_writeptr(&allocRecord->addr, (UINTPTR)ret);
// hw_writeptr(&allocRecord->size, (SIZE_T)size);
// hw_write32((UINT32*)&allocRecord->type, type);
// hw_write32(&allocTracker->count, hw_read32(&allocTracker->count) + 1);
// }
//
// ret = sys_translateAllocAddr(type, ret);
//
// hw_log(HW_EVENT_CHECKPOINT, (UINTPTR)__func__, (UINTPTR)"exit", 0, 0);
// return ret;
//}
//
////
//// Function: hw_free
////
//// Free a previously allocated physical address
////
//// Parameters:
//// type - Heap type. See <HW_HEAP_TYPE>
//// addr - Address of the block to free
////
//// Returns:
//// Nothing
////
//// See Also:
//// <hw_alloc>
////
//void hw_freeWithType(HW_HEAP_TYPE type, void* addr)
//{
// HW_TESTID testID = hw_getMyInstanceID();
// HW_ALLOC_TRACKER* allocTracker = (HW_ALLOC_TRACKER*)hw_readptr(&testSlaveVars->allocTracker);
// HW_ALLOC_RECORD* allocRecord;
// HW_RESULT record_find_rc = HW_S_OK;
//
// type = sys_mapHeapType(type);
// HW_HEAP_BLOCK heapBlockID = sys_getHeapBlockID(type);
// if(heapBlockID == HW_HEAP_BLOCK_INVALID)
// {
// hw_errmsg("%s, invalid heap type (%d)\n", __func__, type);
// return;
// }
//
// addr = sys_untranslateAllocAddr(type, addr);
//
// // Record allocation, and
// // use this information to invalidate cache if necessary.
// // Kernel code only frees at a point where cache invalidation
// // is no longer required.
// if(1)
// {
// record_find_rc = find_record_for_address((UINTPTR)addr, allocTracker, &allocRecord);
//
// if(record_find_rc == HW_S_OK)
// {
// if(hw_heapBlockInfo[type].cached)
// {
// hw_cacheInvalidateBuffer((void*)hw_readptr(&allocRecord->addr), hw_readptr(&allocRecord->size));
// }
//
// // Clear allocation record
// hw_writeptr(&allocRecord->addr, (UINTPTR)NULL);
// hw_write32(&allocTracker->count, hw_read32(&allocTracker->count) - 1);
// }
// }
//
// if(hw_getCoreNum() == bifrostCachedGlobals.tpid)
// //
// // If this thread is the driver, use the
// // td_free function.
// //
// {
// td_free(heapBlockID, (UINTPTR)addr, testID);
// }
// else
// {
// HW_MESSAGE message = {{0}};
// message.metadata.command = MSG_TS_CMD_FREE;
// message.metadata.size = 3 * sizeof(PTR_FIELD);
// PTR_FIELD* datafield = (PTR_FIELD*) message.data;
//
// datafield[0] = (PTR_FIELD) heapBlockID;
// datafield[1] = (UINTPTR)addr;
// datafield[2] = testID;
//
// placeMessage(&testSlaveVars->driverMailbox, &message);
// }
//}
//
////
//// Free all previously allocated heap addresses
////
//void hw_freeAllWithType(HW_HEAP_TYPE type)
//{
// HW_TESTID testID = hw_getMyInstanceID();
// HW_ALLOC_TRACKER* allocTracker = (HW_ALLOC_TRACKER*)hw_readptr(&testSlaveVars->allocTracker);
// HW_ALLOC_RECORD (*allocRecords)[HW_ALLOC_NUM_BLOCKS] = &allocTracker->records;
//
// type = sys_mapHeapType(type);
// HW_HEAP_BLOCK heapBlockID = sys_getHeapBlockID(type);
// if(heapBlockID == HW_HEAP_BLOCK_INVALID)
// {
// hw_errmsg("%s, invalid heap type (%d)\n", __func__, type);
// return;
// }
//
// // Record allocation, and
// // use this information to invalidate cache if necessary.
// // Kernel code only frees at a point where cache invalidation
// // is no longer required.
// if(1)
// {
// HW_ALLOC_RECORD* currentRecord;
// UINT32 entry_cnt = hw_read32(&allocTracker->count);
// UINT32 entries_found = 0;
// UINT32 entries_freed = 0;
// for ( UINT32 ctr = 0; ctr < HW_ALLOC_NUM_BLOCKS && entries_found < entry_cnt; ctr++ )
// {
// currentRecord = &(*allocRecords)[ctr];
// UINTPTR addr = hw_readptr(&currentRecord->addr);
// if( addr != (UINTPTR)NULL )
// {
// if( (HW_HEAP_TYPE)hw_read32((UINT32*)&currentRecord->type) == type )
// {
// if(hw_heapBlockInfo[type].cached)
// {
// hw_cacheInvalidateBuffer((void*)addr, hw_readptr(&currentRecord->size));
// }
// // Clear allocation record
// hw_writeptr(&currentRecord->addr, (UINTPTR)NULL);
// entries_freed++;
// }
// entries_found++;
// }
// }
// hw_write32(&allocTracker->count, entry_cnt - entries_freed);
// }
//
// if(hw_getCoreNum() == bifrostCachedGlobals.tpid)
// //
// // If this thread is the driver, use the
// // td_freeAll function.
// //
// {
// td_freeAll(heapBlockID, testID);
// }
// else
// {
// HW_MESSAGE message = {{0}};
// message.metadata.command = MSG_TS_CMD_FREE_ALL;
// message.metadata.size = 2 * sizeof(PTR_FIELD);
//
// PTR_FIELD* datafield = (PTR_FIELD*) message.data;
// datafield[0] = (PTR_FIELD)heapBlockID;
// datafield[1] = testID;
//
// placeMessage(&testSlaveVars->driverMailbox, &message);
// }
//}
//
//void hw_setAllocFailuresNonFatal()
//{
// hw_bAllocFailuresAreFatal = FALSE;
//}
//

View File

@ -0,0 +1,143 @@
#include <bifrost_thread.h>
#include "bifrost_assert.h"
#include "bifrost_apc.h"
#include "conf_defs.h"
#include "bifrost_stdlib.h"
#include "bifrost_alloc.h"
static uint32_t _apc_int_vec;
static uint32_t _apc_initialized[HW_PROC_CNT] = {false};
typedef struct
{
linked_list_node_t list_node;
hw_callback_func_t free_func;
hw_callback_func_t proc;
void *args;
} hw_apc_node_t;
static void apc_node_free(void *node, void *up)
{
hw_free(node);
}
hw_result_t ke_apc_drain(uint32_t core)
{
if (core >= HW_PROC_CNT)
return APC_STATUS_INVALID_ARGUMENTS;
if (!_apc_initialized[core])
return APC_STATUS_NOT_INITIALIZED;
ke_trigger_intr(core, _apc_int_vec);
return STATUS_SUCCESS;
}
static hw_result_t ke_apc_queue(hw_tcb_t *tcb, hw_callback_func_t proc, void *args, hw_apc_node_t *node)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (proc != NULL && tcb != NULL && node != NULL)
{
if (!_apc_initialized[tcb->core_id])
return APC_STATUS_NOT_INITIALIZED;
node->args = args;
node->proc = proc;
node->free_func = apc_node_free;
hw_irql_t irql;
irql = ke_spin_lock_raise_irql(&tcb->apc_lock, HW_IRQL_DPC_LEVEL);
linked_list_push_back(&tcb->apc_list, &node->list_node);
ke_spin_unlock_lower_irql(&tcb->apc_lock, irql);
if (tcb->state == STATE_RUN)
{
ke_apc_drain(tcb->core_id);
}
return STATUS_SUCCESS;
}
return APC_STATUS_INVALID_ARGUMENTS;
}
hw_result_t hw_apc_queue(hw_handle_t thread_handle, hw_callback_func_t proc, void *args)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (proc == NULL)
return APC_STATUS_INVALID_ARGUMENTS;
// reference tcb pointer
hw_ref_node_t *ref;
hw_result_t result = hw_open_obj_by_handle(thread_handle, &ref);
if (HW_SUCCESS(result))
{
hw_tcb_t *tcb = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_tcb_t);
if (_apc_initialized[tcb->core_id])
{
hw_apc_node_t *node = (hw_apc_node_t *) hw_alloc(sizeof(hw_apc_node_t));
if (node == NULL)
{
result = APC_STATUS_CANNOT_ALLOCATE_MEM;
}
else
{
node->args = args;
node->proc = proc;
node->free_func = apc_node_free;
result = ke_apc_queue(tcb, proc, args, node);
}
}
else
{
result = APC_STATUS_NOT_INITIALIZED;
}
// dereference the tcb pointer ref
ke_dereference_obj(&tcb->ref_node);
}
return result;
}
static void ke_apc_interrupt_handler(void *intr_stack, void *usr_context)
{
hw_assert(ke_get_irql() == HW_IRQL_APC_LEVEL);
if (_apc_initialized[ke_get_current_core()])
{
// no need to reference since current thread context
hw_tcb_t *tcb = ke_current_thread();
hw_irql_t irql;
while (1)
{
linked_list_node_t *node;
irql = ke_spin_lock_raise_irql(&tcb->apc_lock, HW_IRQL_DPC_LEVEL);
node = linked_list_pop_front(&tcb->apc_list);
ke_spin_unlock_lower_irql(&tcb->apc_lock, irql);
if (node == NULL)
{
break;
}
else
{
hw_apc_node_t *apc_node = OBTAIN_STRUCT_ADDR(node, list_node, hw_apc_node_t);
apc_node->proc(NULL, apc_node->args);
apc_node->free_func(apc_node, NULL);
}
}
}
return;
}
hw_result_t ke_apc_setup(uint32_t int_vec)
{
if (!_apc_initialized[ke_get_current_core()])
{
_apc_int_vec = int_vec;
ke_register_intr_handler(_apc_int_vec, ke_apc_interrupt_handler, NULL);
_apc_initialized[ke_get_current_core()] = true;
}
return STATUS_SUCCESS;
}

View File

@ -0,0 +1,11 @@
#include "bifrost_assert.h"
#include "bifrost_print.h"
void hw_assert_ex(const char* expr_str, const char* file, int32_t line, int32_t expr)
{
if (!expr)
{
hw_printf("Assertion \"%s\" failed at %s: %d\n", expr_str, file, line);
}
return;
}

View File

@ -0,0 +1,161 @@
#include <bifrost_thread.h>
#include <bifrost_timer.h>
#include "bifrost_dpc.h"
#include "bifrost_context.h"
#include "bifrost_thread.h"
#include "conf_defs.h"
#include "bifrost_alloc.h"
#include "bifrost_stdlib.h"
#include "bifrost_system_constants.h"
static linked_list_t _dpc_list_arr[HW_PROC_CNT];
static MEM_SRAM_UC hw_spin_lock_t _dpc_lock_arr[HW_PROC_CNT];
static _Bool _dpc_initialized[HW_PROC_CNT] = {false};
static uint32_t _dpc_int_vec;
typedef struct
{
linked_list_node_t list_node;
hw_callback_func_t proc;
void *args;
hw_callback_func_t free_func;
} hw_dpc_node_t;
static void dpc_free(void *node, void *np)
{
hw_free(node);
}
// Used for storing regs (kernel context) for switching to the first thread
// Theoretically can return to kernel by restoring these regs
static uint64_t _dummy_regs[16];
hw_result_t ke_dpc_drain(uint32_t core)
{
if (core >= HW_PROC_CNT)
return DPC_STATUS_INVALID_ARGUMENTS;
if (!_dpc_initialized[core])
return DPC_STATUS_NOT_INITIALIZED;
ke_trigger_intr(core, _dpc_int_vec);
return STATUS_SUCCESS;
}
static hw_result_t ke_dpc_queue(uint32_t core, hw_callback_func_t proc, void *arg, hw_dpc_node_t *node)
{
if (!_dpc_initialized[core])
return DPC_STATUS_NOT_INITIALIZED;
if (core < HW_PROC_CNT && proc != NULL && node != NULL)
{
hw_irql_t irql;
node->args = arg;
node->proc = proc;
node->free_func = dpc_free;
irql = ke_spin_lock_raise_irql(&_dpc_lock_arr[core], HW_IRQL_DISABLED_LEVEL);
linked_list_push_back(&_dpc_list_arr[core], &node->list_node);
ke_spin_unlock_lower_irql(&_dpc_lock_arr[core], irql);
return STATUS_SUCCESS;
}
return DPC_STATUS_INVALID_ARGUMENTS;
}
hw_result_t hw_dpc_queue(uint32_t core, hw_callback_func_t proc, void *args)
{
if (!_dpc_initialized[core])
return DPC_STATUS_NOT_INITIALIZED;
if (core < HW_PROC_CNT && proc != NULL && _dpc_initialized[ke_get_current_core()])
{
hw_dpc_node_t *node = (hw_dpc_node_t *) hw_alloc(sizeof(hw_dpc_node_t));
if (node == NULL)
{
return DPC_STATUS_NOT_ENOUGH_MEM;
}
return ke_dpc_queue(core, proc, args, node);
}
return DPC_STATUS_INVALID_ARGUMENTS;
}
static void ke_dpc_interrupt_handler(void *intr_stack, void *usr_context)
{
uint32_t core_id = ke_get_current_core();
void *prev_context = NULL;
hw_thread_schedule_info_t info;
bool scheduled = false;
if (_dpc_initialized[core_id])
{
hw_irql_t irql;
// handle normal DPCs
while (1)
{
linked_list_node_t *node;
irql = ke_spin_lock_raise_irql(&_dpc_lock_arr[core_id], HW_IRQL_DISABLED_LEVEL);
node = linked_list_pop_front(&_dpc_list_arr[core_id]);
ke_spin_unlock_lower_irql(&_dpc_lock_arr[core_id], irql);
if (node == NULL)
{
break;
}
else
{
hw_dpc_node_t *dpc_node = OBTAIN_STRUCT_ADDR(node, list_node, hw_dpc_node_t);
dpc_node->proc(NULL, dpc_node->args);
dpc_node->free_func(dpc_node, NULL);
}
}
// scheduler DPC
while (ke_query_and_clear_scheduler_dpc(ke_get_current_core()))
{
ke_thread_schedule(&info, NULL);
if (!scheduled)
{
// the prev context is only captured the first time the scheduler is called
prev_context = info.prev_context;
scheduled = true;
}
}
// timer tick
if (ke_query_and_clear_timer_dpc(ke_get_current_core()))
{
ke_timer_tick(NULL, NULL);
}
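// if the scheduler picked a new thread, switch to it; when there is no
// previous thread context (first switch on this core) the old state is
// parked in _dummy_regs instead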
if (scheduled)
{
if (prev_context == NULL)
{
ke_context_switch(intr_stack, _dummy_regs, info.next_context);
}
else
{
ke_context_switch(intr_stack, prev_context, info.next_context);
}
}
}
return;
}
hw_result_t ke_dpc_setup(uint32_t int_vec)
{
if (!_dpc_initialized[ke_get_current_core()])
{
for (uint32_t i = 0; i < HW_PROC_CNT; i++)
{
ke_spin_lock_init(&_dpc_lock_arr[i]);
linked_list_init(&_dpc_list_arr[i]);
}
_dpc_int_vec = int_vec;
ke_register_intr_handler(_dpc_int_vec, ke_dpc_interrupt_handler, NULL);
_dpc_initialized[ke_get_current_core()] = true;
return STATUS_SUCCESS;
}
return DPC_STATUS_NOT_INITIALIZED;
}

View File

@ -0,0 +1,255 @@
#include <bifrost_event.h>
#include <bifrost_thread.h>
#include "bifrost_assert.h"
#include "bifrost_stdlib.h"
#include "bifrost_alloc.h"
#include "bifrost_apc.h"
static void event_node_free(void *node, void *up)
{
hw_free(node);
}
static void event_free(void *node, void *up)
{
hw_free(node);
}
// =================
// Ke Functions
// =================
hw_result_t ke_event_wait(hw_event_t *event, hw_event_node_t *node)
{
if (event == NULL || node == NULL)
{
return EVENT_STATUS_INVALID_ARGUMENTS;
}
if (event->signaled)
{
return STATUS_SUCCESS;
}
hw_tcb_t *cur_thread = ke_current_thread();
ke_reference_obj(&cur_thread->ref_node);
hw_result_t result;
hw_irql_t irql;
irql = ke_spin_lock_raise_irql(&event->lock, HW_IRQL_DPC_LEVEL);
// TODO: finish tid and check duplicate registration?
node->tcb = cur_thread;
linked_list_push_back(&event->waiting_threads, &node->list_node);
// DPCs must stay disabled here because ke_thread_block immediately yields when
// a thread blocks itself; yielding while still holding the event lock would
// leave it locked forever and deadlock every later waiter. (Interrupts are
// already raised to DPC level by the lock above, so this is only a note.)
//
// The block also cannot be moved after the unlock: if another thread signals
// the event between unlocking and blocking, this thread would be "resumed"
// before it ever blocked and would then stay blocked forever.
//
// In short, the state change in ke_thread_block must happen atomically with
// the insertion into the event's wait queue, i.e. under the event lock.
result = ke_thread_block(cur_thread);
// release the lock
ke_spin_unlock_lower_irql(&event->lock, irql);
return result;
}
hw_result_t ke_event_reset(hw_event_t *event)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (event != NULL)
{
// acquire lock
hw_irql_t irql;
irql = ke_spin_lock_raise_irql(&event->lock, HW_IRQL_DPC_LEVEL);
event->signaled = false;
ke_spin_unlock_lower_irql(&event->lock, irql);
return STATUS_SUCCESS;
}
return EVENT_STATUS_INVALID_ARGUMENTS;
}
hw_result_t ke_event_signal(hw_event_t *event)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
hw_result_t result = STATUS_SUCCESS;
if (event != NULL)
{
// acquire the lock
hw_irql_t irql;
irql = ke_spin_lock_raise_irql(&event->lock, HW_IRQL_DPC_LEVEL);
if (!event->signaled)
{
event->signaled = true;
linked_list_node_t *node = linked_list_pop_front(&event->waiting_threads);
while (node != NULL)
{
hw_event_node_t *event_node = OBTAIN_STRUCT_ADDR(node, list_node, hw_event_node_t);
// sync, unblock
result = ke_thread_resume((hw_tcb_t *) event_node->tcb);
ke_dereference_obj(&((hw_tcb_t *) event_node->tcb)->ref_node);
event_node->free_func(event_node, NULL);
node = linked_list_pop_front(&event->waiting_threads);
}
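// auto-reset events drop back to non-signaled once the waiters queued at
// signal time have all been released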
if (event->type == EVENT_TYPE_AUTO)
{
event->signaled = false;
}
}
// release the lock
ke_spin_unlock_lower_irql(&event->lock, irql);
}
else
{
result = EVENT_STATUS_INVALID_ARGUMENTS;
}
return result;
}
hw_result_t ke_event_init(hw_event_t *event, hw_event_type_t event_type)
{
if (event != NULL)
{
linked_list_init(&event->waiting_threads);
ke_spin_lock_init(&event->lock);
event->type = event_type;
event->signaled = false;
return STATUS_SUCCESS;
}
return EVENT_STATUS_INVALID_ARGUMENTS;
}
// =================
// HW Functions
// =================
hw_result_t hw_event_create(hw_handle_t *out, hw_event_type_t event_type)
{
hw_assert(ke_get_irql() < HW_IRQL_DPC_LEVEL);
hw_result_t result = STATUS_SUCCESS;
hw_event_t *event = (hw_event_t *) hw_alloc(sizeof(hw_event_t));
if (event == NULL)
return EVENT_STATUS_CANNOT_ALLOCATE_MEM;
result = ke_reference_create(&event->ref_node, event_free);
if (!HW_SUCCESS(result))
{
hw_free(event);
return result;
}
result = ke_event_init(event, event_type);
if (!HW_SUCCESS(result))
{
ke_dereference_obj(&event->ref_node);
return result;
}
result = hw_create_handle(&event->ref_node, out);
ke_dereference_obj(&event->ref_node);
return result;
}
hw_result_t hw_event_signal(hw_handle_t handle)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
hw_result_t result;
hw_ref_node_t *ref;
// reference sem pointer
result = hw_open_obj_by_handle(handle, &ref);
if (!HW_SUCCESS(result))
{
return result;
}
hw_event_t *event = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_event_t);
result = ke_event_signal(event);
//dereference ref node
ke_dereference_obj(&event->ref_node);
return result;
}
hw_result_t hw_event_wait(hw_handle_t handle)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
hw_result_t result;
hw_ref_node_t *ref;
// reference sem pointer
result = hw_open_obj_by_handle(handle, &ref);
if (!HW_SUCCESS(result))
{
return result;
}
hw_event_t *event = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_event_t);
hw_event_node_t *event_node = (hw_event_node_t *) hw_alloc(sizeof(hw_event_node_t));
if (event_node != NULL)
{
event_node->free_func = event_node_free;
result = ke_event_wait(event, event_node);
}
else
{
result = EVENT_STATUS_CANNOT_ALLOCATE_MEM;
}
ke_dereference_obj(&event->ref_node);
return result;
}
hw_result_t hw_event_reset(hw_handle_t handle)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
hw_result_t result;
hw_ref_node_t *ref;
// reference sem pointer
result = hw_open_obj_by_handle(handle, &ref);
if (!HW_SUCCESS(result))
{
return result;
}
hw_event_t *event = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_event_t);
result = ke_event_reset(event);
//dereference ref node
ke_dereference_obj(&event->ref_node);
return result;
}

View File

@ -0,0 +1,35 @@
#include <bifrost_thread.h>
#include <conf_defs.h>
#include "bifrost_intr.h"
#include "bifrost_dpc.h"
#include "bifrost_timer.h"
#include "bifrost_assert.h"
static uint32_t _sys_tick[HW_PROC_CNT] = {0};
uint32_t ke_get_system_tick()
{
return _sys_tick[ke_get_current_core()];
}
hw_irql_t ke_raise_irql(hw_irql_t irql)
{
hw_assert(ke_get_irql() <= irql);
return ke_set_irql(irql);
}
hw_irql_t ke_lower_irql(hw_irql_t irql)
{
hw_assert(ke_get_irql() >= irql);
return ke_set_irql(irql);
}
void ke_timer_interrupt_handler(void *intr_stack, void *usr_context)
{
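// per-core tick: bump the counter, request a reschedule and a timer pass,
// then drain DPCs immediately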
_sys_tick[ke_get_current_core()]++;
ke_queue_scheduler_dpc(ke_get_current_core());
ke_queue_timer_dpc(ke_get_current_core());
ke_dpc_drain(ke_get_current_core());
return;
}

View File

@ -0,0 +1,42 @@
#include "bifrost_lock.h"
#include "bifrost_intr.h"
#include "bifrost_mem.h"
hw_irql_t ke_spin_lock_raise_irql(hw_spin_lock_t *lock, hw_irql_t irql)
{
hw_irql_t old = ke_raise_irql(irql);
ke_spin_lock(lock);
return old;
}
void ke_spin_unlock_lower_irql(hw_spin_lock_t *lock, hw_irql_t irql)
{
ke_spin_unlock(lock);
ke_lower_irql(irql);
return;
}
void ke_spin_lock(hw_spin_lock_t *lock)
{
if (lock != NULL)
{
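// spin until the lock word atomically transitions from 0 (free) to 1 (held)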
while (ke_interlocked_compare_exchange(&lock->lock, 0, 1) != 0);
}
}
void ke_spin_unlock(hw_spin_lock_t *lock)
{
if (lock != NULL)
{
lock->lock = 0;
}
}
void ke_spin_lock_init(hw_spin_lock_t *lock)
{
if (lock != NULL)
{
lock->lock = 0;
}
}

View File

@ -0,0 +1,406 @@
///*-----------------------------------------------------
// |
// | hw_printf.c
// |
// | prints to screen and logs to file
// |
// |------------------------------------------------------
// |
// | Revision History:
// |
// | #5 - 3 March 2005 J. Hanes
// | Cosmetic.
// |
// | #4 - 18 February 2005 J. Hanes
// | Add ctp_print_control();
// | replace hw_print_enable with hw_print_level.
// |
// | #3 - 1 February 2005 J. Hanes
// | Use hw_print_enable to disable print
// |
// | #2 - 30 June 2004 J. Hanes
// | Add hw_ignore_printf()
// |
// |------------------------------------------------------
// |
// | Copyright (C) 2003 Microsoft Corporation
// | All Rights Reserved
// | Confidential and Proprietary
// |
// |------------------------------------------------------
// */
//
//
//
// #include <bifrost_intr.h>
//
//void
//hw_set_verbosity( UINT32 verbosity )
//{
// HW_TS_TESTDATA* testData = (HW_TS_TESTDATA*)hw_readptr(&testSlaveVars->testData);
//
// // Mask out the print sync bit, this function isn't
// // allowed to touch it
// verbosity &= ~PRINT_TYPE(HW_PRINT_SYNC);
//
// //UINT32 testIdx = hw_read32(&testSlaveVars->dwCurrentXTest);
// //
// //if(testIdx == HW_NO_TEST_INDEX || hw_getCoreNum() == bifrostCachedGlobals.tpid)
// //{
// // // Grab the previous print sync setting and combine
// // // it with the requested verbosity
// // verbosity |= bifrostCachedGlobals.hw_global_verbosity & PRINT_TYPE(HW_PRINT_SYNC);
// // bifrostCachedGlobals.hw_global_verbosity = verbosity;
// //}
// //else
// //{
// // // Grab the previous print sync setting and combine
// // // it with the requested verbosity
// // verbosity |= hw_read32(&testData[testIdx].verbosity) & PRINT_TYPE(HW_PRINT_SYNC);
// // hw_write32(&testData[testIdx].verbosity, verbosity);
// //}
//}
//
//void hw_set_print_synchronization(BOOL value)
//{
// HW_TS_TESTDATA* testData = (HW_TS_TESTDATA*)hw_readptr(&testSlaveVars->testData);
//
// //UINT32 testIdx = hw_read32(&testSlaveVars->dwCurrentXTest);
// //
// //// Read and modify the verbosity settings to include
// //// the HW_PRINT_SYNC bit
// //UINT32 current_verbosity = hw_read32(&testData[testIdx].verbosity);
// //current_verbosity &= ~PRINT_TYPE(HW_PRINT_SYNC);
// //current_verbosity |= (value & 0x1) << HW_PRINT_SYNC;
// //
// //hw_write32(&testData[testIdx].verbosity, current_verbosity);
//}
//
//// Get the address of the empty buffer starting at 'tail'
//UCHAR* getBuffer(HW_PRINTF_BUFFER* bufArr, UINT32 len)
//{
// hw_cacheFlushAddr(&bufArr->head);
// hw_cacheFlushAddr(&bufArr->tail);
//
// UINT32 head, tail;
//
// head = hw_read32(&bufArr->head);
// tail = hw_read32(&bufArr->tail);
//
// UINT32 tLen; // tLen = total length of strings waiting to be printed + length of this string
//
// if(head <= tail)
// {
// tLen = tail - head;
// }
// else
// {
// tLen = tail + HW_PRINTF_BUF_SIZE - head;
// }
// tLen += len;
//
// UCHAR* rp = bufArr->buffer + tail;
//
// // Check for buffer overflow.
// // HW_PRINTF_BUF_SIZE - 2 is the maximum
// // string size since 1) we're not allowed
// // to completely fill the buffer, there
// // has to be at least one padding element
// // and 2) if the string loops around the
// // end of the array, an additional \0
// // character is added.
// if(tLen > HW_PRINTF_BUF_SIZE - 2)
// {
// rp = 0;
// }
//
// return rp;
//}
//
//BOOL verbosity_enabled(HW_PRINT_TYPE print_type)
//{
// HW_TS_TESTDATA* p_testData = (HW_TS_TESTDATA*)hw_readptr(&testSlaveVars->testData);
//
// //UINT32 testIdx = hw_read32(&testSlaveVars->dwCurrentXTest);
// UINT32 current_verbosity = 0x0;
// BOOL ret = TRUE;
//
// ////
// //// If the current test is "None", we are in
// //// kernel code, so use the global verbosity.
// ////
// //if(testIdx == HW_NO_TEST_INDEX)
// //{
// // current_verbosity = bifrostCachedGlobals.hw_global_verbosity;
// //}
// //else
// //{
// // if(p_testData != NULL)
// // {
// // current_verbosity = hw_read32(&p_testData[testIdx].verbosity);
// // }
// //}
// //
// //if(!(PRINT_TYPE(print_type) & current_verbosity))
// //{
// // ret = FALSE;
// //}
//
// return ret;
//}
//
//void sendPrintMessage(UINT32 totalLen, UINT64 time)
//{
// HW_TS_TESTDATA* testData = (HW_TS_TESTDATA*)hw_readptr(&testSlaveVars->testData);
//
// UINT32 testID = hw_getMyInstanceID();
// UINT32 current_verbosity;
//
// //
// // If the current test is "None", we are in internal
// // test slave code, so use the global verbosity.
// // If this is the driver, always use global verbosity.
// //
// if(testID == HW_INTERNAL_TESTID || hw_getCoreNum() == bifrostCachedGlobals.tpid)
// {
// current_verbosity = bifrostCachedGlobals.hw_global_verbosity;
// }
// else
// {
// //current_verbosity = hw_read32(&testData[testIdx].verbosity);
// current_verbosity = 0;
// }
//
// HW_MESSAGE message;
// message.metadata.command = MSG_TS_CMD_PRINTF;
// message.metadata.size = 3 * sizeof(UINT64);
// UINT64* datafield = (UINT64*) message.data;
// datafield[0] = totalLen;
// datafield[1] = time;
// datafield[2] = testID;
//
// //
// // Notify driver
// //
// placeMessage(&testSlaveVars->driverLowPrioMailbox, &message);
//
// //
// // If print synchronization is on, flush the buffer
// // before continuing so the prints we see are
// // synchronized with test execution and not
// // delayed due to testdriver processing.
// //
// if(current_verbosity & PRINT_TYPE(HW_PRINT_SYNC))
// {
// hw_flushMailbox();
// }
//}
//
//void hw_printf( HW_PRINT_TYPE print_type, const char* fmt, ... )
//{
// if(!verbosity_enabled(print_type))
// {
// return;
// }
//
// va_list args;
// va_start( args, fmt );
//
// if(hw_getCoreNum() == bifrostCachedGlobals.tpid || hw_pTestConfigs->bfinit.DIRECT_PRINT_EN)
// //
// // If this thread is the driver, or we're
// // in an environment where each core has direct
// // print access, place the string in the
// // internal print buffer and call the plat_puts
// // function.
// //
// {
// CHAR* buffer = (CHAR*)hw_readptr(&testSlaveVars->internalPrintBuffer);
// int str_length = hw_vsnprintf( buffer, HW_PRINTF_BUF_SIZE, fmt, args );
// plat_puts( buffer, str_length );
// }
// else
// {
// hw_vprintf( fmt, args );
// }
//
// va_end( args );
//}
//
//void ts_copyToPrintBuffer( HW_PRINTF_BUFFER* buffer, BOOL write_null, const char* str, int str_length)
//{
// UINT32 tail;
// CHAR* destBuffer;
// const char* bp;
//
// //
// // Flush and update buffer variables
// //
// hw_cacheFlushAddr(&buffer->tail);
// tail = hw_read32(&buffer->tail);
//
// //
// // If the buffer is overflowing, just wait
// //
// while(!(destBuffer = (CHAR*) getBuffer(buffer, str_length)));
//
// //
// // If the message wraps around the end of the buffer,
// // break it into two chunks
// //
// if(tail + str_length > HW_PRINTF_BUF_SIZE)
// {
// // Calculate length of each chunk
// UINT32 len1 = HW_PRINTF_BUF_SIZE - tail;
// UINT32 len2 = str_length - len1;
//
// //
// // Copy from temp buffer into real buffer until we reach wrap point.
// // Then flush
// //
// hw_strncpytomem(destBuffer, str, len1);
// hw_cacheFlushBuffer(destBuffer, len1);
//
// //
// // We've copied the first chunk, now get ready to copy the second
// //
// bp = str + len1;
//
// // If we're writing the null terminator, add one to len2 to account
// // for that additional character, and subtract 1 from bp because
// // len1 counts the null terminator
// if(write_null)
// {
// bp--;
// len2++;
// }
// // Otherwise, write the character at bp - 1 on top of the
// // null terminator at the very end of the buffer
// else
// {
// hw_write8((UINT8*)destBuffer + len1 - 1, *(bp - 1));
// }
//
// destBuffer = (CHAR*) buffer->buffer;
//
// //
// // Copy rest of temp buffer into real buffer
// // and flush
// //
// hw_strncpytomem(destBuffer, bp, len2);
// hw_cacheFlushBuffer(destBuffer, len2);
//
// //
// // Update tail pointer
// //
// tail = len2;
// }
// else
// {
// //
// // Copy from temp buffer into real buffer
// // and flush
// //
// hw_strncpytomem(destBuffer, str, str_length);
// hw_cacheFlushBuffer(destBuffer, str_length);
//
// //
// // Update tail pointer
// //
// tail += str_length;
//
// if(tail == HW_PRINTF_BUF_SIZE)
// {
// tail = 0;
// }
// }
//
// // If we don't want the null character, move the
// // tail back so that the receiver doesn't pick it up
// if(!write_null && tail != 0)
// {
// tail--;
// }
//
// //
// // Update and flush tail pointer
// //
// hw_write32(&buffer->tail, tail);
// hw_cacheFlushAddr(&buffer->tail);
//}
//
//void ts_vprintf( HW_PRINTF_BUFFER* buffer, BOOL write_null, const char* fmt, va_list args )
//{
// //
// // HW_PRINTF_BUF_SIZE - 2 is the maximum
// // string size since 1) we're not allowed
// // to completely fill the buffer, there
// // has to be at least one padding element
// // and 2) if the string loops around the
// // end of the array, an additional \0
// // character is added.
// //
// UINT32 totalLen;
// CHAR tempBuffer[HW_PRINTF_BUF_SIZE - 2];
//
// // snap the time
// UINT64 time = hw_getTime();
//
// //
// // snprintf the passed argument into a temporary buffer.
// // Add one to totalLen to account for null terminator
// //
// totalLen = hw_vsnprintf( tempBuffer, _countof(tempBuffer), fmt, args ) + 1;
//
// //
// // Set critical section during usage of print buffer and mailbox
// //
// hw_irql_t prev_irql = hw_raise_irql(HW_IRQL_DISABLED_LEVEL);
//
// ts_copyToPrintBuffer(buffer, write_null, tempBuffer, totalLen);
//
// // Notify testdriver that print message
// // is ready
// sendPrintMessage(totalLen, time);
//
// hw_lower_irql(prev_irql);
//}
//
//void hw_vprintf( const char* fmt, va_list args )
//{
// hw_log(HW_EVENT_CHECKPOINT, (UINTPTR)"printf", (UINTPTR)"enter", 0, 0);
// HW_PRINTF_BUFFER* buffer = &testSlaveVars->printBuffer;
// ts_vprintf(buffer, TRUE, fmt, args);
// hw_log(HW_EVENT_CHECKPOINT, (UINTPTR)"printf", (UINTPTR)"exit", 0, 0);
//}
//
////
//// A wrapper function to prepend a string
//// to a print message.
////
//void hw_printfWithPrefix( const char* prefix, HW_PRINT_TYPE print_type, const char* fmt, ... )
//{
// if(!verbosity_enabled(print_type))
// {
// return;
// }
//
// char hw_printf_internal_buffer[ HW_PRINTF_BUF_SIZE ];
// char* dest = hw_printf_internal_buffer;
// SIZE_T size = _countof(hw_printf_internal_buffer);
// SIZE_T len;
//
// len = hw_snprintf( dest, size, prefix );
// size -= len + 1;
// dest += len;
//
// va_list args;
// va_start( args, fmt );
// hw_vsnprintf( dest, size, fmt, args );
// va_end( args );
//
// hw_printf(print_type, "%s", hw_printf_internal_buffer);
//}
//

View File

@ -0,0 +1,316 @@
#include <bifrost_assert.h>
#include <bifrost_thread.h>
#include "bifrost_statuscode.h"
#include "bifrost_mem.h"
#include "bifrost_ref.h"
#include "bifrost_lock.h"
#include "bifrost_stdlib.h"
#include "bifrost_alloc.h"
typedef struct
{
avl_tree_node_t tree_node;
hw_handle_t handle;
hw_ref_node_t *ref;
hw_callback_func_t free_func;
} hw_handle_node_t;
static void handle_node_free(void *node, void *up)
{
hw_free(node);
}
// ===========================
// Ke Functions
// ===========================
static avl_tree_t _handle_tree;
static bool _initialized;
static hw_spin_lock_t _handle_tree_lock;
static int32_t _handle_base;
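// comparator for the handle AVL tree; only a consistent total order over
// handle values matters for insert/search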
static int32_t handle_compare(avl_tree_node_t *tree_node, avl_tree_node_t *my_node)
{
hw_handle_node_t *tcb = OBTAIN_STRUCT_ADDR(tree_node, tree_node, hw_handle_node_t);
hw_handle_node_t *my_tcb = OBTAIN_STRUCT_ADDR(my_node, tree_node, hw_handle_node_t);
if ((uintptr_t) tcb->handle > (uintptr_t) my_tcb->handle)
return -1;
else if ((uintptr_t) tcb->handle == (uintptr_t) my_tcb->handle)
return 0;
else
return 1;
}
static hw_handle_node_t *search_handle_node(hw_handle_t handle)
{
avl_tree_node_t *result;
hw_handle_node_t temp;
temp.handle = handle;
result = avl_tree_search(&_handle_tree, &temp.tree_node);
return result == NULL ? NULL : OBTAIN_STRUCT_ADDR(result, tree_node, hw_handle_node_t);
}
hw_result_t ke_reference_setup()
{
if (!_initialized)
{
avl_tree_init(&_handle_tree, handle_compare);
ke_spin_lock_init(&_handle_tree_lock);
_handle_base = HW_HANDLE_BASE;
_initialized = true;
}
return STATUS_SUCCESS;
}
hw_result_t ke_reference_create(hw_ref_node_t *ref,
hw_callback_func_t free_func)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (!_initialized)
return REF_STATUS_UNINITIALIZED;
if (ref == NULL || free_func == NULL)
return REF_STATUS_INVALID_ARGUMENTS;
ref->callback = free_func;
ref->ref_count = 1;
return STATUS_SUCCESS;
}
hw_result_t ke_reference_obj(hw_ref_node_t *ref_node)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (!_initialized)
return REF_STATUS_UNINITIALIZED;
if (ref_node == NULL)
return REF_STATUS_INVALID_ARGUMENTS;
int32_t old_ref_count = ke_interlocked_increment(&ref_node->ref_count, 1);
hw_assert(old_ref_count >= 1);
return STATUS_SUCCESS;
}
hw_result_t ke_dereference_obj(hw_ref_node_t *ref_node)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (!_initialized)
return REF_STATUS_UNINITIALIZED;
if (ref_node == NULL)
return REF_STATUS_INVALID_ARGUMENTS;
hw_result_t result = STATUS_SUCCESS;
int32_t old_ref_count = ke_interlocked_increment(&ref_node->ref_count, -1);
hw_assert(old_ref_count >= 1);
if (old_ref_count == 1)
{
ref_node->callback(ref_node, NULL);
}
return result;
}
static hw_result_t ke_open_obj_by_handle(hw_handle_t handle, hw_ref_node_t **out)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (!_initialized)
{
return REF_STATUS_UNINITIALIZED;
}
if (out == NULL)
{
return REF_STATUS_INVALID_ARGUMENTS;
}
hw_irql_t irql;
hw_result_t result = STATUS_SUCCESS;
hw_ref_node_t *ref = NULL;
irql = ke_spin_lock_raise_irql(&_handle_tree_lock, HW_IRQL_DPC_LEVEL);
hw_handle_node_t *handle_node = search_handle_node(handle);
if (handle_node == NULL)
{
result = REF_STATUS_HANDLE_NOT_FOUND;
}
else
{
ref = handle_node->ref;
}
// PREREQUISITE: Having a handle -> having a reference
// MUST GUARANTEE that handle exists while we reference
if (HW_SUCCESS(result))
{
// reference the object then return the reference
result = ke_reference_obj(ref);
if (HW_SUCCESS(result))
{
*out = ref;
}
}
ke_spin_unlock_lower_irql(&_handle_tree_lock, irql);
return result;
}
static hw_result_t ke_create_handle(hw_ref_node_t *ref,
hw_handle_node_t *node,
hw_handle_t *out)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (!_initialized)
return REF_STATUS_UNINITIALIZED;
if (ref == NULL || node == NULL || out == NULL)
return REF_STATUS_INVALID_ARGUMENTS;
hw_result_t result = STATUS_SUCCESS;
hw_irql_t irql;
if (HW_SUCCESS(result))
{
// TODO: CHECK OVERFLOW
node->handle = (hw_handle_t) ke_interlocked_increment(&_handle_base, 1);
node->ref = ref;
irql = ke_spin_lock_raise_irql(&_handle_tree_lock, HW_IRQL_DPC_LEVEL);
hw_handle_node_t *existing_node = search_handle_node(node->handle);
if (existing_node == NULL)
{
avl_tree_insert(&_handle_tree, &node->tree_node);
}
else
{
result = REF_STATUS_HANDLE_DUPLICATE;
}
ke_spin_unlock_lower_irql(&_handle_tree_lock, irql);
}
if (HW_SUCCESS(result))
{
ke_reference_obj(ref);
*out = node->handle;
}
else
{
node->free_func(node, NULL);
}
return result;
}
static hw_result_t ke_close_handle(hw_handle_t handle)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (!_initialized)
return REF_STATUS_UNINITIALIZED;
hw_irql_t irql;
hw_result_t result = STATUS_SUCCESS;
hw_ref_node_t *ref = NULL;
irql = ke_spin_lock_raise_irql(&_handle_tree_lock, HW_IRQL_DPC_LEVEL);
hw_handle_node_t *handle_node = search_handle_node(handle);
if (handle_node == NULL)
{
result = REF_STATUS_HANDLE_NOT_FOUND;
}
else
{
ref = handle_node->ref;
avl_tree_delete(&_handle_tree, &handle_node->tree_node);
handle_node->free_func(handle_node, NULL);
}
ke_spin_unlock_lower_irql(&_handle_tree_lock, irql);
if (HW_SUCCESS(result))
{
// dereference the object
result = ke_dereference_obj(ref);
}
return result;
}
// ===========================
// HW Functions
// ===========================
hw_result_t hw_create_handle(hw_ref_node_t *ref, hw_handle_t *out)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (!_initialized)
return REF_STATUS_UNINITIALIZED;
hw_handle_node_t *node;
node = (hw_handle_node_t *) hw_alloc(sizeof(hw_handle_node_t));
if (node == NULL)
{
return REF_STATUS_CANNOT_ALLOCATE_MEM;
}
node->free_func = handle_node_free;
return ke_create_handle(ref, node, out);
}
hw_result_t hw_close_handle(hw_handle_t handle)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (!_initialized)
return REF_STATUS_UNINITIALIZED;
if (handle == HW_HANDLE_CURRENT_THREAD)
{
return REF_STATUS_NO_EFFECT;
}
hw_result_t result = ke_close_handle(handle);
return result;
}
hw_result_t hw_open_obj_by_handle(hw_handle_t handle, hw_ref_node_t **out)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (!_initialized)
return REF_STATUS_UNINITIALIZED;
if (out == NULL)
return REF_STATUS_INVALID_ARGUMENTS;
// check special handles first
if (handle == HW_HANDLE_CURRENT_THREAD)
{
// no need to ref first since definitely current thread context
hw_tcb_t *tcb = ke_current_thread();
ke_reference_obj(&tcb->ref_node);
*out = &tcb->ref_node;
return STATUS_SUCCESS;
}
return ke_open_obj_by_handle(handle, out);
}
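//
// Illustrative only (not part of the original change): a minimal sketch of the
// intended object lifetime, assuming a caller-defined my_obj_t that embeds an
// hw_ref_node_t named ref_node and a my_obj_free() callback.
//
//   my_obj_t *obj = (my_obj_t *) hw_alloc(sizeof(my_obj_t));
//   ke_reference_create(&obj->ref_node, my_obj_free);   // ref_count = 1 (creator's reference)
//   hw_handle_t handle;
//   hw_create_handle(&obj->ref_node, &handle);          // the handle holds its own reference
//   ke_dereference_obj(&obj->ref_node);                 // drop the creator's reference
//   hw_ref_node_t *ref;
//   if (HW_SUCCESS(hw_open_obj_by_handle(handle, &ref)))
//   {
//       // ... use OBTAIN_STRUCT_ADDR(ref, ref_node, my_obj_t) ...
//       ke_dereference_obj(ref);                        // done with the pointer
//   }
//   hw_close_handle(handle);                            // last reference gone -> my_obj_free runs
//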

View File

@@ -0,0 +1,112 @@
#include "bifrost_rwlock.h"
#include "bifrost_intr.h"
void ke_rwlock_init(hw_rwlock_t *lock)
{
if(lock != NULL)
{
ke_spin_lock_init(&lock->w_mutex);
ke_spin_lock_init(&lock->r_mutex);
ke_spin_lock_init(&lock->res_lock);
ke_spin_lock_init(&lock->r_try);
lock->reader_ct = 0;
lock->writer_ct = 0;
}
return;
}
void ke_reader_lock(hw_rwlock_t *lock)
{
if(lock != NULL)
{
ke_spin_lock(&lock->r_try);
ke_spin_lock(&lock->r_mutex);
lock->reader_ct++;
if(lock->reader_ct == 1)
{
ke_spin_lock(&lock->res_lock);
}
ke_spin_unlock(&lock->r_mutex);
ke_spin_unlock(&lock->r_try);
}
return;
}
void ke_reader_unlock(hw_rwlock_t *lock)
{
if(lock != NULL)
{
ke_spin_lock(&lock->r_mutex);
lock->reader_ct--;
if(lock->reader_ct == 0)
{
ke_spin_unlock(&lock->res_lock);
}
ke_spin_unlock(&lock->r_mutex);
}
return;
}
hw_irql_t ke_reader_lock_raise_irql(hw_rwlock_t *lock, hw_irql_t irql)
{
hw_irql_t msk = ke_raise_irql(irql);
if(lock != NULL)
{
ke_reader_lock(lock);
}
return msk;
}
void ke_reader_unlock_lower_irql(hw_rwlock_t *lock, hw_irql_t irq)
{
if(lock != NULL)
{
ke_reader_unlock(lock);
}
ke_lower_irql(irq);
return;
}
void ke_writer_lock(hw_rwlock_t *lock)
{
    if(lock != NULL)
    {
        ke_spin_lock(&lock->w_mutex);
        lock->writer_ct++;
        if(lock->writer_ct == 1)
        {
            ke_spin_lock(&lock->r_try);
        }
        ke_spin_unlock(&lock->w_mutex);
        ke_spin_lock(&lock->res_lock);
    }
    return;
}
void ke_writer_unlock(hw_rwlock_t *lock)
{
    if(lock != NULL)
    {
        ke_spin_unlock(&lock->res_lock);
        ke_spin_lock(&lock->w_mutex);
        lock->writer_ct--;
        if(lock->writer_ct == 0)
        {
            ke_spin_unlock(&lock->r_try);
        }
        ke_spin_unlock(&lock->w_mutex);
    }
    return;
}
hw_irql_t ke_writer_lock_raise_irql(hw_rwlock_t *lock, hw_irql_t irql)
{
hw_irql_t msk = ke_raise_irql(irql);
if(lock != NULL)
{
        ke_writer_lock(lock);
}
return msk;
}
void ke_writer_unlock_lower_irql(hw_rwlock_t *lock, hw_irql_t irq)
{
if(lock != NULL)
{
        ke_writer_unlock(lock);
}
ke_lower_irql(irq);
return;
}
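//
// Illustrative only (not part of the original change): a minimal usage sketch,
// assuming a shared structure guarded by an hw_rwlock_t named g_lock.
//
//   ke_rwlock_init(&g_lock);
//   // readers may overlap with one another:
//   ke_reader_lock(&g_lock);
//   /* ... read shared state ... */
//   ke_reader_unlock(&g_lock);
//   // a writer excludes readers and other writers:
//   ke_writer_lock(&g_lock);
//   /* ... modify shared state ... */
//   ke_writer_unlock(&g_lock);
//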

View File

@@ -0,0 +1,289 @@
/*-----------------------------------------------------
|
| hw_semaphore.c
|
| Provides support for semaphores and mutexes.
|
|------------------------------------------------------
|
| Copyright (C) 2016 Microsoft Corporation
| All Rights Reserved
| Confidential and Proprietary
|
|------------------------------------------------------
*/
#include <bifrost_semaphore.h>
#include "bifrost_ref.h"
#include "bifrost_stdlib.h"
#include "bifrost_assert.h"
#include "bifrost_thread.h"
#include "bifrost_alloc.h"
static void sem_node_free(void *node, void *up)
{
hw_free(node);
}
static void sem_free(void *sem, void *up)
{
hw_free(sem);
}
//
// Ke stuff
//
hw_result_t ke_sem_init(hw_sem_t *sem, int32_t count)
{
if (sem == NULL)
return SEM_STATUS_INVALID_ARGUMENTS;
ke_spin_lock_init(&sem->lock);
linked_list_init(&sem->block_list);
sem->count = count;
return STATUS_SUCCESS;
}
hw_result_t ke_sem_signal(hw_sem_t *sem, int32_t quota)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (quota <= 0 || sem == NULL)
return SEM_STATUS_INVALID_ARGUMENTS;
hw_result_t result = STATUS_SUCCESS;
hw_irql_t irq;
// sem_signal can be shared with higher level interrupts, so irq save
// also disable DPC
irq = ke_spin_lock_raise_irql(&sem->lock, HW_IRQL_DPC_LEVEL);
sem->count += quota;
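    // e.g. with waiters queued for 2 and 3 units, ke_sem_signal(sem, 4) wakes
    // the first waiter and leaves the second blocked, now short only 1 unit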
while (quota != 0)
{
linked_list_node_t *node = linked_list_first(&sem->block_list);
if (node != NULL)
{
hw_sem_node_t *sem_node = OBTAIN_STRUCT_ADDR(node, node, hw_sem_node_t);
if (sem_node->quota <= quota)
{
quota -= sem_node->quota;
linked_list_pop_front(&sem->block_list);
result = ke_thread_resume((hw_tcb_t*)sem_node->tcb);
ke_dereference_obj(&((hw_tcb_t*)sem_node->tcb)->ref_node);
sem_node->free_callback(sem_node, NULL);
}
else
{
sem_node->quota -= quota;
quota = 0;
}
}
else
break;
}
ke_spin_unlock_lower_irql(&sem->lock, irq);
return result;
}
hw_result_t ke_sem_wait(hw_sem_t *sem, hw_sem_node_t *node, int quota)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
if (sem == NULL || quota <= 0 || node == NULL)
{
return SEM_STATUS_INVALID_ARGUMENTS;
}
hw_irql_t irq;
hw_result_t result = STATUS_SUCCESS;
irq = ke_spin_lock_raise_irql(&sem->lock, HW_IRQL_DPC_LEVEL);
if (sem->count < quota)
{
// if we don't have enough resources
// if sem->count > 0, then we allocate all existing quota to the thread
// then wait for the remaining quota requested by the thread
// else we can't allocate anything then just wait for the quota requested
int32_t real_quota = sem->count > 0 ? quota - sem->count : quota;
sem->count -= quota;
node->quota = real_quota;
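        // e.g. count = 3, quota = 5: the 3 available units are consumed,
        // real_quota becomes 2 and count drops to -2, recording the
        // outstanding demand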
// guaranteed tcb is valid since current thread context
node->tcb = ke_current_thread();
// reference for holding a pointer for usage of
ke_reference_obj(&((hw_tcb_t*)node->tcb)->ref_node);
linked_list_push_back(&sem->block_list, &node->node);
// we want to disable DPC here since hw_block will immediately yield if
// a thread blocks itself. Otherwise the sem lock will be locked forever resulting in
// deadlocks. This is only a note since interrupt is disabled here.
//
// also thread_block really can't go after unlocking sem_lock, since if before hw_thread_block
// is called, the scheduler context switch to another thread that signals the sem, then the thread
// would be unblocked again. If we unblock a thread then block it, it will remain blocked forever.
//
// Anyways, hw_thread_block state change must take place simultaneously with the sem_block queue.
result = ke_thread_block((hw_tcb_t*)node->tcb);
}
else
{
// we have enough resources
sem->count -= quota;
}
ke_spin_unlock_lower_irql(&sem->lock, irq);
return result;
}
hw_result_t ke_sem_trywait(hw_sem_t* sem, int32_t quota)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (quota <= 0 || sem == NULL)
return SEM_STATUS_INVALID_ARGUMENTS;
hw_result_t result = SEM_STATUS_OCCUPIED;
hw_irql_t irq;
irq = ke_spin_lock_raise_irql(&sem->lock, HW_IRQL_DPC_LEVEL);
if (sem->count >= quota)
{
result = STATUS_SUCCESS;
sem->count -= quota;
}
ke_spin_unlock_lower_irql(&sem->lock, irq);
return result;
}
//
// HW functions
//
hw_result_t hw_sem_create(hw_handle_t *out, int32_t count)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
hw_result_t result = STATUS_SUCCESS;
hw_sem_t *sem = (hw_sem_t *) hw_alloc(sizeof(hw_sem_t));
if (sem == NULL)
return SEM_STATUS_CANNOT_ALLOCATE_MEM;
result = ke_reference_create(&sem->ref_node, sem_free);
if (!HW_SUCCESS(result))
{
hw_free(sem);
return result;
}
result = ke_sem_init(sem, count);
if (!HW_SUCCESS(result))
{
ke_dereference_obj(&sem->ref_node);
return result;
}
result = hw_create_handle(&sem->ref_node, out);
ke_dereference_obj(&sem->ref_node);
return result;
}
hw_result_t hw_sem_wait(hw_handle_t handle, int32_t quota)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
if (quota <= 0)
return SEM_STATUS_INVALID_ARGUMENTS;
hw_result_t result;
hw_ref_node_t *ref;
// reference sem pointer
result = hw_open_obj_by_handle(handle, &ref);
if (!HW_SUCCESS(result))
{
return result;
}
hw_sem_t *sem = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_sem_t);
hw_sem_node_t* sem_node = (hw_sem_node_t*)hw_alloc(sizeof(hw_sem_node_t));
if(sem_node == NULL)
{
ke_dereference_obj(&sem->ref_node);
return SEM_STATUS_CANNOT_ALLOCATE_MEM;
}
sem_node->free_callback = sem_node_free;
    result = ke_sem_wait(sem, sem_node, quota);
// dereference sem pointer
ke_dereference_obj(&sem->ref_node);
return result;
}
hw_result_t hw_sem_signal(hw_handle_t handle, int32_t quota)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (quota <= 0)
return SEM_STATUS_INVALID_ARGUMENTS;
hw_result_t result;
hw_ref_node_t *ref;
// reference sem pointer
result = hw_open_obj_by_handle(handle, &ref);
if (!HW_SUCCESS(result))
{
return result;
}
hw_sem_t *sem = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_sem_t);
result = ke_sem_signal(sem, quota);
//dereference ref node
ke_dereference_obj(&sem->ref_node);
return result;
}
hw_result_t hw_sem_trywait(hw_handle_t handle, int32_t quota)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (quota <= 0)
return SEM_STATUS_INVALID_ARGUMENTS;
hw_result_t result;
hw_ref_node_t *ref;
// reference sem pointer
result = hw_open_obj_by_handle(handle, &ref);
if (!HW_SUCCESS(result))
{
return result;
}
hw_sem_t *sem = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_sem_t);
result = ke_sem_trywait(sem, quota);
ke_dereference_obj(&sem->ref_node);
return result;
}
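//
// Illustrative only (not part of the original change): a minimal usage sketch
// of the hw_sem_* API above, assuming a producer and a consumer thread.
//
//   hw_handle_t sem;
//   if (HW_SUCCESS(hw_sem_create(&sem, 0)))   // start with no available units
//   {
//       hw_sem_signal(sem, 1);                // producer: release one unit
//       hw_sem_wait(sem, 1);                  // consumer: blocks until a unit is available
//       hw_close_handle(sem);                 // drops the handle's reference
//   }
//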

View File

@@ -0,0 +1,563 @@
/*-----------------------------------------------------
|
| hw_vsnprintf.c
|
| Server-side implementation of printf for bifrost
| Only takes a single string parameter.
|
|------------------------------------------------------
|
| Revision History :
|
| #5 - 31 January 2005 J. Hanes
| Add cts_print_level, cts_print_control()
|
| #4 - 18 October 2004 J. Hanes
| Add cts_print_string and cts_print_hex
|
|------------------------------------------------------
|
| Copyright (C) 2003 Microsoft Corporation
| All Rights Reserved
| Confidential and Proprietary
|
|------------------------------------------------------
*/
#include <bifrost_types.h>
#include <bifrost_intr.h>
#include <conf_defs.h>
#include <math.h>
#define FLG_LONG (1 << 1)
#define FLG_DLONG (1 << 2)
#define FLG_ZERO (1 << 3)
#define FLG_PLUS (1 << 4)
#define FLG_MINUS (1 << 5)
#define FLG_SPACE (1 << 6)
#define FLG_HASH (1 << 7)
#define GetCh() (*fmt++)
#define PutCh(ch) do \
                  { \
                      if ( numChars < bufsize - 1 ) \
                      { \
                          outstring[ numChars++ ] = (ch); \
                      } \
                      else \
                      { \
                          (void)(ch); /* buffer full; drop the character */ \
                      } \
                  } while(FALSE)
static uint32_t
local_strlen( const char * s )
{
int len = 0;
while ( s[ len ] )
{
len++;
}
return len;
}
int
hw_vsnprintf( char* outstring,
uint32_t bufsize,
const char* fmt,
va_list args )
{
const char* prefix;
const char* value;
char postfix [ 8 ];
char buf [ 64 ];
char ch;
uint32_t numChars;
int32_t flags;
int32_t width;
int32_t precision;
int32_t len;
int32_t leading_zeroes;
int32_t ii;
int done;
numChars = 0;
while ( numChars < bufsize - 1 )
{
ch = GetCh();
if (ch == 0)
{
break;
}
if ( ch != '%' )
{
PutCh(ch);
continue;
}
/* %[flags][width][.precision][size]type */
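        /* e.g. "%08x"  -> zero-padded 8-digit hex,
                "%-10s" -> left-justified string in a 10-character field,
                "%lld"  -> 64-bit signed decimal (via the 'll' size flag below) */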
/* isolate the flags */
flags = 0;
done = 0;
do
{
ch = GetCh();
switch ( ch )
{
case '-':
flags |= FLG_MINUS;
break;
case '+':
flags |= FLG_PLUS ;
break;
case ' ':
flags |= FLG_SPACE;
break;
case '0':
flags |= FLG_ZERO ;
break;
default:
done = 1;
break;
}
} while ( !done );
/* get the field width */
if ( ch == '*' )
{
width = va_arg( args, int );
if (width < 0)
{
flags |= FLG_MINUS;
width = -width;
}
ch = GetCh();
}
else if ((ch >= '0') && (ch <= '9'))
{
width = 0;
do
{
width = width * 10 + (ch - '0');
ch = GetCh();
} while ((ch >= '0') && (ch <= '9'));
}
else
{
/* unlimited */
width = -1;
}
/* get the field precision */
if ( ch == '.' )
{
ch = GetCh();
if ( ch == '*' )
{
precision = va_arg( args, int );
if (precision < 0)
{
precision = 0;
}
ch = GetCh();
}
else
{
precision = 0;
while ((ch >= '0') && (ch <= '9'))
{
precision = precision * 10 + (ch - '0');
ch = GetCh();
}
}
}
else
{
/* unlimited */
precision = -1;
}
/* isolate the size */
while ( 1 )
{
if ( ch == 'l' )
{
ch = GetCh();
if (ch == 'l')
{
flags |= FLG_DLONG;
ch = GetCh();
}
else
{
flags |= FLG_LONG;
}
}
else if (ch == 'L')
{
flags |= FLG_DLONG;
ch = GetCh();
}
else
{
break;
}
}
value = "";
len = 0;
prefix = "";
postfix[0] = 0;
leading_zeroes = 0;
switch ( ch )
{
case 'd':
case 'i':
{
int64_t num;
if ( flags & FLG_DLONG )
{
num = va_arg( args, int64_t );
}
else
{
num = va_arg( args, int );
}
if (num < 0)
{
num = -num;
prefix = "-";
}
if (precision < 0)
{
precision = 1;
}
len = 0;
do
{
len ++;
buf[ sizeof(buf) - len ] = ((char) (num % 10)) + '0';
num /= 10;
} while ( num );
value = &buf[ sizeof(buf) - len ];
if ( precision > len )
{
leading_zeroes = precision - len;
len = precision;
}
break;
}
case 'u':
{
uint64_t num;
if (flags & FLG_DLONG)
{
num = va_arg( args, uint64_t );
}
else
{
num = va_arg( args, unsigned int );
}
if (precision < 0)
{
precision = 1;
}
len = 0;
do
{
len++;
buf[sizeof(buf) - len] = ((char) (num % 10)) + '0';
num /= 10;
} while (num);
value = &buf[ sizeof(buf) - len ];
if ( precision > len )
{
leading_zeroes = precision - len;
len = precision;
}
break;
}
case 'e': /* [-]d.dddddde[+/-]dd */
case 'E': /* [-]d.ddddddE[+/-]dd */
case 'f': /* [-]d.dddddd */
case 'g': /* if exp < -4 or exp > prec use 'e', else use 'f' */
case 'G': /* if exp < -4 or exp > prec use 'E', else use 'f' */
{
uint64_t temp, temp2;
int32_t count;
bool negative = FALSE;
double num, fraction;
num = va_arg( args, double );
if (precision < 0)
{
precision = 1;
}
// Special case: value is NaN
if(isnan(num))
{
value = "nan";
len = 3;
break;
}
if(num < 0)
{
num = -num;
negative = TRUE;
}
//
// Assert that the value to be printed does not
// exceed the limit of a uint64_t.
//
//hw_assert(num < (double) ~0ull);
temp = (uint64_t) num;
fraction = num - temp;
len = 0;
count = 0;
do
{
len++;
count++;
fraction *= 10;
temp2 = (uint64_t) fraction;
buf[sizeof(buf) - precision + len - 1] = ((char) (temp2 % 10)) + '0';
} while (count < precision);
len++;
buf[sizeof(buf) - len] = '.';
do
{
len++;
buf[sizeof(buf) - len] = ((char) (temp % 10)) + '0';
temp /= 10;
} while (temp);
if(negative)
{
len++;
buf[sizeof(buf) - len] = '-';
}
value = &buf[ sizeof(buf) - len ];
break;
}
case 'x':
case 'X':
{
uint64_t num;
const char *map;
if (flags & FLG_DLONG)
{
num = va_arg( args, uint64_t );
}
else
{
num = va_arg( args, unsigned int);
}
if( ch == 'x')
{
map = "0123456789abcdef";
}
else
{
map = "0123456789ABCDEF";
}
if ( precision < 0 )
{
precision = 1;
}
len = 0;
do
{
len ++;
buf[ sizeof(buf) - len ] = map[ num & 15 ];
num /= 16;
} while ( num );
value = &buf[ sizeof(buf) - len ];
if (precision > len)
{
leading_zeroes = precision - len;
len = precision;
}
break;
}
case 's':
value = va_arg( args, char * );
if ( 0 == value )
{
value = "<NULL>";
}
while ( (precision < 0 || len < precision) && value[len] )
{
len++;
}
break;
case 0:
PutCh('%');
goto exit;
break;
case 'c':
buf[0] = (char) (va_arg( args, int ) & 0xFF);
value = buf;
len = 1;
break;
default:
/*
* Handle unknown formatting commands
* by just printing the command character using the
* specified width and precision. This is how %% is
* handled.
*/
buf[ 0 ] = ch;
value = buf;
len = 1;
break;
}
if (prefix[0] == 0)
{
if (flags & FLG_PLUS)
{
prefix = "+";
}
else if (flags & FLG_SPACE)
{
prefix = " ";
}
}
else if (flags & FLG_ZERO)
{
/* prefix goes before zero padding, but after space padding */
ii = 0;
while ( prefix[ ii ] )
{
PutCh( prefix[ ii ++ ] );
}
width -= ii;
}
if ((width > 0) && ((flags & FLG_MINUS) == 0))
{
int jj;
char fillChar;
jj = width - local_strlen( prefix ) - len - local_strlen( postfix );
fillChar = (flags & FLG_ZERO) ? '0' : ' ';
if ( jj > 0 )
{
width -= jj;
while ( jj -- > 0 )
{
PutCh( fillChar );
}
}
}
if ( (flags & FLG_ZERO) == 0 )
{
ii = 0;
while ( prefix[ ii ] )
{
PutCh( prefix[ ii ++ ] );
}
width -= ii;
}
len -= leading_zeroes;
while ( leading_zeroes -- )
{
PutCh( '0' );
}
ii = 0;
while ( ii < len )
{
PutCh( value[ ii ++ ] );
}
width -= len;
ii = 0;
while ( postfix[ ii ] )
{
PutCh( postfix[ ii ++ ] );
}
width -= ii;
if (flags & FLG_MINUS)
{
/* handle padding for left-justified field */
if (width > 0)
{
while (width-- > 0)
{
PutCh(' ');
}
}
}
}
exit:
outstring[ numChars ] = 0;
return numChars;
}
int hw_snprintf( char * outstring, uint32_t size, const char * fmt, ... )
{
int ret;
va_list args;
va_start( args, fmt );
ret = hw_vsnprintf( outstring, size, fmt, args );
va_end( args );
return ret;
}
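//
// Illustrative only: for example,
//   char buf[32];
//   int n = hw_snprintf(buf, sizeof(buf), "%s=%d", "tick", 42);
// leaves "tick=42" in buf and returns n == 7 (characters written, excluding the NUL).
//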
#define PRINTBUF_SIZE (1024)
extern int plat_puts( const char* buffer, int str_length );
char printbuf[HW_PROC_CNT][PRINTBUF_SIZE];
void hw_printf(const char *format, ...)
{
hw_irql_t prev_irql = ke_raise_irql(HW_IRQL_DISABLED_LEVEL);
int corenum = ke_get_current_core();
    int len = hw_snprintf(printbuf[corenum], PRINTBUF_SIZE, "Core %d: ", corenum);
va_list argptr;
va_start(argptr, format);
len += hw_vsnprintf(&printbuf[corenum][len], PRINTBUF_SIZE - len, format, argptr);
va_end(argptr);
plat_puts(printbuf[corenum], len);
ke_lower_irql(prev_irql);
}

View File

@@ -0,0 +1,651 @@
#include "bifrost_stdlib.h"
int32_t hw_memcmp(const void *ptr1, const void *ptr2, const size_t len)
{
size_t ctr = 0;
uint8_t a = 0;
uint8_t b = 0;
for (ctr = 0; (ctr < len) && (a == b); ctr++)
{
a = ((uint8_t *) ptr1)[ctr];
b = ((uint8_t *) ptr2)[ctr];
}
if (a < b)
{
return -1;
}
else if (a > b)
{
return 1;
}
else
{
return 0;
}
}
void hw_memcpy(void *destination, const void *source, size_t n)
{
size_t ctr;
    if((uintptr_t)destination % 32 == 0 && (uintptr_t)source % 32 == 0)
{
for (ctr = 0; ctr < n / sizeof(uint32_t); ctr++)
{
*(((uint32_t *) destination) + ctr) = *(((uint32_t *) source) + ctr);
}
// If size is not a multiple of sizeof(uint32_t),
// copy whatever is left
for (ctr *= sizeof(uint32_t); ctr < n; ctr++)
{
*(((uint8_t *) destination) + ctr) = *(((uint8_t *) source) + ctr);
}
}
else
{
for (ctr = 0; ctr < n; ctr++)
{
*(((uint8_t *) destination) + ctr) = *(((uint8_t *) source) + ctr);
}
}
}
void hw_memset(void *ptr, uint8_t value, size_t len)
{
size_t ctr;
uint32_t value32 = value;
value32 |= (value32 << 8) | (value32 << 16) | (value32 << 24);
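    // e.g. value = 0xAB -> value32 = 0xABABABAB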
//
// Handle if ptr is not aligned to 4 bytes
//
uint8_t *bPtr = (uint8_t *) ptr;
for (ctr = 0; ctr < len && ctr < (4 - ((uintptr_t) ptr % 4)); ctr++)
{
bPtr[ctr] = value;
}
//
// While remaining length is greater than 4 bytes,
// write 4 bytes at a time
//
uint32_t *dPtr = (uint32_t *) (bPtr + ctr);
len -= ctr;
for (ctr = 0; ctr < len / 4; ctr++)
{
dPtr[ctr] = value32;
}
//
// If the endpoint is not aligned to 4 bytes, handle
//
bPtr = (uint8_t * )(dPtr + ctr);
len -= (ctr * 4);
for (ctr = 0; ctr < len; ctr++)
{
bPtr[ctr] = value;
}
}
//
//size_t
//hw_strnlen(const char *s, size_t maxlen)
//{
// static const char *const myname = "hw_strnlen";
// size_t len = 0;
//
// if (0 == s)
// {
// hw_errmsg("%s: Error: NULL parameter s\n", myname);
// }
// while (len < maxlen && s[len])
// {
// len++;
// }
// if (len >= maxlen)
// {
// hw_errmsg("%s: Error: reached buffer length limit %d\n"
// "\twithout finding a terminating NULL\n", myname, maxlen);
// }
// return len;
//}
//
//
//char *
//hw_strncpy(char *s1, const char *s2, size_t n)
//{
// size_t ii = 0;
//
// if (0 == s1)
// {
// hw_errmsg("%s: Error: NULL destination parameter s1\n", __func__);
// }
// else if (0 == s2)
// {
// hw_errmsg("%s: Error: NULL source parameter s2\n", __func__);
// }
// else
// {
// for (ii = 0; ii < n; ii++)
// {
// s1[ii] = s2[ii];
// if ('\0' == s2[ii])
// {
// break;
// }
// }
// }
// if (ii >= n && n > 0) // if length was non-zero and we reached the end without finding null character
// {
// s1[n - 1] = '\0';
// }
// return (s1);
//}
//
//
//char *
//hw_strncat(char *s1, const char *s2, size_t n)
//{
// unsigned int foundnull = 0;
// size_t sdex;
// size_t ddex;
//
// if (0 == s1)
// {
// hw_errmsg("%s: Error: NULL destination parameter s1\n", __func__);
// }
// else if (0 == s2)
// {
// hw_errmsg("%s: Error: NULL source parameter s2\n", __func__);
// }
// else
// {
// for (sdex = 0, ddex = 0; sdex < n; ddex++)
// {
// if (0 == s1[ddex])
// {
// foundnull = 1;
// }
// if (1 == foundnull)
// {
// s1[ddex] = s2[sdex++];
// }
// }
// }
// if (0 == foundnull)
// {
// /*
// * ALWAYS terminate the string,
// * truncating the output if necessary.
// */
// s1[n - 1] = 0;
// hw_errmsg("%s: Error: reached buffer length limit %d\n"
// "\tOutput string truncated\n", __func__, n);
// }
// return (s1);
//
//}
//
//
//int32_t
//hw_strcmp(const char *str1, const char *str2)
//{
// const char *p1 = str1;
// const char *p2 = str2;
//
// if ((0 == str1) || (0 == str2))
// {
// int rc;
// hw_errmsg("hw_strcmp: Error: got 0 for input string pointer: "
// "hw_strcmp( 0x%08x, 0x%08x )\n", str1, str2);
// if (str1 > str2) rc = 1;
// else if (str1 < str2) rc = -1;
// else rc = 0;
// return (rc);
// }
// else
// {
// while (*p1 && *p2)
// {
// if (*p1 > *p2) return (1);
// else if (*p1 < *p2) return (-1);
// p1++;
// p2++;
// }
// if (*p1 > *p2) return (1);
// else if (*p1 < *p2) return (-1);
// return (0);
// }
//
//}
//
//UINT64 hw_strhash(const CHAR *str)
//{
// UINT64 hashval = 5381;
// uint32_t c = 0;
//
// while ((c = *str++))
// {
// hashval = ((hashval << 5) + hashval) + c; /* hashval * 33 + c */
// }
//
// return hashval;
//}
//
//BOOL hw_isspace(UCHAR ch)
//{
// BOOL ret = FALSE;
// switch (ch)
// {
// case ' ':
// case '\t':
// case '\n':
// case '\v':
// case '\f':
// case '\r':
// ret = TRUE;
// break;
// default:
// break;
// }
//
// return ret;
//}
//
//BOOL hw_isupper(UCHAR ch)
//{
// return (ch >= 'A' && ch <= 'Z');
//}
//
//BOOL hw_islower(UCHAR ch)
//{
// return (ch >= 'a' && ch <= 'z');
//}
//
//BOOL hw_isdigit(UCHAR ch)
//{
// return (ch >= '0' && ch <= '9');
//}
//
//
///*-----------------------------------------------------
// |
// | hw_strtof.c
// |
// |------------------------------------------------------
// |
// | Revision History :
// |
// | #2 - 20 January 2005 J. Hanes
// | pick lint
// |
// | #1 - 2 March 2004 J. Hanes
// | Ooops, forgot to p5 add it.
// | - 8 February 2004 J. Hanes
// | Copied/modified from
// | hwdev/antichips/antinemo/sysver/lib/src/shared/SSW_stdlib/strtof.c
// |
// |------------------------------------------------------
// |
// | Copyright (C) 2004 Microsoft Corporation
// | All Rights Reserved
// | Confidential and Proprietary
// |
// |------------------------------------------------------
// */
//
//#define MAX10 (6)
//
//static float tentothe2tothe[MAX10] =
// {1.0e1, 1.0e2, 1.0e4, 1.0e8, 1.0e16, 1.0e32};
//
//static float
//hw_pow10(int exp)
//{
// int i = MAX10, j = 32, minus;
// float f = 1.0;
//
// if ((minus = (exp < 0)))
// exp = -exp;
//
// while (--i >= 0)
// {
// if (exp >= j)
// {
// f *= tentothe2tothe[i];
// exp -= j;
// }
// j >>= 1;
// }
//
// return (minus) ? (1.0 / f) : f;
//}
//
//
//float
//hw_strtof(const char *str,
// char **endScan)
//{
// float x = 0.0, div;
// int negsign, exp = 0, expsign;
//
// /* eat any leading whitespace */
// while ((hw_isspace((int) *str)))
// {
// str++;
// }
//
// if ((negsign = (*str == '-')))
// {
// str++;
// }
// else if (*str == '+')
// {
// str++;
// }
//
// /* now read in the first part of the number */
// while (isdigit((int) *str))
// x = 10.0 * x + (*str++ - '0');
//
// /* if we hit a period, do the decimal part now */
// if (*str == '.')
// {
// str++;
// div = 10.0;
// while (isdigit((int) *str))
// {
// x += (*str++ - '0') / div;
// div *= 10.0;
// }
// }
//
// /* check for an exponent */
// if ((*str == 'e') || (*str == 'E'))
// {
// str++;
//
// if ((expsign = (*str == '-')))
// str++;
// else if (*str == '+')
// str++;
//
// /* handle leading zeros, such as in 1.0e-07 or 1.0e001 */
// while (*str == '0')
// str++;
//
// /* now do the exponent */
// while (isdigit((int) *str))
// exp = 10 * exp + (*str++ - '0');
// if (expsign)
// exp = -exp;
//
// if (exp)
// x *= hw_pow10(exp);
// }
//
// if (negsign)
// x = -x;
//
// if (endScan)
// *endScan = (char *) str;
//
// return x;
//}
//
///*-----------------------------------------------------
// |
// | hw_strtol.c
// |
// | hw_strtoul(), hw_strtol(), and hw_atoi()
// |
// |------------------------------------------------------
// |
// | Revision History :
// |
// | #3 - 15 February 2005 J. Hanes
// | Drop hw_api_trace_enable; use SHARED_ERRMSG, SHARED_TRACE
// |
// | #2 - 28 April 2004 J. Hanes
// | Check for 0 string pointer in strtol() and strtoul()
// |
// | #1 - 8 February 2004 J. Hanes
// | Copied in and modified from
// | hwdev/antichips/antinemo/sysver/lib/src/shared/SSW_stdlib/strtol.c
// | Appended atoi() and atol to the bottom.
// | Replaced NULL with 0
// |
// |------------------------------------------------------
// |
// | Copyright (C) 1997 CagEnt Technologies Inc.
// | Copyright (C) 1999 WebTV Networks Inc.
// | Copyright (C) 2004 Microsoft Corporation
// | All Rights Reserved
// | Confidential and Proprietary
// |
// |------------------------------------------------------
// */
//
//
//#define NUMNEG (01000)
//
//
//static int
//_chval(int ch, int radix)
//{
// int val;
//
// val = (hw_isdigit(ch) ? (ch) - '0' :
// hw_islower(ch) ? (ch) - 'a' + 10 :
// hw_isupper(ch) ? (ch) - 'A' + 10 : -1);
//
// return (val < radix ? val : -1);
//}
//
//
//static unsigned long int
//_strtoul(const char *nsptr,
// char **endptr,
// int base)
//{
// const unsigned char *nptr = (const unsigned char *) nsptr; /* see scanf */
// int c, ok = 0;
//
// while (((c = *nptr++) != 0) && hw_isspace(c));
// if (c == '0')
// {
// ok = 1;
// c = *nptr++;
// if (c == 'x' || c == 'X')
// {
// if (base == 0 || base == 16)
// {
// ok = 0;
// base = 16;
// c = *nptr++;
// }
// }
// else if (base == 0)
// base = 8;
// }
//
// if (base == 0) base = 10;
//
// {
// unsigned long dhigh = 0, dlow = 0;
// int digit;
//
// while ((digit = _chval(c, base)) >= 0)
// {
// ok = 1;
// dlow = base * dlow + digit;
// dhigh = base * dhigh + (dlow >> 16);
// dlow &= 0xffff;
// c = *nptr++;
// }
// if (endptr)
// *endptr = ok ? (char *) nptr - 1 : (char *) nsptr;
// /* extra result */
//#ifdef ERRNO
// return overflowed ? (errno = ERANGE, ULONG_MAX)
// : (dhigh << 16) | dlow;
//#else
// return (dhigh << 16) | dlow;
//#endif
// }
//}
//
//
///*
// * The way negation is treated in this may not be quite right ...
// */
//uint32_t
//hw_strtoul(const char *nsptr,
// char **endptr,
// int base)
//{
// const unsigned char *nptr = (const unsigned char *) nsptr;
// int flag = 0, c;
//
// if (0 == nsptr)
// {
// return 0;
// }
// else if (0 == *nsptr)
// {
// return 0;
// }
//
//#ifdef ERRNO
// int errno_saved = errno;
//#endif
// while (((c = *nptr++) != 0) && hw_isspace(c));
// nptr--;
//
//#ifdef ERRNO
// errno = 0;
//#endif
//
// {
// char *endp;
// unsigned long int ud = _strtoul((char *) nptr, &endp, base);
//
// if (endptr)
// {
// *endptr = endp == (char *) nptr ? (char *) nsptr : endp;
// }
//
// /*
// * The following lines depend on the facts that
// * unsigned->int casts and unary '-' cannot cause arithmetic traps.
// * Recode to avoid this?
// */
//#ifdef ERRNO
// if (errno == ERANGE)
// return (uint32_t) ud;
// errno = errno_saved;
//#endif
// return (uint32_t) ud;
// }
//}
//
//
///*
// * The specification in the ANSI information bulletin upsets me here:
// * strtol is of type long int, and 'if the correct value would cause
// * overflow LONG_MAX or LONG_MIN is returned'. Thus for hex input the
// * string 0x80000000 will be considered to have overflowed, and so will
// * be returned as LONG_MAX.
// * These days one should use strtoul for unsigned values, so some of
// * my worries go away.
// */
//int32_t
//hw_strtol(const char *nsptr,
// char **endptr,
// int base)
//{
// const unsigned char *nptr = (const unsigned char *) nsptr;
// int flag = 0;
// int c;
//
// if (0 == nsptr)
// {
// return 0;
// }
// else if (0 == *nsptr)
// {
// return 0;
// }
//
// while (((c = *nptr++) != 0) && hw_isspace(c));
//
// switch (c)
// {
// case '-':
// flag |= NUMNEG;
// /* drop through */
// case '+':
// break;
// default:
// nptr--;
// break;
// }
//
// {
// char *endp;
// unsigned long ud = _strtoul((char *) nptr, &endp, base);
//
// if (endptr)
// {
// *endptr = endp == (char *) nptr ? (char *) nsptr : endp;
// }
//
// /*
// * The following lines depend on the facts that
// * unsigned->int casts and unary '-' cannot cause arithmetic traps.
// * Recode to avoid this?
// */
//#ifdef ERRNO
// if (flag & NUMNEG) {
// return (-(long) ud <= 0) ? -(long) ud : (errno =
// ERANGE, LONG_MIN);
// }
// else {
// return (+(long) ud >= 0) ? +(long) ud : (errno =
// ERANGE, LONG_MAX);
// }
//#else
// if (flag & NUMNEG)
// {
// ud = -ud;
// }
//
// return (int32_t) ud;
//#endif
// }
//
//}
//
///* hw_strtol() */
//
//
//
//int32_t
//hw_atoi(const char *nsptr)
//{
// return (int32_t) hw_strtol(nsptr, 0, 0);
//}
//
//
//int32_t
//hw_atol(const char *nsptr)
//{
// return (int32_t) hw_strtol(nsptr, 0, 0);
//}
//

View File

@@ -0,0 +1,959 @@
#include <bifrost_timer.h>
#include <bifrost_thread.h>
#include "bifrost_apc.h"
#include "bifrost_context.h"
#include "bifrost_rwlock.h"
#include "bifrost_dpc.h"
#include "bifrost_mem.h"
#include "conf_defs.h"
#include "bifrost_stdlib.h"
#include "bifrost_alloc.h"
#include "bifrost_system_constants.h"
#include "bifrost_assert.h"
// The global AVL tree is only for fast TCB lookup
static MEM_SRAM_UC hw_rwlock_t _tree_lock;
static avl_tree_t _thread_tree;
// Represents the current thread
static hw_tcb_t *_current_thread[HW_PROC_CNT];
// These lists are per core X per state X per priority
// no lock needed since only the scheduler of each core modifies this
static linked_list_t _scheduler_queue[HW_PROC_CNT][STATE_NUM - 1][PRIORITY_LEVEL_NUM];
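// e.g. _scheduler_queue[0][STATE_READY][PRIORITY_DEFAULT] holds core 0's ready
// threads at the default priority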
static linked_list_t _scheduler_notif_queue[HW_PROC_CNT];
static hw_spin_lock_t _scheduler_lock[HW_PROC_CNT];
static bool _scheduler_queued[HW_PROC_CNT] = {false};
// The global non-decreasing thread_id counter
static int32_t _thread_id_count;
// The global variable signifying whether the library is initialized
static _Bool _initialized[HW_PROC_CNT] = {false};
static MEM_SRAM_UC int32_t _avl_initialized = 0;
static volatile _Bool _thread_avl_initialized = false;
//==================================
// TCB helper Routines
//==================================
static int thread_tree_compare(avl_tree_node_t *tree_node, avl_tree_node_t *my_node)
{
hw_tcb_t *tcb = OBTAIN_STRUCT_ADDR(tree_node, tree_node, hw_tcb_t);
hw_tcb_t *my_tcb = OBTAIN_STRUCT_ADDR(my_node, tree_node, hw_tcb_t);
return tcb->thread_id - my_tcb->thread_id;
}
static void tcb_free(void *tcb, void *up)
{
hw_free(tcb);
return;
}
// scheduler lock must be held before calling this thing
static void notify_scheduler(hw_tcb_t *thread_handle)
{
if (!thread_handle->in_scheduler_queue)
{
linked_list_push_back(&_scheduler_notif_queue[thread_handle->core_id], &thread_handle->scheduler_queue_node);
thread_handle->in_scheduler_queue = true;
}
return;
}
//==================================
// Per Processor NULL proc
//==================================
static hw_handle_t _null_proc_handles[HW_PROC_CNT];
static void null_proc(void *par)
{
while (1)
{
//hw_printf("%s: running\n", __func__);
ke_thread_yield(ke_get_current_core());
}
}
static inline _Bool is_thread_initialized()
{
return _initialized[ke_get_current_core()] && _avl_initialized == 1;
}
//==================================
// Ke Functions
//==================================
hw_tcb_t *ke_current_thread()
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
return _current_thread[ke_get_current_core()];
}
hw_result_t ke_thread_open(int32_t id, hw_tcb_t **out)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
if (!is_thread_initialized())
return THREAD_STATUS_UNINITIALIZED;
if (out == NULL)
return THREAD_STATUS_INVALID_ARGUMENT;
hw_result_t result;
avl_tree_node_t *temp;
hw_tcb_t *target = NULL;
hw_tcb_t clone;
clone.thread_id = id;
hw_irql_t irql = ke_reader_lock_raise_irql(&_tree_lock, HW_IRQL_APC_LEVEL);
temp = avl_tree_search(&_thread_tree, &clone.tree_node);
if (temp != NULL)
{
// reference for having a pointer
target = OBTAIN_STRUCT_ADDR(temp, tree_node, hw_tcb_t);
result = ke_reference_obj(&target->ref_node);
}
else
{
result = THREAD_STATUS_INVALID_ARGUMENT;
}
ke_reader_unlock_lower_irql(&_tree_lock, irql);
if (HW_SUCCESS(result))
{
*out = target;
}
return result;
}
void ke_thread_schedule(void *info, void *up)
{
hw_assert(ke_get_irql() == HW_IRQL_DPC_LEVEL);
hw_thread_schedule_info_t *result = (hw_thread_schedule_info_t *) info;
if (result == NULL || !is_thread_initialized())
return;
uint32_t coreid = ke_get_current_core();
ke_spin_lock(&_scheduler_lock[coreid]);
// take care of the notification queue first
linked_list_node_t *node = linked_list_pop_front(&_scheduler_notif_queue[coreid]);
while (node != NULL)
{
hw_tcb_t *tcb = OBTAIN_STRUCT_ADDR(node, scheduler_queue_node, hw_tcb_t);
// target thread is not the currently running thread
if (tcb != _current_thread[coreid])
{
if (tcb->state == STATE_OUTSIDE)
{
// this means the target thread is being deleted ->
// detach the thread from scheduler queue and deref the thread
linked_list_remove_ref(&_scheduler_queue[coreid][tcb->location][tcb->priority],
&tcb->list_node);
ke_dereference_obj(&tcb->ref_node);
}
else
{
// target thread is being moved around
if (tcb->location != STATE_OUTSIDE)
{
// this means not a new thread, need to detach from the prev queue
linked_list_remove_ref(&_scheduler_queue[coreid][tcb->location][tcb->priority],
&tcb->list_node);
}
// add to the next queue
linked_list_push_back(&_scheduler_queue[coreid][tcb->state][tcb->priority],
&tcb->list_node);
// update the location
tcb->location = tcb->state;
}
}
// else
// {
// the target thread is the currently running thread
// in this case we don't move it around the queues just yet
// }
// reset the tcb queued flag
tcb->in_scheduler_queue = false;
node = linked_list_pop_front(&_scheduler_notif_queue[coreid]);
}
// handle the currently running thread state change
// and pick a new thread
hw_tcb_t *old_tcb = _current_thread[coreid];
// check current thread -> block to block queue, run to ready queue, exit to exit queue
if (old_tcb != NULL)
{
if (old_tcb->state == STATE_OUTSIDE)
{
result->prev_context = NULL;
ke_dereference_obj(&old_tcb->ref_node);
}
else
{
result->prev_context = old_tcb->regs;
if (old_tcb->state == STATE_RUN)
{
// if nothing is changed about the thread
// change its state to ready
old_tcb->state = STATE_READY;
}
// move to whatever old_tcb state is
linked_list_push_back(&_scheduler_queue[coreid][old_tcb->state][old_tcb->priority],
&old_tcb->list_node);
// update location
old_tcb->location = old_tcb->state;
}
}
else
{
result->prev_context = NULL;
}
// pick a new thread
for (int i = 0; i < PRIORITY_LEVEL_NUM; i++)
{
linked_list_node_t *front;
front = linked_list_pop_front(&_scheduler_queue[coreid][STATE_READY][i]);
if (front != NULL)
{
_current_thread[coreid] = OBTAIN_STRUCT_ADDR(front, list_node, hw_tcb_t);
_current_thread[coreid]->state = STATE_RUN;
result->next_context = _current_thread[coreid]->regs;
// update the location
_current_thread[coreid]->location = STATE_RUN;
break;
}
}
ke_spin_unlock(&_scheduler_lock[coreid]);
// check if there is a queued APC, if there is then fire an APC interrupt
if (linked_list_first(&_current_thread[coreid]->apc_list) != NULL)
{
ke_apc_drain(coreid);
}
return;
}
//
// this assumes that tcb is referenced once already
//
hw_result_t ke_thread_create(hw_tcb_t *tcb,
void (*proc)(void *),
void *args,
hw_thread_priority_t priority,
uint32_t stack_size,
void *stack_ptr)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
if (!is_thread_initialized())
return THREAD_STATUS_UNINITIALIZED;
if (proc == NULL || tcb == NULL || stack_ptr == NULL || stack_size == 0)
return THREAD_STATUS_INVALID_ARGUMENT;
hw_irql_t irql;
int32_t alloc_thread_id;
uint32_t coreid = ke_get_current_core();
alloc_thread_id = ke_interlocked_increment(&_thread_id_count, 1);
if (alloc_thread_id == THREAD_INVALID_PID)
{
return THREAD_STATUS_ID_OVERFLOW;
}
tcb->thread_id = alloc_thread_id;
tcb->proc = proc;
tcb->stack_ptr = stack_ptr;
tcb->args = args;
tcb->priority = priority;
tcb->stack_size = stack_size;
tcb->location = STATE_OUTSIDE;
tcb->core_id = ke_get_current_core();
tcb->exit_code = 0;
tcb->initialized = false;
tcb->in_scheduler_queue = false;
ke_create_context(tcb->regs, (void *) tcb->proc, tcb->stack_ptr, HW_IRQL_USER_LEVEL, args);
ke_spin_lock_init(&tcb->apc_lock);
linked_list_init(&tcb->apc_list);
hw_result_t result;
result = ke_event_init(&tcb->thread_exit_event, EVENT_TYPE_MANUAL);
if (!HW_SUCCESS(result))
{
return result;
}
// reference the TCB for avl tree
result = ke_reference_obj(&tcb->ref_node);
if (HW_SUCCESS(result))
{
// reference the TCB for scheduler
result = ke_reference_obj(&tcb->ref_node);
if (!HW_SUCCESS(result))
{
// deref the tcb for avl tree
ke_dereference_obj(&tcb->ref_node);
}
}
if (HW_SUCCESS(result))
{
// write avl tree and issue command
// we go to DPC level here since AVL insertion and notif node insertion
// must take place atomically
// (The order of notif node matters)
irql = ke_writer_lock_raise_irql(&_tree_lock, HW_IRQL_APC_LEVEL);
ke_spin_lock_raise_irql(&_scheduler_lock[coreid], HW_IRQL_DPC_LEVEL);
avl_tree_insert(&_thread_tree, &tcb->tree_node);
tcb->state = STATE_NEW;
notify_scheduler(tcb);
ke_spin_unlock_lower_irql(&_scheduler_lock[coreid], HW_IRQL_APC_LEVEL);
ke_writer_unlock_lower_irql(&_tree_lock, irql);
}
return result;
}
//
// this assumes that tcb is referenced once already
//
hw_result_t ke_thread_start(hw_tcb_t *tcb)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
if (!is_thread_initialized())
return THREAD_STATUS_UNINITIALIZED;
hw_result_t result = STATUS_SUCCESS;
if (tcb == NULL)
{
result = THREAD_STATUS_INVALID_ARGUMENT;
}
else
{
hw_irql_t irql;
irql = ke_spin_lock_raise_irql(&_scheduler_lock[tcb->core_id], HW_IRQL_DPC_LEVEL);
if (tcb->state == STATE_NEW)
{
tcb->state = STATE_READY;
notify_scheduler(tcb);
}
else
{
result = THREAD_STATUS_INVALID_STATE;
}
ke_spin_unlock_lower_irql(&_scheduler_lock[tcb->core_id], irql);
}
return result;
}
//
// this assumes that tcb is referenced once already
//
hw_result_t ke_thread_terminate(hw_tcb_t *tcb)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
if (!is_thread_initialized())
return THREAD_STATUS_UNINITIALIZED;
hw_result_t result;
hw_irql_t irql;
if (tcb == NULL)
{
result = THREAD_STATUS_INVALID_ARGUMENT;
}
else
{
// if the thread is running on the target core, then schedule an DPC
irql = ke_raise_irql(HW_IRQL_DPC_LEVEL);
ke_spin_lock(&_scheduler_lock[tcb->core_id]);
hw_thread_state_t old_state = tcb->state;
if (old_state != STATE_EXIT)
{
tcb->exit_code = THREAD_EXIT_CODE_TERMINATED;
tcb->state = STATE_EXIT;
notify_scheduler(tcb);
}
if (old_state == STATE_RUN)
{
// yielding here works since the irql is set to DPC_LEVEL, which masks off DPC
// interrupts, yield will trigger once we hw_unlock_irq_restore.
// if it's on another core, trivial, definitely works
ke_thread_yield(tcb->core_id);
}
ke_spin_unlock(&_scheduler_lock[tcb->core_id]);
// event signal should happen at DPC LEVEL and when scheduler lock is released
result = ke_event_signal(&tcb->thread_exit_event);
ke_lower_irql(irql);
}
return result;
}
//
// get exit code also informs the kernel that the thread can be freed
//
hw_result_t ke_thread_get_exit_code(hw_tcb_t *tcb, int32_t *exit_code)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
if (!is_thread_initialized())
return THREAD_STATUS_UNINITIALIZED;
if (tcb == NULL)
{
return THREAD_STATUS_INVALID_ARGUMENT;
}
hw_result_t result = STATUS_SUCCESS;
hw_irql_t irql;
irql = ke_writer_lock_raise_irql(&_tree_lock, HW_IRQL_APC_LEVEL);
ke_spin_lock_raise_irql(&_scheduler_lock[tcb->core_id], HW_IRQL_DPC_LEVEL);
if (tcb->state == STATE_EXIT)
{
if(exit_code != NULL)
{
*exit_code = tcb->exit_code;
}
tcb->state = STATE_OUTSIDE;
notify_scheduler(tcb);
avl_tree_delete(&_thread_tree, &tcb->tree_node);
ke_dereference_obj(&tcb->ref_node);
}
else
{
result = THREAD_STATUS_INVALID_STATE;
}
ke_spin_unlock_lower_irql(&_scheduler_lock[tcb->core_id], HW_IRQL_APC_LEVEL);
ke_writer_unlock_lower_irql(&_tree_lock, irql);
return result;
}
void ke_thread_exit(int32_t exit_code)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
if (is_thread_initialized())
{
hw_irql_t irql;
uint32_t coreid = ke_get_current_core();
// Don't need a reference here since the context is already current thread
hw_tcb_t *current_thread = ke_current_thread();
irql = ke_raise_irql(HW_IRQL_DPC_LEVEL);
ke_spin_lock(&_scheduler_lock[coreid]);
current_thread->exit_code = exit_code;
current_thread->state = STATE_EXIT;
notify_scheduler(current_thread);
ke_thread_yield(coreid);
ke_spin_unlock(&_scheduler_lock[coreid]);
// signal the event at DPC and when scheduler lock is unlocked
ke_event_signal(&current_thread->thread_exit_event);
ke_lower_irql(irql);
}
}
hw_result_t ke_thread_block(hw_tcb_t *tcb)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (!is_thread_initialized())
return THREAD_STATUS_UNINITIALIZED;
hw_result_t result = STATUS_SUCCESS;
if (tcb == NULL)
{
return THREAD_STATUS_INVALID_ARGUMENT;
}
else
{
hw_irql_t irql;
irql = ke_spin_lock_raise_irql(&_scheduler_lock[tcb->core_id], HW_IRQL_DPC_LEVEL);
hw_thread_state_t old_state = tcb->state;
if (old_state == STATE_READY || old_state == STATE_NEW || old_state == STATE_RUN)
{
tcb->state = STATE_BLOCK;
notify_scheduler(tcb);
if (old_state == STATE_RUN)
{
ke_thread_yield(tcb->core_id);
}
}
else
{
result = THREAD_STATUS_INVALID_STATE;
}
ke_spin_unlock_lower_irql(&_scheduler_lock[tcb->core_id], irql);
}
return result;
}
hw_result_t ke_thread_resume(hw_tcb_t *tcb)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (!is_thread_initialized())
return THREAD_STATUS_UNINITIALIZED;
hw_result_t result = STATUS_SUCCESS;
if (tcb == NULL)
{
result = THREAD_STATUS_INVALID_ARGUMENT;
}
else
{
hw_irql_t irql;
irql = ke_spin_lock_raise_irql(&_scheduler_lock[tcb->core_id], HW_IRQL_DPC_LEVEL);
if (tcb->state == STATE_BLOCK)
{
tcb->state = STATE_READY;
notify_scheduler(tcb);
}
else
{
result = THREAD_STATUS_INVALID_STATE;
}
ke_spin_unlock_lower_irql(&_scheduler_lock[tcb->core_id], irql);
}
return result;
}
hw_result_t ke_thread_yield(uint32_t core)
{
ke_queue_scheduler_dpc(core);
return ke_dpc_drain(core);
}
//==================================
// HW Functions
//==================================
hw_result_t hw_thread_setup()
{
uint32_t coreid = ke_get_current_core();
if (ke_interlocked_exchange(&_avl_initialized, 1) == 0)
{
// if avl not initialized, then do it
avl_tree_init(&_thread_tree, thread_tree_compare);
ke_rwlock_init(&_tree_lock);
_thread_id_count = 0;
_thread_avl_initialized = true;
}
else
{
while (!_thread_avl_initialized);
}
hw_result_t result = STATUS_SUCCESS;
if (!_initialized[coreid])
{
linked_list_init(&_scheduler_notif_queue[coreid]);
ke_spin_lock_init(&_scheduler_lock[coreid]);
_current_thread[coreid] = NULL;
for (int j = 0; j < STATE_NUM - 1; j++)
{
for (int k = 0; k < PRIORITY_LEVEL_NUM; k++)
{
linked_list_init(&_scheduler_queue[coreid][j][k]);
}
}
_initialized[coreid] = true;
result = hw_thread_create(null_proc, NULL, PRIORITY_LOWEST, THREAD_DEFAULT_STACK_SIZE,
&_null_proc_handles[coreid]);
if (HW_SUCCESS(result))
{
result = hw_thread_start(_null_proc_handles[coreid]);
}
}
return result;
}
hw_result_t hw_thread_create(void (*proc)(void *),
void *args,
hw_thread_priority_t priority,
uint32_t stack_size,
hw_handle_t *thread_handle)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
if (!is_thread_initialized())
return THREAD_STATUS_UNINITIALIZED;
if (proc == NULL || thread_handle == NULL)
return THREAD_STATUS_INVALID_ARGUMENT;
if (stack_size == 0)
{
stack_size = THREAD_DEFAULT_STACK_SIZE;
}
hw_tcb_t *tcb = (hw_tcb_t *) hw_alloc(sizeof(hw_tcb_t) + stack_size);
if (tcb == NULL)
{
return THREAD_STATUS_OUT_OF_MEMORY;
}
void *stack = (char *) tcb + sizeof(hw_tcb_t) + stack_size;
// reference for the tcb pointer this function owns
hw_result_t result = ke_reference_create(&tcb->ref_node, tcb_free);
if (!HW_SUCCESS(result))
{
hw_free(tcb);
return result;
}
result = hw_create_handle(&tcb->ref_node, thread_handle);
if (!HW_SUCCESS(result))
{
hw_free(tcb);
return result;
}
// FINISHED CREATING REFERABLE tcb object
result = ke_thread_create(tcb,
proc,
args,
priority,
stack_size,
stack);
if (!HW_SUCCESS(result))
{
hw_close_handle(*thread_handle);
}
// dereference for the tcb pointer this function owns
ke_dereference_obj(&tcb->ref_node);
return result;
}
hw_result_t hw_thread_start(hw_handle_t thread_handle)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
if (!is_thread_initialized())
return THREAD_STATUS_UNINITIALIZED;
hw_result_t result;
hw_ref_node_t *ref;
result = hw_open_obj_by_handle(thread_handle, &ref);
if (HW_SUCCESS(result))
{
hw_tcb_t *tcb = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_tcb_t);
result = ke_thread_start(tcb);
ke_dereference_obj(&tcb->ref_node);
}
return result;
}
hw_result_t hw_thread_terminate(hw_handle_t thread_handle)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
if (!is_thread_initialized())
return THREAD_STATUS_UNINITIALIZED;
hw_result_t result;
hw_ref_node_t *ref;
result = hw_open_obj_by_handle(thread_handle, &ref);
if (HW_SUCCESS(result))
{
hw_tcb_t *tcb = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_tcb_t);
result = ke_thread_terminate(tcb);
ke_dereference_obj(&tcb->ref_node);
}
return result;
}
hw_result_t hw_thread_get_exit_code(hw_handle_t thread_handle, int32_t *exit_code)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
if (!is_thread_initialized())
return THREAD_STATUS_UNINITIALIZED;
hw_result_t result;
hw_ref_node_t *ref;
result = hw_open_obj_by_handle(thread_handle, &ref);
if (HW_SUCCESS(result))
{
hw_tcb_t *tcb = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_tcb_t);
result = ke_thread_get_exit_code(tcb, exit_code);
ke_dereference_obj(&tcb->ref_node);
}
return result;
}
void hw_thread_exit(int32_t exit_code)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
if (is_thread_initialized())
{
ke_thread_exit(exit_code);
}
return;
}
hw_result_t hw_thread_block(hw_handle_t thread_handle)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (!is_thread_initialized())
return THREAD_STATUS_UNINITIALIZED;
hw_result_t result;
hw_ref_node_t *ref;
result = hw_open_obj_by_handle(thread_handle, &ref);
if (HW_SUCCESS(result))
{
hw_tcb_t *tcb = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_tcb_t);
result = ke_thread_block(tcb);
ke_dereference_obj(&tcb->ref_node);
}
return result;
}
hw_result_t hw_thread_resume(hw_handle_t thread_handle)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
if (!is_thread_initialized())
return THREAD_STATUS_UNINITIALIZED;
hw_result_t result;
hw_ref_node_t *ref;
result = hw_open_obj_by_handle(thread_handle, &ref);
if (HW_SUCCESS(result))
{
hw_tcb_t *tcb = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_tcb_t);
result = ke_thread_resume(tcb);
ke_dereference_obj(&tcb->ref_node);
}
return result;
}
int32_t ke_current_thread_id()
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
// NO need to reference since current thread context
hw_tcb_t *tcb = ke_current_thread();
return tcb == NULL ? THREAD_INVALID_PID : tcb->thread_id;
}
int32_t hw_current_thread_id()
{
return ke_current_thread_id();
}
hw_handle_t hw_current_thread()
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
return HW_HANDLE_CURRENT_THREAD;
}
bool ke_query_and_clear_scheduler_dpc(uint32_t core)
{
bool result = _scheduler_queued[core];
_scheduler_queued[core] = false;
return result;
}
hw_result_t ke_queue_scheduler_dpc(uint32_t core)
{
_scheduler_queued[core] = true;
return STATUS_SUCCESS;
}
hw_result_t hw_wait_for_thread_exit(hw_handle_t thread_handle)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
hw_result_t result;
hw_ref_node_t *ref;
result = hw_open_obj_by_handle(thread_handle, &ref);
if (!HW_SUCCESS(result))
{
return result;
}
    hw_event_node_t *event_node = (hw_event_node_t *) hw_alloc(sizeof(hw_event_node_t));
    if (event_node == NULL)
    {
        // drop the reference taken by hw_open_obj_by_handle before bailing out
        ke_dereference_obj(ref);
        return THREAD_STATUS_OUT_OF_MEMORY;
    }
    // TODO: reusing tcb_free works only because it just hw_free()s its argument;
    // the event node deserves its own free callback
    event_node->free_func = tcb_free;
    hw_tcb_t *tcb = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_tcb_t);
result = ke_event_wait(&tcb->thread_exit_event, event_node);
ke_dereference_obj(&tcb->ref_node);
return result;
}
hw_result_t hw_thread_assert_state(hw_handle_t thread_handle, hw_thread_state_t state)
{
if (state == STATE_OUTSIDE)
return THREAD_STATUS_INVALID_ARGUMENT;
hw_result_t result = STATUS_SUCCESS;
hw_ref_node_t *ref;
result = hw_open_obj_by_handle(thread_handle, &ref);
if (HW_SUCCESS(result))
{
hw_tcb_t *tcb = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_tcb_t);
hw_irql_t irql;
uint32_t coreid = tcb->core_id;
irql = ke_spin_lock_raise_irql(&_scheduler_lock[coreid], HW_IRQL_DPC_LEVEL);
if (tcb->state != state)
{
result = THREAD_STATUS_INVALID_STATE;
}
if (HW_SUCCESS(result))
{
result = THREAD_STATUS_INVALID_STATE;
if (state == STATE_RUN)
{
if (_current_thread[tcb->core_id] == tcb)
{
result = STATUS_SUCCESS;
}
}
else
{
linked_list_node_t *node = linked_list_first(&_scheduler_queue[coreid][state][tcb->priority]);
while (node != NULL)
{
hw_tcb_t *tcb2 = OBTAIN_STRUCT_ADDR(node, list_node, hw_tcb_t);
if (tcb2 == tcb)
{
result = STATUS_SUCCESS;
break;
}
node = linked_list_next(node);
}
}
}
ke_spin_unlock_lower_irql(&_scheduler_lock[coreid], irql);
ke_dereference_obj(&tcb->ref_node);
}
return result;
}
hw_result_t hw_thread_sleep(uint32_t millis)
{
if (millis == 0)
return STATUS_SUCCESS;
hw_handle_t timer;
hw_result_t result;
result = hw_timer_create(&timer, TIMER_TYPE_MANUAL_RESET);
if (!HW_SUCCESS(result))
{
return result;
}
result = hw_timer_set(timer, millis, false);
if (!HW_SUCCESS(result))
{
hw_close_handle(timer);
return result;
}
result = hw_timer_wait(timer);
hw_close_handle(timer);
return result;
}
hw_result_t hw_thread_open(int32_t thread_id, hw_handle_t *out)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
if (!is_thread_initialized())
return THREAD_STATUS_UNINITIALIZED;
if (out == NULL)
{
return THREAD_STATUS_INVALID_ARGUMENT;
}
hw_result_t result;
hw_tcb_t *target = NULL;
result = ke_thread_open(thread_id, &target);
if (HW_SUCCESS(result))
{
// only enter here if ptr ref
// if we successfully referenced it, then create a handle
result = hw_create_handle(&target->ref_node, out);
// don't need ptr anymore, deref the pointer
ke_dereference_obj(&target->ref_node);
}
return result;
}
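//
// Illustrative only (not part of the original change): a minimal sketch of
// looking up a thread by ID, assuming some_thread_id was obtained earlier via
// hw_current_thread_id() on that thread.
//
//   hw_handle_t h;
//   if (HW_SUCCESS(hw_thread_open(some_thread_id, &h)))
//   {
//       hw_wait_for_thread_exit(h);
//       int32_t code;
//       hw_thread_get_exit_code(h, &code);
//       hw_close_handle(h);
//   }
//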

View File

@@ -0,0 +1,355 @@
#include <conf_defs.h>
#include <bifrost_timer.h>
#include <bifrost_stdlib.h>
#include <bifrost_alloc.h>
#include <bifrost_apc.h>
#include <bifrost_print.h>
#include "bifrost_assert.h"
static linked_list_t _timer_list[HW_PROC_CNT];
static bool _initialized[HW_PROC_CNT] = {false};
static bool _timer_queued[HW_PROC_CNT] = {false};
static void timer_free(void *timer, void *up)
{
hw_free(timer);
}
static void timer_node_free(void *timer, void *up)
{
hw_free(timer);
}
// ==========================
// Ke Functions
// ==========================
hw_result_t ke_timer_init(hw_timer_t *timer,
hw_timer_type_t timer_type)
{
if (timer != NULL)
{
linked_list_init(&timer->waiting_threads);
timer->tick = 0;
timer->elapsed_tick = 0;
ke_spin_lock_init(&timer->lock);
timer->timer_type = timer_type;
timer->signaled = false;
timer->periodic = false;
timer->active = false;
return STATUS_SUCCESS;
}
return TIMER_STATUS_INVALID_ARGUMENTS;
}
hw_result_t ke_timer_setup()
{
uint32_t coreid = ke_get_current_core();
if (!_initialized[coreid])
{
linked_list_init(&_timer_list[coreid]);
_initialized[coreid] = true;
}
    return STATUS_SUCCESS;
}
hw_result_t ke_timer_wait(hw_timer_t *timer, hw_timer_node_t *node)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
if (!_initialized[ke_get_current_core()])
{
return TIMER_STATUS_NOT_INITIALIZED;
}
if (timer == NULL || node == NULL)
{
return TIMER_STATUS_INVALID_ARGUMENTS;
}
    hw_tcb_t *cur_thread = ke_current_thread();
    hw_irql_t irql;
    irql = ke_spin_lock_raise_irql(&timer->lock, HW_IRQL_DPC_LEVEL);
    if (!timer->signaled)
    {
        // timer has not expired: reference the thread for the waiting list;
        // ke_timer_tick dereferences it when the timer fires
        ke_reference_obj(&cur_thread->ref_node);
        node->tcb = (void *) cur_thread;
        linked_list_push_back(&timer->waiting_threads, &node->list_node);
        ke_thread_block(cur_thread);
    }
// release the lock
ke_spin_unlock_lower_irql(&timer->lock, irql);
return STATUS_SUCCESS;
}
hw_result_t ke_timer_set(hw_timer_t *timer, uint32_t tick, bool periodic)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
uint32_t coreid = ke_get_current_core();
if (!_initialized[coreid])
return TIMER_STATUS_NOT_INITIALIZED;
if (timer == NULL || tick == 0)
return TIMER_STATUS_INVALID_ARGUMENTS;
hw_irql_t irql;
// Raising to DPC level prevents anyone
// else from accessing the timer list
irql = ke_raise_irql(HW_IRQL_DPC_LEVEL);
ke_spin_lock(&timer->lock);
timer->tick = tick;
timer->elapsed_tick = 0;
timer->periodic = periodic;
// reset signaled
timer->signaled = false;
if (!timer->active)
{
// reference timer for keeping it on the linked list
ke_reference_obj(&timer->ref_node);
linked_list_push_back(&_timer_list[coreid], &timer->list_node);
timer->active = true;
}
ke_spin_unlock(&timer->lock);
ke_lower_irql(irql);
return STATUS_SUCCESS;
}
hw_result_t ke_timer_cancel(hw_timer_t *timer)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
uint32_t coreid = ke_get_current_core();
if (!_initialized[coreid])
return TIMER_STATUS_NOT_INITIALIZED;
if (timer == NULL)
return TIMER_STATUS_INVALID_ARGUMENTS;
hw_irql_t irql = ke_raise_irql(HW_IRQL_DPC_LEVEL);
ke_spin_lock(&timer->lock);
// simply release from the list
if (timer->active)
{
linked_list_remove_ref(&_timer_list[coreid], &timer->list_node);
timer->active = false;
// drop the reference taken when the timer was added to the list
ke_dereference_obj(&timer->ref_node);
}
ke_spin_unlock(&timer->lock);
ke_lower_irql(irql);
return STATUS_SUCCESS;
}
bool ke_query_and_clear_timer_dpc(uint32_t core)
{
bool result = _timer_queued[core];
_timer_queued[core] = false;
return result;
}
hw_result_t ke_queue_timer_dpc(uint32_t core)
{
_timer_queued[core] = true;
return STATUS_SUCCESS;
}
void ke_timer_tick(void *kp, void *up)
{
hw_assert(ke_get_irql() == HW_IRQL_DPC_LEVEL);
uint32_t coreid = ke_get_current_core();
if (!_initialized[coreid])
return;
linked_list_node_t *cur_node = linked_list_first(&_timer_list[coreid]);
while (cur_node != NULL)
{
hw_timer_t *timer = OBTAIN_STRUCT_ADDR(cur_node, list_node, hw_timer_t);
cur_node = linked_list_next(cur_node);
timer->elapsed_tick++;
if (timer->elapsed_tick >= timer->tick)
{
// if timer expired
ke_spin_lock(&timer->lock);
linked_list_node_t *list_node = linked_list_pop_front(&timer->waiting_threads);
while (list_node != NULL)
{
hw_timer_node_t *node = OBTAIN_STRUCT_ADDR(list_node, list_node, hw_timer_node_t);
ke_thread_resume((hw_tcb_t *) node->tcb);
list_node = linked_list_pop_front(&timer->waiting_threads);
ke_dereference_obj(&((hw_tcb_t *) node->tcb)->ref_node);
node->free_func(node, NULL);
}
if(timer->timer_type == TIMER_TYPE_MANUAL_RESET)
{
timer->signaled = true;
}
if (!timer->periodic)
{
// if not periodic, remove from the list
timer->active = false;
linked_list_remove_ref(&_timer_list[coreid], &timer->list_node);
// drop the reference taken when the timer was added to the list
ke_dereference_obj(&timer->ref_node);
}
else
{
// otherwise still active
timer->elapsed_tick = 0;
}
ke_spin_unlock(&timer->lock);
}
}
return;
}
// ===================
// HW functions
// ===================
hw_result_t hw_timer_wait(hw_handle_t handle)
{
hw_assert(ke_get_irql() <= HW_IRQL_APC_LEVEL);
hw_result_t result;
hw_ref_node_t *ref;
result = hw_open_obj_by_handle(handle, &ref);
if (!HW_SUCCESS(result))
{
return result;
}
hw_timer_t *timer = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_timer_t);
hw_timer_node_t* node = (hw_timer_node_t*)hw_alloc(sizeof(hw_timer_node_t));
if(node == NULL)
{
ke_dereference_obj(&timer->ref_node);
return TIMER_STATUS_CANNOT_ALLOCATE_MEM;
}
node->free_func = timer_node_free;
result = ke_timer_wait(timer, node);
if (!HW_SUCCESS(result))
{
// the node was never queued on the timer, so release it here
hw_free(node);
}
ke_dereference_obj(&timer->ref_node);
return result;
}
hw_result_t hw_timer_set(hw_handle_t handle, uint32_t tick, bool periodic)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
hw_result_t result;
hw_ref_node_t *ref;
result = hw_open_obj_by_handle(handle, &ref);
if (!HW_SUCCESS(result))
{
return result;
}
hw_timer_t *timer = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_timer_t);
result = ke_timer_set(timer, tick, periodic);
ke_dereference_obj(&timer->ref_node);
return result;
}
hw_result_t hw_timer_cancel(hw_handle_t handle)
{
hw_assert(ke_get_irql() <= HW_IRQL_DPC_LEVEL);
hw_result_t result;
hw_ref_node_t *ref;
result = hw_open_obj_by_handle(handle, &ref);
if (!HW_SUCCESS(result))
{
return result;
}
hw_timer_t *timer = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_timer_t);
result = ke_timer_cancel(timer);
ke_dereference_obj(&timer->ref_node);
return result;
}
hw_result_t hw_timer_create(hw_handle_t *out,
hw_timer_type_t type)
{
hw_assert(ke_get_irql() < HW_IRQL_DPC_LEVEL);
hw_result_t result = STATUS_SUCCESS;
hw_timer_t *timer = (hw_timer_t *) hw_alloc(sizeof(hw_timer_t));
if (timer == NULL)
return TIMER_STATUS_CANNOT_ALLOCATE_MEM;
result = ke_reference_create(&timer->ref_node, timer_free);
if (!HW_SUCCESS(result))
{
hw_free(timer);
return result;
}
result = ke_timer_init(timer, type);
if (!HW_SUCCESS(result))
{
ke_dereference_obj(&timer->ref_node);
return result;
}
result = hw_create_handle(&timer->ref_node, out);
ke_dereference_obj(&timer->ref_node);
return result;
}

View File

@ -0,0 +1,127 @@
/*-------------------------------------------------------
|
| bifrost_alloc.h
|
| Contains functions and structs for Bifrost's
| various implementations of alloc.
|
|--------------------------------------------------------
|
| Copyright ( C ) 2011 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#ifndef _BIFROST_ALLOC_H
#define _BIFROST_ALLOC_H
#include "bifrost_types.h"
void* hw_alloc(size_t size);
void hw_free(void* ptr);
void hw_alloc_setup();
//
//#define HW_ALLOC_NUM_BLOCKS (0x2000 - 1)
//
//typedef struct
//{
// uint64_t addr;
// uint64_t size;
// HW_HEAP_TYPE type;
//} HW_ALLOC_RECORD;
//
//typedef struct
//{
// HW_ALLOC_RECORD records[HW_ALLOC_NUM_BLOCKS];
// uint32_t next_unused;
// uint32_t count;
//} __attribute__ ((packed,aligned(HW_MAX_CACHELINE_SIZE))) HW_ALLOC_TRACKER;
//
//#define ALLOC_NO_OWNER (~0u)
//#define HW_INTERNAL_TESTID (~1u)
//
//#define ALLOC_IDENTITY_MAX_NUM_LISTS (3)
//
//#define HW_MASK_EXACT_ADDR (0xFFFFFFFF)
//
////
//// RESERVED_MEM_BYTES indicates the number of bytes at the
//// beginning of each block which will not be handed out
//// as allocations.
////
//// Setting RESERVED_MEM_BYTES non-zero has the effect
//// of making the first successful allocation return a non-zero
//// buffer address, even if the block at address zero is free.
////
//// This is a Good Thing, because the allocator returns
//// zero to indicate failure. An error could result from
//// letting RESERVED_MEM_BYTES be 0, because allocation
//// could succeed and return 0 as the block address.
////
//#define RESERVED_MEM_BYTES (0x1000)
//
//typedef struct blocklist_node_struct
//{
// PTR_FIELD addr;
// SIZE_FIELD size;
// UINT32 free;
// UINT32 owner;
// struct blocklist_node_struct* next;
// struct blocklist_node_struct* prev;
//} blistNode;
//
//typedef struct
//{
// blistNode* block_list;
// blistNode block_array[ HW_ALLOC_NUM_BLOCKS + 1 ];
// UINT32 next_unused;
// PTR_FIELD unusedAddr;
// UINT8 rsvd[16]; // Pad out to cacheline size
//} __attribute__ ((aligned(HW_MAX_CACHELINE_SIZE))) allocBlock;
//
//extern allocBlock heapAllocBlock[HW_HEAP_BLOCK_CNT];
//extern HW_ERROR_CODE init_heap_blocklist();
//
//extern allocBlock hostPhysicalAllocBlock;
//extern HW_ERROR_CODE init_physical_blocklist();
//
////
//// Functions for printing alloc's internal
//// structures.
////
//extern void print_blocklist( allocBlock* block );
//extern void print_node( blistNode* np );
//
////
//// Functions for performing allocation
////
//extern HW_RESULT find_next_unused( UINT32* index, allocBlock* block );
//extern blistNode* new_blist_node( PTR_FIELD remote_addr,
// PTR_FIELD size,
// allocBlock* block );
//
//extern void insert_after( blistNode* item, blistNode* after );
//extern blistNode* merge_blocks( blistNode* b1, blistNode* b2 );
//extern blistNode* find_block( PTR_FIELD addr, allocBlock* block );
//extern blistNode* free_block( blistNode* bp, HW_TESTID testID );
//extern HW_ERROR_CODE free_all_blocks( HW_TESTID testID, allocBlock* block );
//
//extern blistNode* find_best_fit( PTR_FIELD size,
// PTR_FIELD pattern,
// PTR_FIELD mask,
// allocBlock* block,
// UINT32 blockAlign );
//
//extern BOOL find_best_fit_identity( PTR_FIELD size,
// PTR_FIELD pattern,
// PTR_FIELD mask,
// UINT32 numBlocks,
// allocBlock** allocBlocks,
// blistNode** foundNodes,
// UINT32 blockAlign );
#endif

View File

@ -0,0 +1,13 @@
#ifndef _BIFROST_APC_H
#define _BIFROST_APC_H
#include "bifrost_statuscode.h"
#include "bifrost_thread.h"
hw_result_t hw_apc_queue(hw_handle_t thread_handle, hw_callback_func_t callback, void* args);
hw_result_t ke_apc_setup(uint32_t vec);
hw_result_t ke_apc_drain(uint32_t core);
#endif

View File

@ -0,0 +1,10 @@
#ifndef _BIFROST_ASSERT_H
#define _BIFROST_ASSERT_H
#include "bifrost_types.h"
void hw_assert_ex(const char* expr_str, const char* file, int32_t line, int32_t expr);
#define hw_assert(expr) hw_assert_ex(#expr, __FILE__, __LINE__, expr)
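// Usage sketch: hw_assert(ptr != NULL);
// hw_assert_ex receives the stringified expression ("ptr != NULL"), the source
// file name, the line number of the call site, and the evaluated result.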
#endif

View File

@ -0,0 +1,7 @@
#ifndef _BIFROST_BOOT_H
#define _BIFROST_BOOT_H
#include <bifrost_hs_boot.h>
#endif // _BIFROST_BOOT_H

View File

@ -0,0 +1,7 @@
#ifndef _BIFROST_CONTEXT_H
#define _BIFROST_CONTEXT_H
#include <bifrost_hs_context.h>
#endif // _BIFROST_CONTEXT_H

View File

@ -0,0 +1,28 @@
/*-------------------------------------------------------
|
| bifrost_dpc.h
|
| Contains Bifrost deferred procedure call APIs,
|
|--------------------------------------------------------
|
| Copyright ( C ) 2016 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#ifndef _BIFROST_DPC_H_
#define _BIFROST_DPC_H_
#include "bifrost_types.h"
#include "bifrost_statuscode.h"
hw_result_t hw_dpc_queue(uint32_t core, hw_callback_func_t proc, void* args);
hw_result_t ke_dpc_setup(uint32_t vec);
hw_result_t ke_dpc_drain(uint32_t core);
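// Illustrative usage sketch (my_dpc_proc is a hypothetical callback):
//   void my_dpc_proc(void *kernel_args, void *user_args) { /* deferred work */ }
//   ...
//   hw_dpc_queue(ke_get_current_core(), my_dpc_proc, NULL);
// presumably runs my_dpc_proc at DPC level on the chosen core.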
#endif

View File

@ -0,0 +1,45 @@
#ifndef _BIFROST_EVENT_H
#define _BIFROST_EVENT_H
#include "linked_list.h"
#include "bifrost_ref.h"
typedef enum
{
EVENT_TYPE_MANUAL,
EVENT_TYPE_AUTO
} hw_event_type_t;
typedef struct
{
hw_ref_node_t ref_node;
bool signaled;
linked_list_t waiting_threads;
hw_event_type_t type;
hw_spin_lock_t lock;
} hw_event_t;
typedef struct
{
linked_list_node_t list_node;
void* tcb;
hw_callback_func_t free_func;
} hw_event_node_t;
hw_result_t hw_event_wait(hw_handle_t event);
hw_result_t hw_event_reset(hw_handle_t event);
hw_result_t hw_event_signal(hw_handle_t handle);
hw_result_t hw_event_create(hw_handle_t *out, hw_event_type_t event_type);
hw_result_t ke_event_wait(hw_event_t *event, hw_event_node_t *node);
hw_result_t ke_event_reset(hw_event_t *event);
hw_result_t ke_event_signal(hw_event_t *event);
hw_result_t ke_event_init(hw_event_t *event, hw_event_type_t event_type);
#endif // _BIFROST_EVENT_H

View File

@ -0,0 +1,14 @@
#ifndef _BIFROST_INTR_H
#define _BIFROST_INTR_H
#include "bifrost_hs_intr.h"
uint32_t ke_get_system_tick();
hw_irql_t ke_raise_irql(hw_irql_t irql);
hw_irql_t ke_lower_irql(hw_irql_t irql);
void ke_timer_interrupt_handler(void *intr_stack, void *usr_context);
#endif // _BIFROST_INTR_H

View File

@ -0,0 +1,24 @@
#ifndef _BIFROST_LOCK_H_
#define _BIFROST_LOCK_H_
#include "bifrost_intr.h"
#include "bifrost_types.h"
typedef struct
{
int32_t lock;
} hw_spin_lock_t;
#define HW_LOCK_INITIALIZER {0}
void ke_spin_lock_init(hw_spin_lock_t *lock);
hw_irql_t ke_spin_lock_raise_irql(hw_spin_lock_t *lock, hw_irql_t irql);
void ke_spin_unlock_lower_irql(hw_spin_lock_t *lock, hw_irql_t irql);
void ke_spin_lock(hw_spin_lock_t *lock);
void ke_spin_unlock(hw_spin_lock_t *lock);
#endif

View File

@ -0,0 +1,175 @@
/*-------------------------------------------------------
|
| bifrost_macros.h
|
| Contains commonly used macros
|
|--------------------------------------------------------
|
| Copyright ( C ) 2013 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#ifndef _BIFROST_MACROS_H
#define _BIFROST_MACROS_H
//
// Some compile macros imply other macros
//
#ifdef BASRAM_HOSTMEM
#define BASRAM_ONLY
#endif
#ifdef BASRAM_ONLY
#define REDUCED_MEMORY_FOOTPRINT
#endif
//
// C-portable version of extern "C"
//
#ifdef __cplusplus
#define EXTERN_C extern "C"
#else
#define EXTERN_C extern
#endif
//
// Returns the stringified value of a macro
//
#define _STR(macro) #macro
#define STR(macro) _STR(macro)
//
// Combine two macros or statements
//
#define _CONCAT(x,y) x##y
#define CONCAT(x,y) _CONCAT(x,y)
//
// Test function defines
//
#define INIT_TEST CONCAT(init_, TEST_NAME)
#define RUN_TEST CONCAT(runTest_, TEST_NAME)
#define CLEANUP_TEST CONCAT(cleanup_, TEST_NAME)
//
// Bifrost V3 parameter retrieval
//
#define TEST_PARAMS_CLASS(tpName) \
class tpName; \
extern tpName CONCAT(TestParamsArray__, TEST_NAME)[];
#define TEST_VARS_CLASS(tpName) \
class tpName; \
extern tpName CONCAT(TestVarsArray__, TEST_NAME)[];
#define hw_getTestParamsPtr(void) hw_initTestParams((BifrostTestParams*) hw_getBufferPtr(CONCAT(TestParamsArray__, TEST_NAME)))
#define hw_getTestVarsPtr(void) hw_initTestVars((BifrostTestVars*) hw_getBufferPtr(CONCAT(TestVarsArray__, TEST_NAME)))
//
// Mimic C++11 static_assert to allow compile-time assertions.
//
// Makes use of the fact that failed assertions will try to redefine the
// dummy typedef to a different array size, which causes a build error.
//
// If you see "error: conflicting declaration" it means your assert does not hold.
//
#if !defined(static_assert)
#define static_assert_plain(val) extern char _static_assert_arr[1]; extern char _static_assert_arr[(val) != 0];
#define static_assert(val, ...) static_assert_plain(val); // For compat with C++ two-parameter version
#endif
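// Example: static_assert(sizeof(int32_t) == 4, "int32_t must be 4 bytes");
// compiles cleanly, while an assertion whose condition is false redeclares
// _static_assert_arr with size 0 and triggers the conflicting-declaration error.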
//
// Simple macros for rounding an unsigned integer
// up or down to a multiple of some number
//
#define roundUpToMultipleUnsigned(num, multiple) ((multiple) ? (num) + ((num) % (multiple) ? (multiple) - ((num) % (multiple)) : 0) : 0)
#define roundDownToMultipleUnsigned(num, multiple) ((multiple) ? (num) - ((num) % (multiple)) : 0)
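// Worked examples:
//   roundUpToMultipleUnsigned(10, 8)   == 16
//   roundDownToMultipleUnsigned(10, 8) == 8
//   both return 0 when multiple is 0 (instead of dividing by zero)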
//
// Tensilica-specific defines: normal asserts are not functional at the moment.
// In other environments just use normal assert.
//
#ifdef __XTENSA__
#define assert hw_assert
#include <xtensa/config/core.h>
#else
#include <assert.h>
#endif
//
// Macro that returns number of elements in
// an array.
//
#if !defined(__cplusplus)
#define _countof(_Array) (sizeof(_Array) / sizeof(_Array[0]))
#else
extern "C++"
{
template <typename _CountofType, size_t _SizeOfArray>
char (&__countof_helper(_CountofType (&_Array)[_SizeOfArray]))[_SizeOfArray];
#define _countof(_Array) sizeof(__countof_helper(_Array))
}
#endif
// Macros for using the RDTSCP time stamp counters in x86 environments
#ifdef __X86__
#define RDTSCP() ( \
{ \
UINT64 ret_val; \
asm volatile (".byte 15, 1, 249 \n" \
"shl $0x20,%%rdx \n" \
"or %%rdx,%%rax \n" \
: "=a" (ret_val) \
: \
: "rcx", "rdx"); \
\
ret_val; \
})
#define RDTSC() ( \
{ \
UINT64 ret_val; \
asm volatile (".byte 15, 49 \n" \
"shl $0x20,%%rdx \n" \
"or %%rdx,%%rax \n" \
: "=a" (ret_val) \
: \
: "rdx"); \
ret_val; \
})
#endif
//
// Macros for bit manipulation
//
// Single bit
#define BIT_SET(data, position) ((data) | (1 << (position)))
#define BIT_CLEAR(data, position) ((data) & ~(1 << (position)))
#define BIT_FLIP(data, position) ((data) ^ (1 << (position)))
#define BIT_CHECK(data, position) (((data) & (1 << (position))) != 0)
// Bits
#define BITS_SET(data, mask) ((data) | (mask))
#define BITS_CLEAR(data, mask) ((data) & (~(mask)))
#define BITS_FLIP(data, mask) ((data) ^ (mask))
#define BITS_GET(data, mask) ((data) & (mask))
// Target's valid data is right-aligned (bit 0 to bit bit_count-1 contains valid data to set)
// input_value is the number that will be operated on, target contains the bits that will be set at specified location
#define BITS_SET32(input_value, lsb_position, bit_count, target) ((input_value&(~((0xffffffff>>(32 - bit_count))<<lsb_position)))|(target<<lsb_position))
#define BITS_SET64(input_value, lsb_position, bit_count, target) ((input_value&(~((0xffffffffffffffffull>>(64 - bit_count))<<lsb_position)))|(target<<lsb_position))
//Returned data is right-aligned (bit 0 to bit bit_count-1 contains valid data got from input_value)
#define BITS_GET32(input_value, lsb_position, bit_count) ((input_value>>lsb_position)&(0xffffffff>>(32 - bit_count)))
#define BITS_GET64(input_value, lsb_position, bit_count) ((input_value>>lsb_position)&(0xffffffffffffffffull>>(64 - bit_count)))
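// Worked examples:
//   BIT_SET(0x00, 3)                  == 0x08
//   BIT_CLEAR(0x0F, 3)                == 0x07
//   BIT_CHECK(0x08, 3)                != 0
//   BITS_SET32(0x000000F0, 4, 4, 0x3) == 0x00000030  (bits 4..7 replaced by 0x3)
//   BITS_GET32(0x00000030, 4, 4)      == 0x3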
#define OBTAIN_STRUCT_ADDR(member_addr, member_name, struct_name) ((struct_name*)((char*)(member_addr)-(char*)(&(((struct_name*)0)->member_name))))
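// Example: given a pointer to a member, recover the enclosing struct
// (the same pattern the timer code in this change uses with ref_node):
//   hw_timer_t *timer = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_timer_t);
// subtracts the member's offset within the struct from the member address.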
#endif

View File

@ -0,0 +1,6 @@
#ifndef _BIFROST_MEM_H
#define _BIFROST_MEM_H
#include <bifrost_hs_mem.h>
#endif // _BIFROST_MEM_H

View File

@ -0,0 +1,6 @@
#ifndef _BIFROST_PRINT_H
#define _BIFROST_PRINT_H
void hw_printf(const char *format, ...);
#endif

View File

@ -0,0 +1,40 @@
#ifndef _BIFROST_REF_H
#define _BIFROST_REF_H
#include "bifrost_types.h"
#include "bifrost_statuscode.h"
#include "avl_tree.h"
#include "bifrost_lock.h"
typedef struct
{
int32_t ref_count;
hw_callback_func_t callback;
} hw_ref_node_t;
#define HW_HANDLE_BASE 0x80000000
#define HW_HANDLE_CURRENT_THREAD 0x1
//
// All functions are hw since users or kernel devs should not be
// specifying where the allocations take place
//
hw_result_t ke_reference_setup();
hw_result_t ke_reference_create(hw_ref_node_t *ref,
hw_callback_func_t free_func);
hw_result_t ke_reference_obj(hw_ref_node_t *ref);
hw_result_t ke_dereference_obj(hw_ref_node_t *ref);
// HANDLES
hw_result_t hw_open_obj_by_handle(hw_handle_t handle, hw_ref_node_t **out);
hw_result_t hw_create_handle(hw_ref_node_t *ref, hw_handle_t *out);
hw_result_t hw_close_handle(hw_handle_t handle);
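// Typical handle usage, as in the timer code in this change:
//   hw_ref_node_t *ref;
//   if (HW_SUCCESS(hw_open_obj_by_handle(handle, &ref)))
//   {
//       hw_timer_t *timer = OBTAIN_STRUCT_ADDR(ref, ref_node, hw_timer_t);
//       /* ... use the object ... */
//       ke_dereference_obj(ref);  // balance the reference taken by open
//   }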
#endif

View File

@ -0,0 +1,50 @@
/*-------------------------------------------------------
|
| bifrost_rwlock.h
|
| Contains Bifrost readers-writer lock APIs,
|
|--------------------------------------------------------
|
| Copyright ( C ) 2016 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#ifndef _BIFROST_RWLOCK_H_
#define _BIFROST_RWLOCK_H_
#include "bifrost_lock.h"
#include "bifrost_intr.h"
#include "bifrost_types.h"
typedef struct
{
hw_spin_lock_t w_mutex;
hw_spin_lock_t r_mutex;
hw_spin_lock_t res_lock;
hw_spin_lock_t r_try;
uint32_t reader_ct;
uint32_t writer_ct;
} hw_rwlock_t;
void ke_rwlock_init(hw_rwlock_t *lock);
void ke_reader_lock(hw_rwlock_t *lock);
void ke_reader_unlock(hw_rwlock_t *lock);
hw_irql_t ke_reader_lock_raise_irql(hw_rwlock_t *lock, hw_irql_t irq);
void ke_reader_unlock_lower_irql(hw_rwlock_t *lock, hw_irql_t irq);
void ke_writer_lock(hw_rwlock_t *lock);
void ke_writer_unlock(hw_rwlock_t *lock);
hw_irql_t ke_writer_lock_raise_irql(hw_rwlock_t *lock, hw_irql_t irq);
void ke_writer_unlock_lower_irql(hw_rwlock_t *lock, hw_irql_t irq);
#endif

View File

@ -0,0 +1,61 @@
/*-------------------------------------------------------
|
| bifrost_semaphore.h
|
| Contains Bifrost semaphore APIs,
|
|--------------------------------------------------------
|
| Copyright ( C ) 2016 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#ifndef _BIFROST_SEMAPHORE_H_
#define _BIFROST_SEMAPHORE_H_
#include "linked_list.h"
#include "bifrost_lock.h"
#include "bifrost_statuscode.h"
#include "bifrost_ref.h"
// this library depends on thread_block and thread_resume
// any functions called by thread_block and thread_resume should not use semaphores
typedef struct
{
hw_ref_node_t ref_node;
hw_spin_lock_t lock;
linked_list_t block_list;
int32_t count;
} hw_sem_t;
typedef struct
{
linked_list_node_t node;
void *tcb;
int32_t quota;
hw_callback_func_t free_callback;
} hw_sem_node_t;
hw_result_t hw_sem_create(hw_handle_t *out, int32_t count);
hw_result_t hw_sem_wait(hw_handle_t handle, int32_t quota);
hw_result_t hw_sem_signal(hw_handle_t handle, int32_t quota);
hw_result_t hw_sem_trywait(hw_handle_t handle, int32_t quota);
hw_result_t ke_sem_init(hw_sem_t *sem, int32_t count);
hw_result_t ke_sem_signal(hw_sem_t *sem, int32_t quota);
hw_result_t ke_sem_wait(hw_sem_t *sem, hw_sem_node_t *node, int quota);
hw_result_t ke_sem_trywait(hw_sem_t *sem, int32_t quota);
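// Minimal usage sketch (illustrative; assumes count is the initial number of available units):
//   hw_handle_t sem;
//   if (HW_SUCCESS(hw_sem_create(&sem, 1)))
//   {
//       hw_sem_wait(sem, 1);      // acquire one unit
//       /* ... protected work ... */
//       hw_sem_signal(sem, 1);    // return the unit
//       hw_close_handle(sem);
//   }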
#endif

View File

@ -0,0 +1,170 @@
#ifndef _BIFROST_STATUSCODE_H
#define _BIFROST_STATUSCODE_H
#include "bifrost_types.h"
//
// HW_RESULTs are 16-bit values laid out as follows:
//
// 1 1 1 1 1 1
// 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
// +-+---------------+-------------+
// |S| Facility | Code |
// +-+---------------+-------------+
//
// where
//
// S - Severity - indicates success/fail
//
// 0 - Success
// 1 - Fail
//
// Facility - is the facility code
//
// Code - is the facility's status code
//
//
// Severity values
//
#define HW_RESULT_SEVERITY_SUCCESS 0
#define HW_RESULT_SEVERITY_ERROR 1
//
// Generic test for success on any status value (non-negative numbers
// indicate success).
//
#define HW_SUCCESS(hr) (((uint16_t)(hr)) >> 15 == 0)
//
// Return the code
//
#define HW_RESULT_CODE(hr) ((hr) & 0x7F)
//
// Return the facility
//
#define HW_RESULT_FACILITY(hr) (((hr) & 0x7F80) >> 7)
//
// Return the severity
//
#define HW_RESULT_SEVERITY(hr) (((hr) >> 15) & 0x1)
//
// Create an HW_RESULT value from component pieces
//
#define MAKE_HW_RESULT(sev, fac, code) \
(((uint16_t)(sev)<<15) | ((uint16_t)(fac)<<7) | ((uint16_t)(code)))
#define HW_RESULT_FACILITY_THREAD 6
#define HW_RESULT_FACILITY_DPC 1
#define HW_RESULT_FACILITY_SEM 2
#define HW_RESULT_FACILITY_REF 3
#define HW_RESULT_FACILITY_APC 4
#define HW_RESULT_FACILITY_EVENT 4
#define HW_RESULT_FACILITY_TIMER 5
#define HW_RESULT_NO_FACILITY 0
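// Worked example, using the layout above:
//   TIMER_STATUS_NOT_INITIALIZED = MAKE_HW_RESULT(1, 5, 3)
//                                = (1 << 15) | (5 << 7) | 3 = 0x8283
//   HW_SUCCESS(0x8283)           is false  (severity bit is set)
//   HW_RESULT_FACILITY(0x8283)  == 5       (HW_RESULT_FACILITY_TIMER)
//   HW_RESULT_CODE(0x8283)      == 3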
typedef enum
{
STATUS_SUCCESS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_SUCCESS,
HW_RESULT_NO_FACILITY, 0),
THREAD_STATUS_INVALID_ARGUMENT = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_THREAD, 1),
THREAD_STATUS_INVALID_STATE = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_THREAD, 2),
THREAD_STATUS_UNINITIALIZED = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_THREAD, 3),
THREAD_STATUS_OUT_OF_MEMORY = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_THREAD, 4),
THREAD_STATUS_ID_OVERFLOW = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_THREAD, 5),
DPC_STATUS_NOT_ENOUGH_MEM = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_DPC, 1),
DPC_STATUS_INVALID_ARGUMENTS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_DPC, 2),
DPC_STATUS_NOT_INITIALIZED = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_DPC, 3),
SEM_STATUS_CANNOT_ALLOCATE_MEM = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_SEM, 1),
SEM_STATUS_OCCUPIED = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_SEM, 2),
SEM_STATUS_INVALID_ARGUMENTS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_SEM, 3),
SEM_STATUS_INVALID_CONTEXT = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_SEM, 4),
REF_STATUS_CANNOT_ALLOCATE_MEM = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_REF, 1),
REF_STATUS_HANDLE_NOT_FOUND = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_REF, 2),
REF_STATUS_INVALID_ARGUMENTS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_REF, 3),
REF_STATUS_HANDLE_DUPLICATE = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_REF, 4),
REF_STATUS_UNINITIALIZED = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_REF, 5),
REF_STATUS_REF_FREED = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_REF, 6),
REF_STATUS_NO_EFFECT = MAKE_HW_RESULT(HW_RESULT_SEVERITY_SUCCESS,
HW_RESULT_FACILITY_REF, 7),
APC_STATUS_CANNOT_ALLOCATE_MEM = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_APC, 1),
APC_STATUS_INVALID_ARGUMENTS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_APC, 2),
APC_STATUS_NOT_INITIALIZED = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_APC, 3),
EVENT_STATUS_CANNOT_ALLOCATE_MEM = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_EVENT, 1),
EVENT_STATUS_INVALID_ARGUMENTS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_EVENT, 2),
TIMER_STATUS_SUCCESS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_SUCCESS,
HW_RESULT_FACILITY_TIMER, 0),
TIMER_STATUS_CANNOT_ALLOCATE_MEM = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_TIMER, 1),
TIMER_STATUS_INVALID_ARGUMENTS = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_TIMER, 2),
TIMER_STATUS_NOT_INITIALIZED = MAKE_HW_RESULT(HW_RESULT_SEVERITY_ERROR,
HW_RESULT_FACILITY_TIMER, 3),
} hw_result_t;
#endif

View File

@ -0,0 +1,12 @@
#ifndef _BIFROST_STDLIB_H
#define _BIFROST_STDLIB_H
#include "bifrost_types.h"
int32_t hw_memcmp(const void *ptr1, const void *ptr2, const size_t len);
void hw_memset(void *ptr, uint8_t value, size_t len);
#define OBTAIN_STRUCT_ADDR(member_addr, member_name, struct_name) ((struct_name*)((char*)(member_addr)-(char*)(&(((struct_name*)0)->member_name))))
#endif

View File

@ -0,0 +1,178 @@
/*-------------------------------------------------------
|
| bifrost_thread.h
|
| Contains Bifrost threading APIs,
|
|--------------------------------------------------------
|
| Copyright ( C ) 2016 Microsoft Corp.
| All Rights Reserved
| Confidential and Proprietary
|
|--------------------------------------------------------
*/
#ifndef _BIFROST_THREAD_H_
#define _BIFROST_THREAD_H_
#include "linked_list.h"
#include "avl_tree.h"
#include "bifrost_statuscode.h"
#include "bifrost_lock.h"
#include "bifrost_event.h"
#include "bifrost_ref.h"
#define THREAD_DEFAULT_STACK_SIZE 0x4000
#define THREAD_INVALID_PID -1
#define THREAD_EXIT_CODE_TERMINATED 0xDEADDEAD
typedef enum
{
STATE_NEW,
STATE_BLOCK,
STATE_READY,
STATE_EXIT,
STATE_RUN,
STATE_NUM,
STATE_OUTSIDE
} hw_thread_state_t;
typedef enum
{
PRIORITY_HIGHEST = 0,
PRIORITY_HIGH,
PRIORITY_DEFAULT,
PRIORITY_LOW,
PRIORITY_LOWEST,
PRIORITY_LEVEL_NUM
} hw_thread_priority_t;
typedef struct
{
void *prev_context;
void *next_context;
} hw_thread_schedule_info_t;
typedef struct
{
linked_list_node_t list_node;
avl_tree_node_t tree_node;
int32_t thread_id;
uint32_t core_id;
void *stack_ptr;
uint64_t regs[16];
void (*proc)(void *);
uint32_t stack_size;
void *args;
int32_t exit_code;
_Bool initialized;
// apc stuff
hw_spin_lock_t apc_lock;
linked_list_t apc_list;
// state is guarded by the scheduler lock
hw_thread_state_t state;
linked_list_node_t scheduler_queue_node;
bool in_scheduler_queue;
hw_thread_priority_t priority;
hw_thread_state_t location;
// thread exit event obj
hw_event_t thread_exit_event;
// reference manager
hw_ref_node_t ref_node;
} hw_tcb_t;
// ==================
// HW Functions
// ==================
hw_result_t hw_thread_create(void (*proc)(void *),
void *args,
hw_thread_priority_t priority,
uint32_t stack_size,
hw_handle_t *thread_handle);
hw_result_t hw_thread_start(hw_handle_t thread_handle);
hw_result_t hw_thread_block(hw_handle_t thread_handle);
hw_result_t hw_thread_resume(hw_handle_t thread_handle);
hw_result_t hw_thread_terminate(hw_handle_t thread_handle);
hw_result_t hw_thread_get_exit_code(hw_handle_t thread_handle, int32_t *exit_code);
hw_result_t hw_thread_open(int32_t thread_id, hw_handle_t *out);
hw_result_t hw_thread_assert_state(hw_handle_t thread_handle, hw_thread_state_t state);
// ==================
// Ke Functions
// ==================
hw_result_t ke_thread_open(int32_t id, hw_tcb_t **out);
hw_result_t ke_thread_create(hw_tcb_t *tcb,
void (*proc)(void *),
void *args,
hw_thread_priority_t priority,
uint32_t stack_size,
void *stack_ptr);
hw_result_t ke_thread_start(hw_tcb_t *tcb);
hw_result_t ke_thread_terminate(hw_tcb_t *tcb);
hw_result_t ke_thread_get_exit_code(hw_tcb_t *tcb, int32_t *exit_code);
void ke_thread_exit(int32_t exit_code);
hw_result_t ke_thread_block(hw_tcb_t *tcb);
hw_result_t ke_thread_resume(hw_tcb_t *tcb);
hw_result_t ke_thread_yield(uint32_t core);
bool ke_query_and_clear_scheduler_dpc(uint32_t core);
hw_result_t ke_queue_scheduler_dpc(uint32_t core);
// ==========================
// Thread Context Routines. No explicit ref/deref needed.
// N.B. the handle returned by the current hw_current_thread implementation must still be released with hw_close_handle
// ==========================
int32_t hw_current_thread_id();
hw_handle_t hw_current_thread();
hw_result_t hw_wait_for_thread_exit(hw_handle_t handle);
hw_result_t hw_thread_sleep(uint32_t millis);
void hw_thread_exit(int32_t exit_code);
int32_t ke_current_thread_id();
hw_tcb_t *ke_current_thread();
void ke_thread_exit(int32_t exit_code);
// ==========================
// MISC Routines
// ==========================
hw_result_t hw_thread_setup();
void ke_thread_schedule(void *info, void *up);
#endif

View File

@ -0,0 +1,67 @@
#ifndef _BIFROST_TIMER_H
#define _BIFROST_TIMER_H
#include "linked_list.h"
#include "bifrost_types.h"
#include "bifrost_statuscode.h"
#include "bifrost_lock.h"
#include "bifrost_ref.h"
typedef enum
{
TIMER_TYPE_MANUAL_RESET,
TIMER_TYPE_AUTO_RESET
} hw_timer_type_t;
typedef struct
{
hw_ref_node_t ref_node;
linked_list_node_t list_node;
linked_list_t waiting_threads;
uint32_t tick;
uint32_t elapsed_tick;
hw_spin_lock_t lock;
hw_timer_type_t timer_type;
bool signaled;
bool periodic;
bool active;
} hw_timer_t;
typedef struct
{
linked_list_node_t list_node;
void *tcb;
hw_callback_func_t free_func;
} hw_timer_node_t;
// NOTE THAT timers are not shared between cores
hw_result_t hw_timer_create(hw_handle_t *out,
hw_timer_type_t type);
hw_result_t hw_timer_wait(hw_handle_t timer_handle);
hw_result_t hw_timer_set(hw_handle_t timer_handle, uint32_t tick, bool periodic);
hw_result_t hw_timer_cancel(hw_handle_t timer_handle);
// KEs
void ke_timer_tick(void *kp, void *up);
hw_result_t ke_timer_setup();
hw_result_t ke_timer_init(hw_timer_t *timer,
hw_timer_type_t type);
hw_result_t ke_timer_wait(hw_timer_t *timer, hw_timer_node_t *node);
hw_result_t ke_timer_set(hw_timer_t *timer, uint32_t tick, bool periodic);
hw_result_t ke_timer_cancel(hw_timer_t *timer);
bool ke_query_and_clear_timer_dpc(uint32_t core);
hw_result_t ke_queue_timer_dpc(uint32_t core);
#endif

View File

@ -0,0 +1,51 @@
#include "bifrost_alloc.h"
#include "bifrost_print.h"
#include "bifrost_thread.h"
#include "bifrost_dpc.h"
#include "bifrost_boot.h"
#include "bifrost_apc.h"
#include "bifrost_timer.h"
extern void driver(void *par);
hw_arch_bootinfo_t info;
_Bool global_init_finished = false;
int kmain(void)
{
hw_handle_t driver_handle;
ke_hal_setup(&info);
hw_printf("Initializing...\n");
uint32_t coreid = ke_get_current_core();
// global inits only need to be done on one core
if(coreid == 0)
{
hw_alloc_setup();
ke_reference_setup();
global_init_finished = true;
}
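// other cores busy-wait here until core 0 finishes the shared initialization above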
while(!global_init_finished);
ke_lower_irql(HW_IRQL_USER_LEVEL);
// core specific stuff init
hw_thread_setup();
ke_apc_setup(info.int_info.apc_vec);
ke_dpc_setup(info.int_info.dpc_vec);
ke_timer_setup();
ke_register_intr_handler(info.int_info.timer_vec, ke_timer_interrupt_handler, NULL);
hw_printf("Initialization completed.\n");
hw_thread_create(driver, NULL, PRIORITY_DEFAULT, THREAD_DEFAULT_STACK_SIZE, &driver_handle);
hw_thread_start(driver_handle);
ke_set_timer_timeout(1);
while(1);
}

View File

@ -0,0 +1,16 @@
#ifndef _BIFROST_HS_BOOT_H_
#define _BIFROST_HS_BOOT_H_
#include "stdint.h"
#include "bifrost_hs_intr.h"
#include "bifrost_hs_mem.h"
typedef struct {
hw_arch_intr_info_t int_info;
uint32_t mem_count;
hw_arch_memory_info_t mem_info[];
} hw_arch_bootinfo_t;
extern int32_t ke_hal_setup(hw_arch_bootinfo_t *bootinfo);
#endif

View File

@ -0,0 +1,10 @@
#ifndef _BIFROST_HS_CONTEXT_H_
#define _BIFROST_HS_CONTEXT_H_
#include "bifrost_hs_intr.h"
// This function saves intr_context to old_context
// it then switches stack to the new_context and constructs an exception frame on the stack
// it guarantees that it takes
extern void ke_context_switch(void *intr_context, void *old_context, void *new_context);
extern void ke_create_context(void *context, void *pc, void *sp, hw_irql_t irql, void *arg);
#endif

View File

@ -0,0 +1,47 @@
#ifndef _BIFROST_HS_INTR_H_
#define _BIFROST_HS_INTR_H_
#include "bifrost_types.h"
// HAL-specific macros
#define HW_IRQL_USER_LEVEL (0)
#define HW_IRQL_APC_LEVEL (1)
#define HW_IRQL_DPC_LEVEL (2)
#define HW_IRQL_DISABLED_LEVEL (3)
typedef uint32_t hw_irql_t;
extern hw_irql_t ke_get_irql();
extern hw_irql_t ke_set_irql(hw_irql_t irql);
// Interrupt
// usr_context allows a single intr_handler to be shared by multiple interrupts;
// the handler uses it to identify which interrupt fired
typedef void (*hw_intr_handler_t)(void * intr_stack, void* usr_context);
typedef void (*hw_exc_handler_t)(uint64_t pc, uint64_t sp, uint64_t error_code);
typedef struct
{
uint32_t timer_vec;
uint32_t apc_vec;
uint32_t dpc_vec;
} hw_arch_intr_info_t;
typedef enum
{
general_protection_exc,
page_fault_exc,
unsupported_thr_fatal_exc,
unsupported_thr_nonfatal_exc,
div_by_zero_exc,
debug_exc,
unrecoverable_exc,
invalid_op_exc
} hw_exc_type_t;
extern void ke_trigger_intr(uint32_t core, uint32_t vec);
extern hw_intr_handler_t ke_register_intr_handler(uint32_t vec, hw_intr_handler_t handler, void *context);
extern void ke_register_exc_handler(hw_exc_type_t type, hw_exc_handler_t handler);
extern void ke_set_timer_timeout(uint32_t timeout);
extern uint32_t ke_get_current_core();
#endif

View File

@ -0,0 +1,21 @@
#ifndef _BIFROST_HS_MEM_H_
#define _BIFROST_HS_MEM_H_
#include <stdint.h>
// Memory map
typedef struct {
uintptr_t base;
uintptr_t size;
uint32_t attr;
} hw_arch_memory_info_t;
// Caching
#define HW_CACHELINE_SIZE (64)
extern void ke_flush_addr(void *addr, uint32_t num_of_cacheline);
// Atomics
extern int32_t ke_interlocked_exchange(int32_t *addr, int32_t val);
extern int32_t ke_interlocked_compare_exchange(int32_t *addr, int32_t compare, int32_t val);
extern int32_t ke_interlocked_increment(int32_t *addr, int32_t val);
#endif

View File

@ -0,0 +1,16 @@
#ifndef _BIFROST_TYPES_H_
#define _BIFROST_TYPES_H_
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>
#include <stdarg.h>
typedef void (*hw_callback_func_t)(void* kp, void* up);
typedef uint32_t hw_handle_t;
#define TRUE (true)
#define FALSE (false)
#endif

Some files were not shown because too many files have changed in this diff.