interrupt and timer

This commit is contained in:
quackerd 2024-10-31 10:45:03 -04:00
parent 6b34443bee
commit 1eed961317
36 changed files with 1336 additions and 975 deletions

View File

@ -55,7 +55,7 @@ env.Append(CFLAGS = [ "-Wshadow", "-Wno-typedef-redefinition" ])
if env["ARCH"] == "amd64":
env.Append(CPPFLAGS = [ "-target", "x86_64-freebsd-freebsd-elf" ])
elif env["ARCH"] == "arm64":
env.Append(CPPFLAGS = [ "-target", "arm64-freebsd-freebsd-elf" ])
env.Append(CPPFLAGS = [ "-target", "arm64-freebsd-freebsd-elf", "-ffixed-x18" ,"-march=armv8-a+lse"])
env.Append(LINKFLAGS = [ "-fuse-ld=lld", "-Wl,-maarch64elf" ])
else:
print("Unsupported architecture: " + env["ARCH"])

255
include/stdatomic.h Normal file
View File

@ -0,0 +1,255 @@
/* Copyright (C) 2013-2024 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* ISO C11 Standard: 7.17 Atomics <stdatomic.h>. */
#ifndef _STDATOMIC_H
#define _STDATOMIC_H
typedef enum
{
memory_order_relaxed = __ATOMIC_RELAXED,
memory_order_consume = __ATOMIC_CONSUME,
memory_order_acquire = __ATOMIC_ACQUIRE,
memory_order_release = __ATOMIC_RELEASE,
memory_order_acq_rel = __ATOMIC_ACQ_REL,
memory_order_seq_cst = __ATOMIC_SEQ_CST
} memory_order;
typedef _Atomic _Bool atomic_bool;
typedef _Atomic char atomic_char;
typedef _Atomic signed char atomic_schar;
typedef _Atomic unsigned char atomic_uchar;
typedef _Atomic short atomic_short;
typedef _Atomic unsigned short atomic_ushort;
typedef _Atomic int atomic_int;
typedef _Atomic unsigned int atomic_uint;
typedef _Atomic long atomic_long;
typedef _Atomic unsigned long atomic_ulong;
typedef _Atomic long long atomic_llong;
typedef _Atomic unsigned long long atomic_ullong;
#ifdef __CHAR8_TYPE__
typedef _Atomic __CHAR8_TYPE__ atomic_char8_t;
#endif
typedef _Atomic __CHAR16_TYPE__ atomic_char16_t;
typedef _Atomic __CHAR32_TYPE__ atomic_char32_t;
typedef _Atomic __WCHAR_TYPE__ atomic_wchar_t;
typedef _Atomic __INT_LEAST8_TYPE__ atomic_int_least8_t;
typedef _Atomic __UINT_LEAST8_TYPE__ atomic_uint_least8_t;
typedef _Atomic __INT_LEAST16_TYPE__ atomic_int_least16_t;
typedef _Atomic __UINT_LEAST16_TYPE__ atomic_uint_least16_t;
typedef _Atomic __INT_LEAST32_TYPE__ atomic_int_least32_t;
typedef _Atomic __UINT_LEAST32_TYPE__ atomic_uint_least32_t;
typedef _Atomic __INT_LEAST64_TYPE__ atomic_int_least64_t;
typedef _Atomic __UINT_LEAST64_TYPE__ atomic_uint_least64_t;
typedef _Atomic __INT_FAST8_TYPE__ atomic_int_fast8_t;
typedef _Atomic __UINT_FAST8_TYPE__ atomic_uint_fast8_t;
typedef _Atomic __INT_FAST16_TYPE__ atomic_int_fast16_t;
typedef _Atomic __UINT_FAST16_TYPE__ atomic_uint_fast16_t;
typedef _Atomic __INT_FAST32_TYPE__ atomic_int_fast32_t;
typedef _Atomic __UINT_FAST32_TYPE__ atomic_uint_fast32_t;
typedef _Atomic __INT_FAST64_TYPE__ atomic_int_fast64_t;
typedef _Atomic __UINT_FAST64_TYPE__ atomic_uint_fast64_t;
typedef _Atomic __INTPTR_TYPE__ atomic_intptr_t;
typedef _Atomic __UINTPTR_TYPE__ atomic_uintptr_t;
typedef _Atomic __SIZE_TYPE__ atomic_size_t;
typedef _Atomic __PTRDIFF_TYPE__ atomic_ptrdiff_t;
typedef _Atomic __INTMAX_TYPE__ atomic_intmax_t;
typedef _Atomic __UINTMAX_TYPE__ atomic_uintmax_t;
#if !(defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L)
#define ATOMIC_VAR_INIT(VALUE) (VALUE)
#endif
/* Initialize an atomic object pointed to by PTR with VAL. */
#define atomic_init(PTR, VAL) \
atomic_store_explicit (PTR, VAL, __ATOMIC_RELAXED)
#define kill_dependency(Y) \
__extension__ \
({ \
__auto_type __kill_dependency_tmp = (Y); \
__kill_dependency_tmp; \
})
extern void atomic_thread_fence (memory_order);
#define atomic_thread_fence(MO) __atomic_thread_fence (MO)
extern void atomic_signal_fence (memory_order);
#define atomic_signal_fence(MO) __atomic_signal_fence (MO)
#define atomic_is_lock_free(OBJ) __atomic_is_lock_free (sizeof (*(OBJ)), (OBJ))
#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE
#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
#ifdef __GCC_ATOMIC_CHAR8_T_LOCK_FREE
#define ATOMIC_CHAR8_T_LOCK_FREE __GCC_ATOMIC_CHAR8_T_LOCK_FREE
#endif
#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE
#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE
#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
/* Note that these macros require __auto_type to remove
_Atomic qualifiers (and const qualifiers, if those are valid on
macro operands).
Also note that the header file uses the generic form of __atomic
builtins, which requires the address to be taken of the value
parameter, and then we pass that value on. This allows the macros
to work for any type, and the compiler is smart enough to convert
these to lock-free _N variants if possible, and throw away the
temps. */
#define atomic_store_explicit(PTR, VAL, MO) \
__extension__ \
({ \
__auto_type __atomic_store_ptr = (PTR); \
__typeof__ ((void)0, *__atomic_store_ptr) __atomic_store_tmp = (VAL); \
__atomic_store (__atomic_store_ptr, &__atomic_store_tmp, (MO)); \
})
#define atomic_store(PTR, VAL) \
atomic_store_explicit (PTR, VAL, __ATOMIC_SEQ_CST)
#define atomic_load_explicit(PTR, MO) \
__extension__ \
({ \
__auto_type __atomic_load_ptr = (PTR); \
__typeof__ ((void)0, *__atomic_load_ptr) __atomic_load_tmp; \
__atomic_load (__atomic_load_ptr, &__atomic_load_tmp, (MO)); \
__atomic_load_tmp; \
})
#define atomic_load(PTR) atomic_load_explicit (PTR, __ATOMIC_SEQ_CST)
#define atomic_exchange_explicit(PTR, VAL, MO) \
__extension__ \
({ \
__auto_type __atomic_exchange_ptr = (PTR); \
__typeof__ ((void)0, *__atomic_exchange_ptr) __atomic_exchange_val = (VAL); \
__typeof__ ((void)0, *__atomic_exchange_ptr) __atomic_exchange_tmp; \
__atomic_exchange (__atomic_exchange_ptr, &__atomic_exchange_val, \
&__atomic_exchange_tmp, (MO)); \
__atomic_exchange_tmp; \
})
#define atomic_exchange(PTR, VAL) \
atomic_exchange_explicit (PTR, VAL, __ATOMIC_SEQ_CST)
#define atomic_compare_exchange_strong_explicit(PTR, VAL, DES, SUC, FAIL) \
__extension__ \
({ \
__auto_type __atomic_compare_exchange_ptr = (PTR); \
__typeof__ ((void)0, *__atomic_compare_exchange_ptr) __atomic_compare_exchange_tmp \
= (DES); \
__atomic_compare_exchange (__atomic_compare_exchange_ptr, (VAL), \
&__atomic_compare_exchange_tmp, 0, \
(SUC), (FAIL)); \
})
#define atomic_compare_exchange_strong(PTR, VAL, DES) \
atomic_compare_exchange_strong_explicit (PTR, VAL, DES, __ATOMIC_SEQ_CST, \
__ATOMIC_SEQ_CST)
#define atomic_compare_exchange_weak_explicit(PTR, VAL, DES, SUC, FAIL) \
__extension__ \
({ \
__auto_type __atomic_compare_exchange_ptr = (PTR); \
__typeof__ ((void)0, *__atomic_compare_exchange_ptr) __atomic_compare_exchange_tmp \
= (DES); \
__atomic_compare_exchange (__atomic_compare_exchange_ptr, (VAL), \
&__atomic_compare_exchange_tmp, 1, \
(SUC), (FAIL)); \
})
#define atomic_compare_exchange_weak(PTR, VAL, DES) \
atomic_compare_exchange_weak_explicit (PTR, VAL, DES, __ATOMIC_SEQ_CST, \
__ATOMIC_SEQ_CST)
#define atomic_fetch_add(PTR, VAL) __atomic_fetch_add ((PTR), (VAL), \
__ATOMIC_SEQ_CST)
#define atomic_fetch_add_explicit(PTR, VAL, MO) \
__atomic_fetch_add ((PTR), (VAL), (MO))
#define atomic_fetch_sub(PTR, VAL) __atomic_fetch_sub ((PTR), (VAL), \
__ATOMIC_SEQ_CST)
#define atomic_fetch_sub_explicit(PTR, VAL, MO) \
__atomic_fetch_sub ((PTR), (VAL), (MO))
#define atomic_fetch_or(PTR, VAL) __atomic_fetch_or ((PTR), (VAL), \
__ATOMIC_SEQ_CST)
#define atomic_fetch_or_explicit(PTR, VAL, MO) \
__atomic_fetch_or ((PTR), (VAL), (MO))
#define atomic_fetch_xor(PTR, VAL) __atomic_fetch_xor ((PTR), (VAL), \
__ATOMIC_SEQ_CST)
#define atomic_fetch_xor_explicit(PTR, VAL, MO) \
__atomic_fetch_xor ((PTR), (VAL), (MO))
#define atomic_fetch_and(PTR, VAL) __atomic_fetch_and ((PTR), (VAL), \
__ATOMIC_SEQ_CST)
#define atomic_fetch_and_explicit(PTR, VAL, MO) \
__atomic_fetch_and ((PTR), (VAL), (MO))
typedef _Atomic struct
{
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
_Bool __val;
#else
unsigned char __val;
#endif
} atomic_flag;
#define ATOMIC_FLAG_INIT { 0 }
extern _Bool atomic_flag_test_and_set (volatile atomic_flag *);
#define atomic_flag_test_and_set(PTR) \
__atomic_test_and_set ((PTR), __ATOMIC_SEQ_CST)
extern _Bool atomic_flag_test_and_set_explicit (volatile atomic_flag *,
memory_order);
#define atomic_flag_test_and_set_explicit(PTR, MO) \
__atomic_test_and_set ((PTR), (MO))
extern void atomic_flag_clear (volatile atomic_flag *);
#define atomic_flag_clear(PTR) __atomic_clear ((PTR), __ATOMIC_SEQ_CST)
extern void atomic_flag_clear_explicit (volatile atomic_flag *, memory_order);
#define atomic_flag_clear_explicit(PTR, MO) __atomic_clear ((PTR), (MO))
#if defined __STDC_VERSION__ && __STDC_VERSION__ > 201710L
#define __STDC_VERSION_STDATOMIC_H__ 202311L
#endif
#endif /* _STDATOMIC_H */

View File

@ -43,7 +43,7 @@ src_arm64 = [
# ARM64
"arm64/debug.c",
"arm64/disasm.c",
"arm64/irq.c",
"arm64/trap.c",
"arm64/machine.c",
"arm64/mp.c",
"arm64/pci.c",
@ -51,13 +51,12 @@ src_arm64 = [
"arm64/support.S",
"arm64/switch.S",
"arm64/thread.c",
"arm64/time.c",
"arm64/trap.c",
"arm64/timer.c",
"arm64/trapentry.S",
"arm64/pdcache.c",
"arm64/gic.c",
# Devices
"dev/arm64/uart.c",
"dev/arm64/gic.c",
# Metal
"arm64/metal.c",
"arm64/paging.c",

View File

@ -69,6 +69,7 @@ Debug_Breakpoint(TrapFrame *tf)
frames[CPU()] = tf;
// Should probably force all cores into the debugger
while(atomic_swap_uint64(&debugLock, 1) == 1) {
// Wait to acquire debugger lock
}

View File

@ -57,7 +57,7 @@ PMap_Init()
systemAS.tables = PAGETABLE_ENTRIES / 2 + 1;
systemAS.mappings = 0;
if (!systemAS.root)
PANIC("Cannot allocate system page table");
Panic("Cannot allocate system page table");
for (i = 0; i < PAGETABLE_ENTRIES / 2; i++)
systemAS.root->entries[i] = 0;
@ -66,7 +66,7 @@ PMap_Init()
PageTable *pgtbl = PAlloc_AllocPage();
PageEntry pte = DMVA2PA((uint64_t)pgtbl) | PTE_W | PTE_P;
if (!pgtbl)
PANIC("Not enough memory!");
Panic("Not enough memory!");
systemAS.root->entries[i] = pte;

View File

@ -1,6 +1,7 @@
#include <stdbool.h>
#include <stdint.h>
#include <stdatomic.h>
#include <sys/kassert.h>
#include <sys/kconfig.h>
@ -9,7 +10,6 @@
#include <machine/cpu.h>
#include <machine/cpuop.h>
#include <machine/atomic.h>
#include <machine/trap.h>
#include <machine/mp.h>
@ -49,8 +49,8 @@ Debug_ResumeCPUs()
void
Debug_HaltIPI(TrapFrame *tf)
{
MP_SetState(CPUSTATE_HALTED);
__sync_fetch_and_add(&debugHalted, 1);
MP_GetCPUState()->state = CPUSTATE_HALTED;
atomic_fetch_add(&debugHalted, 1);
frames[CPU()] = tf;
@ -58,8 +58,8 @@ Debug_HaltIPI(TrapFrame *tf)
pause();
}
__sync_fetch_and_sub(&debugHalted, 1);
MP_SetState(CPUSTATE_BOOTED);
atomic_fetch_sub(&debugHalted, 1);
MP_GetCPUState()->state = CPUSTATE_BOOTED;
}
void
@ -68,7 +68,7 @@ Debug_Breakpoint(TrapFrame *tf)
frames[CPU()] = tf;
// Should probably force all cores into the debugger
while(atomic_swap_uint64(&debugLock, 1) == 1) {
while(atomic_exchange(&debugLock, 1) == 1) {
// Wait to acquire debugger lock
}
@ -80,7 +80,7 @@ Debug_Breakpoint(TrapFrame *tf)
// Resume all processors
Debug_ResumeCPUs();
atomic_set_uint64(&debugLock, 0);
atomic_store(&debugLock, 0);
}
static void

View File

@ -1,9 +1,11 @@
#include <machine/pmap.h>
#include <machine/cpuop.h>
#include <machine/mp.h>
#include <machine/bootinfo.h>
#include <sys/cdefs.h>
#include <errno.h>
#include <sys/kassert.h>
#include "gic.h"
#include <machine/gic.h>
#include <sys/contrib/libfdt/libfdt.h>
@ -217,14 +219,14 @@ static struct gic gic = {
.max_rd = 0
};
static bool
static int
gic_discover(void)
{
const void * dtb = kbootinfo.dtb_addr;
const int offset = fdt_node_offset_by_compatible(dtb, -1, "arm,gic-v3");
if (offset < 0) {
return false;
return ENOENT;
}
int lenp;
@ -232,7 +234,7 @@ gic_discover(void)
if (prop == NULL || lenp != 8 * sizeof(uint32_t)) {
// invalid gicv3 dtb spec
return false;
return EINVAL;
}
const uint32_t * ptr = (const uint32_t *)prop->data;
@ -252,7 +254,7 @@ gic_discover(void)
gic.max_rd = index;
return true;
return 0;
}
static void
@ -268,32 +270,18 @@ gic_enable(void)
// enable all interrupt groups and routing, disable security
// set SRE bits to enable system registers
__asm__ volatile (
"mrs x0, ICC_SRE_EL1;"
"orr x0, x0, #0x1;"
"msr ICC_SRE_EL1, x0;"
:
:
: "x0"
);
}
uint64_t sre = SYSREG_GET(ICC_SRE_EL1);
sre |= 1;
SYSREG_SET(ICC_SRE_EL1, sre);
uint32_t
gic_get_affinity(void)
{
uint64_t aff;
__asm__ volatile (
"MRS x0, MPIDR_EL1;"
"UBFX x1, x0, #32, #8;"
"BFI w0, w1, #24, #8;"
"MOV %0, x0;"
: "=r" (aff)
:
: "x0", "x1"
);
return aff;
// __asm__ volatile (
// "mrs x0, ICC_SRE_EL1;"
// "orr x0, x0, #0x1;"
// "msr ICC_SRE_EL1, x0;"
// :
// :
// : "x0"
// );
}
int
@ -366,9 +354,10 @@ gic_is_valid_extppi(uint32_t rd, uint32_t ID)
int
gic_disable_intr(uint32_t rd, uint32_t intid)
gic_disable_intr(uint32_t intid)
{
uint32_t bank;
const uint32_t rd = MP_GetCPUState()->gic_redist_id;
if (rd > gic.max_rd)
return EINVAL;
@ -417,102 +406,6 @@ gic_disable_intr(uint32_t rd, uint32_t intid)
return 0;
}
/*
 * gic_enable_intr --
 *
 *  Enable interrupt INTID: SGI/PPI and extended-PPI enables live in
 *  redistributor RDID (GICR_ISENABLER), SPI and extended-SPI enables
 *  live in the shared distributor (GICD_ISENABLER / GICD_ISENABLERE).
 *
 *  Returns 0 on success, EINVAL for an out-of-range RDID or INTID.
 */
int
gic_enable_intr(uint32_t rdid, uint32_t intid)
{
uint32_t bank;
if (rdid > gic.max_rd) {
return EINVAL;
}
// NOTE(review): PPI INTID 31 falls into the SPI branch below; this
// test probably should be `intid < 32` — confirm against the GICv3 spec.
if (intid < 31) {
// SGI or PPI
gic.gic_rdist[rdid].sgis.GICR_ISENABLER[0] = (1 << intid);
} else if (intid < 1020) {
// SPI
bank = intid/32; // There are 32 IDs per register, need to work out which register to access
intid = intid & 0x1f; // ... and which bit within the register
intid = 1 << intid; // Move a '1' into the correct bit position
gic.gic_dist->GICD_ISENABLER[bank] = intid;
} else if ((intid > 1055) && (intid < 1120)) {
// Extended PPI
// Check Ext PPI implemented
if (!gic_is_valid_extppi(rdid, intid))
return EINVAL;
intid = intid - 1024; // extended PPI register space starts at INTID 1024
bank = intid / 32; // There are 32 IDs per register, need to work out which register to access
intid = intid & 0x1F; // ... and which bit within the register
intid = 1 << intid; // Move a '1' into the correct bit position
gic.gic_rdist[rdid].sgis.GICR_ISENABLER[bank] = intid;
} else if ((intid > 4095) && (intid < 5120)) {
// Extended SPI
// Check Ext SPI implemented
if (!gic_is_valid_extspi(rdid, intid))
return EINVAL;
intid = intid - 4096; // extended SPI register space starts at INTID 4096
bank = intid / 32; // There are 32 IDs per register, need to work out which register to access
intid = intid & 0x1F; // ... and which bit within the register
intid = 1 << intid; // Move a '1' into the correct bit position
gic.gic_dist->GICD_ISENABLERE[bank] = intid;
} else {
return EINVAL;
}
return 0;
}
/*
 * gic_set_prio_mask --
 *
 *  Program the CPU interface priority mask (ICC_PMR_EL1) with the low
 *  8 bits of MASK; the trailing "dsb sy" makes the write complete
 *  before the function returns.
 */
void
gic_set_prio_mask(unsigned int mask)
{
uint64_t _mask = mask & 0xFF;
__asm__ volatile (
"mov x0, %0;"
"msr icc_pmr_el1, x0;"
"dsb sy;"
:
: "r" (_mask)
: "x0"
);
}
/*
 * gic_set_intr_prio --
 *
 *  Set the priority byte for interrupt INTID: SGI/PPI and extended-PPI
 *  priorities live in redistributor RDID (GICR_IPRIORITYR); SPI and
 *  extended-SPI priorities live in the distributor (GICD_IPRIORITYR /
 *  GICD_IPRIORITYRE).
 *
 *  Returns 0 on success, EINVAL for an out-of-range RDID or INTID.
 */
int
gic_set_intr_prio(uint32_t rdid, uint32_t intid, uint8_t prio)
{
if (rdid > gic.max_rd) {
return EINVAL;
}
// NOTE(review): PPI INTID 31 is handled by the SPI branch below; the
// test probably should be `intid < 32` — confirm against the GICv3 spec.
if (intid < 31) {
// SGI or PPI
gic.gic_rdist[rdid].sgis.GICR_IPRIORITYR[intid] = prio;
} else if (intid < 1020) {
// SPI
gic.gic_dist->GICD_IPRIORITYR[intid] = prio;
} else if ((intid > 1055) && (intid < 1120)) {
// Extended PPI
// Check Ext PPI implemented
if (!gic_is_valid_extppi(rdid, intid))
return EINVAL;
intid = intid - 1024; // extended PPI register space starts at INTID 1024
gic.gic_rdist[rdid].sgis.GICR_IPRIORITYR[intid] = prio;
} else if ((intid > 4095) && (intid < 5120)) {
// Extended SPI
// Check Ext SPI implemented
if (!gic_is_valid_extspi(rdid, intid))
return EINVAL;
gic.gic_dist->GICD_IPRIORITYRE[(intid-4096)] = prio;
} else {
return EINVAL;
}
return 0;
}
#define GICV3_GROUP0 (0)
#define GICV3_GROUP1_SECURE (1)
#define GICV3_GROUP1_NON_SECURE (2)
@ -545,10 +438,10 @@ gic_calc_group_mod(uint32_t intid, uint32_t sec, uint32_t * _group, uint32_t * _
}
static int
gic_set_intr_group(uint32_t rdid, uint32_t intid, uint32_t sec)
gic_set_intr_group(uint32_t intid, uint32_t sec)
{
uint32_t bank, group, mod;
const uint32_t rdid = MP_GetCPUState()->gic_redist_id;
if (rdid > gic.max_rd)
return EINVAL;
@ -629,52 +522,153 @@ gic_set_intr_group(uint32_t rdid, uint32_t intid, uint32_t sec)
return 0;
}
/*
 * gic_enable_intr --
 *
 *  Enable interrupt INTID for the calling CPU.  The interrupt's
 *  priority is set to 0 (highest) and its group to non-secure group 1
 *  before the enable bit is written.  SGI/PPI and extended-PPI enables
 *  live in this CPU's redistributor (GICR_ISENABLER); SPI and
 *  extended-SPI enables live in the shared distributor
 *  (GICD_ISENABLER / GICD_ISENABLERE).
 *
 *  Returns 0 on success or EINVAL for an out-of-range redistributor
 *  or INTID.
 */
int
gic_enable_intr(uint32_t intid)
{
    uint32_t bank;
    const uint32_t rdid = MP_GetCPUState()->gic_redist_id;

    if (rdid > gic.max_rd) {
        return EINVAL;
    }

    int ret = gic_set_intr_prio(intid, 0);
    if (ret != 0) {
        return ret;
    }

    ret = gic_set_intr_group(intid, GICV3_GROUP1_NON_SECURE);
    if (ret != 0) {
        return ret;
    }

    if (intid < 32) {
        /*
         * SGI (0-15) or PPI (16-31).  BUGFIX: the previous test was
         * `intid < 31`, which misrouted the valid PPI INTID 31 into
         * the SPI branch.  Shift an unsigned 1 so bit 31 is not UB.
         */
        gic.gic_rdist[rdid].sgis.GICR_ISENABLER[0] = (1u << intid);
    } else if (intid < 1020) {
        // SPI: 32 IDs per GICD_ISENABLER register.
        bank = intid / 32;      // which register to access
        intid = intid & 0x1f;   // which bit within the register
        gic.gic_dist->GICD_ISENABLER[bank] = 1u << intid;
    } else if ((intid > 1055) && (intid < 1120)) {
        // Extended PPI; check that this ID is implemented first.
        if (!gic_is_valid_extppi(rdid, intid))
            return EINVAL;
        intid = intid - 1024;   // extended PPI register space starts at 1024
        bank = intid / 32;      // which register to access
        intid = intid & 0x1F;   // which bit within the register
        gic.gic_rdist[rdid].sgis.GICR_ISENABLER[bank] = 1u << intid;
    } else if ((intid > 4095) && (intid < 5120)) {
        // Extended SPI; check that this ID is implemented first.
        if (!gic_is_valid_extspi(rdid, intid))
            return EINVAL;
        intid = intid - 4096;   // extended SPI register space starts at 4096
        bank = intid / 32;      // which register to access
        intid = intid & 0x1F;   // which bit within the register
        gic.gic_dist->GICD_ISENABLERE[bank] = 1u << intid;
    } else {
        return EINVAL;
    }
    return 0;
}
/*
 * gic_set_prio_mask --
 *
 *  Program the CPU interface priority mask (ICC_PMR_EL1) with the low
 *  8 bits of MASK; only interrupts of sufficient priority relative to
 *  the mask are signalled to this PE.
 *
 *  Cleanup: removed the dead commented-out hand-written asm that the
 *  SYSREG_SET/data_barrier rewrite superseded.
 */
void
gic_set_prio_mask(unsigned int mask)
{
    uint64_t _mask = mask & 0xFF;

    SYSREG_SET(ICC_PMR_EL1, _mask);
    data_barrier(); // dsb sy: complete the register write before returning
}
/*
 * gic_set_intr_prio --
 *
 *  Set the priority byte for interrupt INTID on the calling CPU:
 *  SGI/PPI and extended-PPI priorities live in this CPU's
 *  redistributor (GICR_IPRIORITYR); SPI and extended-SPI priorities
 *  live in the shared distributor (GICD_IPRIORITYR / GICD_IPRIORITYRE).
 *
 *  Returns 0 on success or EINVAL for an out-of-range redistributor
 *  or INTID.
 */
int
gic_set_intr_prio(uint32_t intid, uint8_t prio)
{
    const uint32_t rdid = MP_GetCPUState()->gic_redist_id;

    if (rdid > gic.max_rd) {
        return EINVAL;
    }

    if (intid < 32) {
        /*
         * SGI (0-15) or PPI (16-31).  BUGFIX: the previous test was
         * `intid < 31`, which sent the valid PPI INTID 31 down the
         * SPI path and indexed GICD_IPRIORITYR out of its range.
         */
        gic.gic_rdist[rdid].sgis.GICR_IPRIORITYR[intid] = prio;
    } else if (intid < 1020) {
        // SPI
        gic.gic_dist->GICD_IPRIORITYR[intid] = prio;
    } else if ((intid > 1055) && (intid < 1120)) {
        // Extended PPI; check that this ID is implemented first.
        if (!gic_is_valid_extppi(rdid, intid))
            return EINVAL;
        intid = intid - 1024; // extended PPI register space starts at 1024
        gic.gic_rdist[rdid].sgis.GICR_IPRIORITYR[intid] = prio;
    } else if ((intid > 4095) && (intid < 5120)) {
        // Extended SPI; check that this ID is implemented first.
        if (!gic_is_valid_extspi(rdid, intid))
            return EINVAL;
        gic.gic_dist->GICD_IPRIORITYRE[(intid - 4096)] = prio;
    } else {
        return EINVAL;
    }
    return 0;
}
void
gic_send_eoi(int intr)
{
intr = intr & 0xFFFFFF;
__asm__ volatile (
"mov x0, %x0;"
"msr ICC_EOIR1_EL1, x0;"
"dsb sy;"
:
: "r" (intr)
: "x0"
);
}
unsigned int
gic_ack_intr(void)
{
unsigned int ret;
__asm__ volatile (
"mrs x0, ICC_IAR1_EL1;"
"mov %x0, x0;"
: "=r" (ret)
:
: "x0"
);
return ret;
uint64_t _intr = intr & 0xFFFFFF;
SYSREG_SET(ICC_EOIR1_EL1, _intr);
data_barrier();
}
int
gic_init(void)
{
int ret;
if (!gic_discover()) {
return ENOENT;
if ((ret = gic_discover()) != 0) {
return ret;
}
gic_enable();
return gic_redist_init();
}
int
gic_redist_init(void)
{
int ret;
unsigned int rd;
struct CPUState * cpustate = MP_GetCPUState();
// Get the ID of the Redistributor connected to this PE
ret = gic_get_redist_id(gic_get_affinity(), &rd);
ret = gic_get_redist_id(cpustate->mpid, &rd);
if (ret != 0) {
return ret;
}
cpustate->gic_redist_id = rd;
kprintf("GIC: CPU %d, MPID 0x%lx, Redist ID 0x%lx.\n", cpustate->id, cpustate->mpid, cpustate->gic_redist_id);
// Mark this core as being active
gic_wakeup_redist(rd);
@ -682,28 +676,23 @@ gic_init(void)
// Configure the CPU interface
// This assumes that the SRE bits are already set
// enable group 1 interrupts
__asm__ volatile (
"MRS x0, ICC_IGRPEN1_EL1;"
"ORR w0, w0, #1;"
"MSR ICC_IGRPEN1_EL1, x0;"
"ISB"
:
:
: "x0"
);
uint64_t igrpen1 = SYSREG_GET(ICC_IGRPEN1_EL1);
igrpen1 |= 1;
SYSREG_SET(ICC_IGRPEN1_EL1, igrpen1);
inst_barrier();
// __asm__ volatile (
// "MRS x0, ICC_IGRPEN1_EL1;"
// "ORR w0, w0, #1;"
// "MSR ICC_IGRPEN1_EL1, x0;"
// "ISB"
// :
// :
// : "x0"
// );
// unmask all interrupt priorities
gic_set_prio_mask(0xff);
// Non-secure EL1 Physical Timer (INTID 30)
ret = gic_set_intr_prio(rd, 30, 0);
if (ret != 0) {
return ret;
}
ret = gic_set_intr_group(rd, 30, GICV3_GROUP1_NON_SECURE);
if (ret != 0) {
return ret;
}
return gic_enable_intr(rd, 30);
return 0;
}

View File

@ -1,38 +0,0 @@
#ifndef __ATOMIC_H__
#define __ATOMIC_H__
/*
 * atomic_swap_uint32 --
 *
 *  Atomically store NEWVAL into *DST and return the previous value,
 *  using the AArch64 LSE SWP instruction (32-bit form).
 *
 *  BUGFIX: the previous version returned NEWVAL instead of the value
 *  swapped out of memory, and addressed memory through a 32-bit W
 *  register ([%w1]) rather than the full 64-bit pointer register.
 */
static INLINE uint64_t
atomic_swap_uint32(volatile uint32_t *dst, uint32_t newval)
{
    uint32_t retval;
    // swp Ws, Wt, [Xn]: store %w2 (newval), load the old value into %w0.
    __asm__ volatile(".arch_extension lse; swp %w2, %w0, [%1]; .arch_extension nolse;"
                     : "=r" (retval)
                     : "r" (dst), "r" (newval)
                     : "memory");
    return retval;
}
/*
 * atomic_swap_uint64 --
 *
 *  Atomically store NEWVAL into *DST and return the previous value,
 *  using the AArch64 LSE SWP instruction (64-bit form).
 */
static INLINE uint64_t
atomic_swap_uint64(volatile uint64_t *dst, uint64_t newval)
{
uint64_t retval;
// swp Xs, Xt, [Xn]: store %2 (newval), load the old value into %0 (retval).
__asm__ volatile(".arch_extension lse; swp %2, %0, [%1]; .arch_extension nolse;"
: "=r" (retval)
: "r" (dst), "r" (newval)
: "memory");
return retval;
}
/*
 * atomic_set_uint64 --
 *
 *  Store NEWVAL to *DST with a plain volatile store.
 *  NOTE(review): despite the name, no memory-ordering barrier is
 *  implied here; this presumably relies on aligned 64-bit stores
 *  being single-copy atomic on the target — confirm callers that
 *  need release semantics add their own barrier.
 */
static inline void
atomic_set_uint64(volatile uint64_t *dst, uint64_t newval)
{
*dst = newval;
}
#endif /* __ATOMIC_H__ */

View File

@ -11,12 +11,12 @@
static ALWAYS_INLINE INLINE void enable_interrupts()
{
__asm__ volatile("msr daifclr, #(0x0002)\n");
__asm__ volatile("msr daifclr, #0b1111");
}
static ALWAYS_INLINE INLINE void disable_interrupts()
{
__asm__ volatile("msr daifset, #(0x0002)\n");
__asm__ volatile("msr daifset, #0b1111");
}
static ALWAYS_INLINE INLINE void hlt()
@ -26,15 +26,7 @@ static ALWAYS_INLINE INLINE void hlt()
static ALWAYS_INLINE INLINE void flushtlb()
{
uint64_t zero = 0;
__asm__ volatile (
"msr ttbr0_el1, %x0;"
"msr ttbr1_el1, %x0;"
"TLBI VMALLE1;"
:
: "r" (zero)
:
);
__asm__ volatile("TLBI VMALLE1");
}
static ALWAYS_INLINE INLINE void pause()
@ -47,5 +39,37 @@ static ALWAYS_INLINE INLINE void breakpoint()
__asm__ volatile("brk #0");
}
// Instruction synchronization barrier ("isb"): flush the pipeline so
// preceding context-altering operations (e.g. system register writes)
// take effect before the next instruction.
static ALWAYS_INLINE INLINE void inst_barrier()
{
__asm__ volatile("isb");
}
// Full-system data synchronization barrier ("dsb sy").
static ALWAYS_INLINE INLINE void data_barrier()
{
__asm__ volatile("dsb sy");
}
/*
 * SYSREG_GET(name) --
 *
 *  Read system register NAME (an identifier, stringized into the mrs
 *  instruction) and yield its 64-bit value as a statement expression.
 */
#define SYSREG_GET(name) \
({ \
uint64_t _tmp; \
__asm__ volatile ( \
"mrs %0, " #name \
: "=r" (_tmp) \
: \
: \
); \
_tmp; \
})
/*
 * SYSREG_SET(name, var) --
 *
 *  Write VAR into system register NAME via msr.  No barrier is
 *  implied; callers add inst_barrier()/data_barrier() as required.
 */
#define SYSREG_SET(name, var) \
({ \
__asm__ volatile ( \
"msr " #name ", %0" \
: \
: "r" (var) \
: \
); \
})
#endif /* __ARM64OP_H__ */

32
sys/arm64/include/gic.h Normal file
View File

@ -0,0 +1,32 @@
#pragma once
#include <stdint.h>
/*
 * Public interface of the arm64 GICv3 interrupt controller driver.
 * Functions returning int use 0 for success and an errno value
 * (e.g. EINVAL, ENOENT) on failure.
 */
// Return this PE's packed affinity value (derived from MPIDR_EL1).
uint32_t
gic_get_affinity(void);
// Program the CPU interface priority mask; only the low 8 bits are used.
void
gic_set_prio_mask(unsigned int mask);
// Set the priority byte for INTID on the calling CPU.
int
gic_set_intr_prio(uint32_t intid, uint8_t prio);
// Signal end-of-interrupt for INTR to the CPU interface.
void
gic_send_eoi(int intr);
// initialize global GIC
int
gic_init(void);
// per core GIC redist init
int
gic_redist_init(void);
// Enable delivery of INTID to the calling CPU.
int
gic_enable_intr(uint32_t intid);
// Disable delivery of INTID to the calling CPU.
int
gic_disable_intr(uint32_t intid);
// Look up the redistributor serving AFFINITY; stores its ID in *id.
int
gic_get_redist_id(uint32_t affinity, unsigned int *id);

View File

@ -0,0 +1,280 @@
#pragma once
#define AARCH_REG_X0 (0)
#define AARCH_REG_X1 (1)
#define AARCH_REG_X2 (2)
#define AARCH_REG_X3 (3)
#define AARCH_REG_X4 (4)
#define AARCH_REG_X5 (5)
#define AARCH_REG_X6 (6)
#define AARCH_REG_X7 (7)
#define AARCH_REG_X8 (8)
#define AARCH_REG_X9 (9)
#define AARCH_REG_X10 (10)
#define AARCH_REG_X11 (11)
#define AARCH_REG_X12 (12)
#define AARCH_REG_X13 (13)
#define AARCH_REG_X14 (14)
#define AARCH_REG_X15 (15)
#define AARCH_REG_X16 (16)
#define AARCH_REG_X17 (17)
#define AARCH_REG_X18 (18)
#define AARCH_REG_X19 (19)
#define AARCH_REG_X20 (20)
#define AARCH_REG_X21 (21)
#define AARCH_REG_X22 (22)
#define AARCH_REG_X23 (23)
#define AARCH_REG_X24 (24)
#define AARCH_REG_X25 (25)
#define AARCH_REG_X26 (26)
#define AARCH_REG_X27 (27)
#define AARCH_REG_X28 (28)
#define AARCH_REG_X29 (29)
#define AARCH_REG_X30 (30)
#define AARCH_REG_X31 (31)
#define METAL_REG_MO0 (0)
#define METAL_REG_MO1 (1)
#define METAL_REG_MO2 (2)
#define METAL_REG_MO3 (3)
#define METAL_REG_MO4 (4)
#define METAL_REG_MO5 (5)
#define METAL_REG_MR0 (6)
#define METAL_REG_MR1 (7)
#define METAL_REG_MR2 (8)
#define METAL_REG_MR3 (9)
#define METAL_REG_MI0 (10)
#define METAL_REG_MI1 (11)
#define METAL_REG_MI2 (12)
#define METAL_REG_MI3 (13)
#define METAL_REG_MI4 (14)
#define METAL_REG_MI5 (15)
#define METAL_REG_MLR (METAL_REG_MI5)
#define METAL_REG_MIR0 (METAL_REG_MI0)
#define METAL_REG_MIR1 (METAL_REG_MI1)
#define METAL_REG_MIR2 (METAL_REG_MI2)
#define METAL_REG_MER0 (METAL_REG_MI0)
#define METAL_REG_MER1 (METAL_REG_MI1)
#define METAL_REG_MER2 (METAL_REG_MI2)
#define METAL_REG_MSPSR (METAL_REG_MI3)
#define METAL_REG_MSR (16)
#define METAL_REG_MBR (17)
#define METAL_REG_MIB (18)
#define METAL_REG_MEB (19)
#define METAL_REG_MTP (20)
#define METAL_REG_MG5 (21)
#define METAL_REG_MG6 (22)
#define METAL_REG_MG7 (23)
#define METAL_REG_MG8 (24)
#define METAL_REG_MG9 (25)
#define METAL_REG_MG10 (26)
#define METAL_REG_MG11 (27)
#define METAL_REG_MG12 (28)
#define METAL_REG_MG13 (29)
#define METAL_REG_MG14 (30)
#define METAL_REG_MG15 (31)
#define _METAL_STR(x) #x
#define METAL_STR(x) _METAL_STR(x)
#define _METAL_GAS_ENCODE(x) ".word " METAL_STR(x) ";"
#define METAL_GAS_ENCODE(x) _METAL_GAS_ENCODE(x)
// metal insts defs
#define METAL_WMR_ENCODING(mreg, greg) (0xd61f2C00 | ((mreg) << 5) | ((greg) << 0))
#define METAL_WMR_GAS(mreg, greg) METAL_GAS_ENCODE(METAL_WMR_ENCODING(mreg, greg))
#define MASM_WMR(mreg, greg) .word METAL_WMR_ENCODING(mreg, greg)
#define METAL_WMR(reg, var) do { __asm__ volatile (\
"mov x0, %x0;" \
METAL_WMR_GAS(reg, AARCH_REG_X0)\
: \
: "r" (var)\
: "x0" \
); } while(0)
#define METAL_RMR_ENCODING(mreg, greg) (0xd61f2800 | ((mreg) << 5) | ((greg) << 0))
#define METAL_RMR_GAS(mreg, greg) METAL_GAS_ENCODE(METAL_RMR_ENCODING(mreg,greg))
#define MASM_RMR(mreg, greg) .word METAL_RMR_ENCODING(mreg, greg)
#define METAL_RMR(reg, var) do { \
__asm__ volatile ( \
METAL_RMR_GAS(reg, AARCH_REG_X0) \
"mov %x0, x0;"\
: "+r" (var) \
: \
: "x0" \
); } while(0)
//
// we need to preserve the stack pointer between mroutine calls
// mroutines never return values using stacks
// and since "mexit"s always occur before "return"s
// the function epilogue is not run before returning
// which may destroy the stack if local variables are defined
//
// we do this using the mroutine stub function to wrap mroutine calls
//
#define METAL_MENTER_ENCODING(mroutine) (0xd61f2000 | ((mroutine) << 0))
#define METAL_MENTER_GAS(mroutine) METAL_GAS_ENCODE(METAL_MENTER_ENCODING(mroutine))
#define METAL_MENTER(mroutine) do { \
__asm__ volatile ( \
METAL_MENTER_GAS(mroutine) \
: \
: \
: \
); \
} while(0)
#define METAL_MEXIT_ENCODING(mreg) (0xd61f2400 | ((mreg) << 0))
#define METAL_MEXIT_GAS(mreg) METAL_GAS_ENCODE(METAL_MEXIT_ENCODING(mreg))
#define METAL_MEXIT(mreg) do { \
__asm__ volatile ( \
METAL_MEXIT_GAS(mreg) \
: \
: \
: \
); } while(0)
#define MEXIT_FLAG_IIM (1 << 2) // this mexit masks instruction intercept for the first instruction after mexit
#define MEXIT_FLAG_RFI (1 << 1) // this mexit is a return from intercept (also restores CPSR from MSPSR)
#define MEXIT_FLAG_ID (1 << 0) // this mexit masks interrupts for the first instruction after mexit
#define MEXIT_FLAG_EIM (1 << 3) // this mexit masks exception intercept for the first instruction after mexit
// do not provide C version of RAR/WAR for the current bank
// can't decide which GP register to use as temp
#define METAL_RAR_ENCODING(idxmreg, dstmreg) (0xd61f3000 | ((idxmreg) << 5) | ((dstmreg) << 0))
#define METAL_RAR_GAS(idxmreg, dstmreg) METAL_GAS_ENCODE(METAL_RAR_ENCODING(idxmreg, dstmreg))
#define METAL_WAR_ENCODING(idxmreg, srcmreg) (0xd61f3400 | ((idxmreg) << 5) | ((srcmreg) << 0))
#define METAL_WAR_GAS(idxmreg, dstmreg) METAL_GAS_ENCODE(METAL_WAR_ENCODING(idxmreg, dstmreg))
#define METAL_RPR_ENCODING(idxmreg, dstmreg) (0xd61f4000 | ((idxmreg) << 5) | ((dstmreg) << 0))
#define METAL_RPR_GAS(idxmreg, dstmreg) METAL_GAS_ENCODE(METAL_RPR_ENCODING(idxmreg, dstmreg))
#define METAL_RPR(idxvar, var) do { __asm__ volatile (\
METAL_RMR_GAS(METAL_REG_MR0, AARCH_REG_X1) \
METAL_RMR_GAS(METAL_REG_MR1, AARCH_REG_X2) \
\
"mov x0, %x1;" \
METAL_WMR_GAS(METAL_REG_MR0, AARCH_REG_X0) \
METAL_RPR_GAS(METAL_REG_MR0, METAL_REG_MR1) \
METAL_RMR_GAS(METAL_REG_MR1, AARCH_REG_X0) \
"mov %x0, x0;" \
\
METAL_WMR_GAS(METAL_REG_MR0, AARCH_REG_X1) \
METAL_WMR_GAS(METAL_REG_MR1, AARCH_REG_X2) \
: "+r" (var) \
: "r" (idxvar)\
: "x0", "x1", "x2" \
); } while(0)
#define METAL_WPR_ENCODING(idxmreg, srcmreg) (0xd61f4400 | ((idxmreg) << 5) | ((srcmreg) << 0))
#define METAL_WPR_GAS(idxmreg, srcmreg) METAL_GAS_ENCODE(METAL_WPR_ENCODING(idxmreg, srcmreg))
#define METAL_WPR(idxvar, var) do { __asm__ volatile (\
METAL_RMR_GAS(METAL_REG_MR0, AARCH_REG_X1) \
METAL_RMR_GAS(METAL_REG_MR1, AARCH_REG_X2) \
\
"mov x0, %x0;" \
METAL_WMR_GAS(METAL_REG_MR0, AARCH_REG_X0) \
"mov x0, %x1;" \
METAL_WMR_GAS(METAL_REG_MR1, AARCH_REG_X0) \
\
METAL_WPR_GAS(METAL_REG_MR0, METAL_REG_MR1) \
\
METAL_WMR_GAS(METAL_REG_MR0, AARCH_REG_X1) \
METAL_WMR_GAS(METAL_REG_MR1, AARCH_REG_X2) \
: \
: "r" (idxvar), "r" (var) \
: "x0", "x1", "x2" \
); } while(0)
#define METAL_MCLI_ENCODING (0xd61f3800)
#define METAL_MCLI_GAS METAL_GAS_ENCODE(METAL_MCLI_ENCODING)
#define METAL_MCLI do { \
__asm__ volatile ( \
METAL_MCLI_GAS \
: \
: \
: \
); } while(0)
// METAL_MSTI: emit the raw "msti" metal opcode as a literal .word (the
// assembler does not know these custom instructions). Presumably the
// set/unmask-interrupts counterpart of MCLI (0xd61f3800) — TODO confirm
// against the metal ISA documentation.
#define METAL_MSTI_ENCODING (0xd61f3C00)
#define METAL_MSTI_GAS METAL_GAS_ENCODE(METAL_MSTI_ENCODING)
#define METAL_MSTI do { \
__asm__ volatile ( \
METAL_MSTI_GAS \
: \
: \
: \
); } while(0)
// WTLB: write a TLB entry. Three metal-register indices are packed into
// fixed bit positions of the 32-bit opcode: Rm at [4:0], Rn at [9:5],
// Rl at [14:10].
#define _METAL_WTLB_SHIFT_RM (0)
#define _METAL_WTLB_SHIFT_RN (5)
#define _METAL_WTLB_SHIFT_RL (10)
#define _METAL_WTLB_ENCODING(rl, rn, rm) ".word " METAL_STR(0xd63f8000 | (rl << _METAL_WTLB_SHIFT_RL) | (rn << _METAL_WTLB_SHIFT_RN) | (rm << _METAL_WTLB_SHIFT_RM))
// NOTE(review): the arguments are forwarded as (rl=descreg, rn=vaddrreg,
// rm=inforeg), i.e. NOT in this macro's own (descreg, inforeg, vaddrreg)
// parameter order — verify this swap is intentional against the encoding.
#define METAL_WTLB(descreg, inforeg, vaddrreg) do { __asm__ volatile (\
_METAL_WTLB_ENCODING(descreg, vaddrreg, inforeg)); \
} while (0)
// RTLB: read a TLB entry; identical field layout to WTLB, different opcode
// (0xd61f8000 vs 0xd63f8000).
#define _METAL_RTLB_SHIFT_RM (0)
#define _METAL_RTLB_SHIFT_RN (5)
#define _METAL_RTLB_SHIFT_RL (10)
#define _METAL_RTLB_ENCODING(rl, rn, rm) ".word " METAL_STR(0xd61f8000 | (rl << _METAL_RTLB_SHIFT_RL) | (rn << _METAL_RTLB_SHIFT_RN) | (rm << _METAL_RTLB_SHIFT_RM))
#define METAL_RTLB(descreg, inforeg, vaddrreg) do { __asm__ volatile (\
_METAL_RTLB_ENCODING(descreg, vaddrreg, inforeg)); \
} while (0)
// Physical-memory-op addressing modes. PRE/POST are presumably
// pre-/post-increment variants — TODO confirm; only NORMAL semantics are
// visible from this header.
#define METAL_PMEMOP_MODE_NORMAL (0)
#define METAL_PMEMOP_MODE_PRE (1)
#define METAL_PMEMOP_MODE_POST (2)
// PSTR
// Physical store template: marshal the value into x0, the physical
// address into x1, zero x2 (the offset register), then emit the encoded
// store opcode, which reads those fixed registers (X0/X1/X2 below).
#define _METAL_PSTR_TEMPLATE(var, paddr, cmd) do { __asm__ volatile (\
"mov x0, %x0;" \
"mov x1, %x1;" \
"mov x2, #0;" \
cmd \
: \
: "r" (var), "r" (paddr) \
: "x0", "x1", "x2" \
); } while (0)
// 8/16/32/64-bit physical stores. dReg = data, bReg = base address,
// oReg = offset register, packed at bits [4:0], [9:5], [14:10].
#define METAL_PSTRR8_ENCODING(dReg, bReg, oReg) (0x8c400000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PSTRR8_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PSTRR8_ENCODING(dReg, bReg, oReg))
#define METAL_PSTR8(var, paddr) _METAL_PSTR_TEMPLATE(var, paddr, METAL_PSTRR8_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))
#define METAL_PSTRR16_ENCODING(dReg, bReg, oReg) (0x8cc00000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PSTRR16_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PSTRR16_ENCODING(dReg, bReg, oReg))
#define METAL_PSTR16(var, paddr) _METAL_PSTR_TEMPLATE(var, paddr, METAL_PSTRR16_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))
#define METAL_PSTRR32_ENCODING(dReg, bReg, oReg) (0x8d400000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PSTRR32_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PSTRR32_ENCODING(dReg, bReg, oReg))
#define METAL_PSTR32(var, paddr) _METAL_PSTR_TEMPLATE(var, paddr, METAL_PSTRR32_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))
#define METAL_PSTRR64_ENCODING(dReg, bReg, oReg) (0x8dc00000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PSTRR64_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PSTRR64_ENCODING(dReg, bReg, oReg))
#define METAL_PSTR64(var, paddr) _METAL_PSTR_TEMPLATE(var, paddr, METAL_PSTRR64_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))
// PLDR
// Physical load template: physical address into x1, zero x2 (offset),
// emit the encoded load opcode, then copy the result out of x0.
#define _METAL_PLDR_TEMPLATE(var, paddr, cmd) do { __asm__ volatile (\
"mov x1, %x1;" \
"mov x2, #0;" \
cmd \
"mov %x0, x0;" \
: "+r" (var) \
: "r" (paddr) \
: "x0", "x1", "x2" \
); } while (0)
// 8/16/32/64-bit physical loads; same field layout as the stores above.
#define METAL_PLDRR8_ENCODING(dReg, bReg, oReg) (0x8c000000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PLDRR8_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PLDRR8_ENCODING(dReg, bReg, oReg))
#define METAL_PLDR8(var, paddr) _METAL_PLDR_TEMPLATE(var, paddr, METAL_PLDRR8_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))
#define METAL_PLDRR16_ENCODING(dReg, bReg, oReg) (0x8c800000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PLDRR16_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PLDRR16_ENCODING(dReg, bReg, oReg))
#define METAL_PLDR16(var, paddr) _METAL_PLDR_TEMPLATE(var, paddr, METAL_PLDRR16_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))
#define METAL_PLDRR32_ENCODING(dReg, bReg, oReg) (0x8d000000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PLDRR32_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PLDRR32_ENCODING(dReg, bReg, oReg))
#define METAL_PLDR32(var, paddr) _METAL_PLDR_TEMPLATE(var, paddr, METAL_PLDRR32_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))
#define METAL_PLDRR64_ENCODING(dReg, bReg, oReg) (0x8d800000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PLDRR64_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PLDRR64_ENCODING(dReg, bReg, oReg))
#define METAL_PLDR64(var, paddr) _METAL_PLDR_TEMPLATE(var, paddr, METAL_PLDRR64_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))

View File

@ -6,284 +6,7 @@ extern "C" {
#include <stdint.h>
#include <stddef.h>
#define AARCH_REG_X0 (0)
#define AARCH_REG_X1 (1)
#define AARCH_REG_X2 (2)
#define AARCH_REG_X3 (3)
#define AARCH_REG_X4 (4)
#define AARCH_REG_X5 (5)
#define AARCH_REG_X6 (6)
#define AARCH_REG_X7 (7)
#define AARCH_REG_X8 (8)
#define AARCH_REG_X9 (9)
#define AARCH_REG_X10 (10)
#define AARCH_REG_X11 (11)
#define AARCH_REG_X12 (12)
#define AARCH_REG_X13 (13)
#define AARCH_REG_X14 (14)
#define AARCH_REG_X15 (15)
#define AARCH_REG_X16 (16)
#define AARCH_REG_X17 (17)
#define AARCH_REG_X18 (18)
#define AARCH_REG_X19 (19)
#define AARCH_REG_X20 (20)
#define AARCH_REG_X21 (21)
#define AARCH_REG_X22 (22)
#define AARCH_REG_X23 (23)
#define AARCH_REG_X24 (24)
#define AARCH_REG_X25 (25)
#define AARCH_REG_X26 (26)
#define AARCH_REG_X27 (27)
#define AARCH_REG_X28 (28)
#define AARCH_REG_X29 (29)
#define AARCH_REG_X30 (30)
#define AARCH_REG_X31 (31)
#define METAL_REG_MO0 (0)
#define METAL_REG_MO1 (1)
#define METAL_REG_MO2 (2)
#define METAL_REG_MO3 (3)
#define METAL_REG_MO4 (4)
#define METAL_REG_MO5 (5)
#define METAL_REG_MR0 (6)
#define METAL_REG_MR1 (7)
#define METAL_REG_MR2 (8)
#define METAL_REG_MR3 (9)
#define METAL_REG_MI0 (10)
#define METAL_REG_MI1 (11)
#define METAL_REG_MI2 (12)
#define METAL_REG_MI3 (13)
#define METAL_REG_MI4 (14)
#define METAL_REG_MI5 (15)
#define METAL_REG_MLR (METAL_REG_MI5)
#define METAL_REG_MIR0 (METAL_REG_MI0)
#define METAL_REG_MIR1 (METAL_REG_MI1)
#define METAL_REG_MIR2 (METAL_REG_MI2)
#define METAL_REG_MER0 (METAL_REG_MI0)
#define METAL_REG_MER1 (METAL_REG_MI1)
#define METAL_REG_MER2 (METAL_REG_MI2)
#define METAL_REG_MSPSR (METAL_REG_MI3)
#define METAL_REG_MSR (16)
#define METAL_REG_MBR (17)
#define METAL_REG_MIB (18)
#define METAL_REG_MEB (19)
#define METAL_REG_MTP (20)
#define METAL_REG_MG5 (21)
#define METAL_REG_MG6 (22)
#define METAL_REG_MG7 (23)
#define METAL_REG_MG8 (24)
#define METAL_REG_MG9 (25)
#define METAL_REG_MG10 (26)
#define METAL_REG_MG11 (27)
#define METAL_REG_MG12 (28)
#define METAL_REG_MG13 (29)
#define METAL_REG_MG14 (30)
#define METAL_REG_MG15 (31)
#define _METAL_STR(x) #x
#define METAL_STR(x) _METAL_STR(x)
#define _METAL_GAS_ENCODE(x) ".word " METAL_STR(x) ";"
#define METAL_GAS_ENCODE(x) _METAL_GAS_ENCODE(x)
// metal insts defs
#define METAL_WMR_ENCODING(mreg, greg) (0xd61f2C00 | ((mreg) << 5) | ((greg) << 0))
#define METAL_WMR_GAS(mreg, greg) METAL_GAS_ENCODE(METAL_WMR_ENCODING(mreg, greg))
#define METAL_WMR(reg, var) do { __asm__ volatile (\
"mov x0, %x0;" \
METAL_WMR_GAS(reg, AARCH_REG_X0)\
: \
: "r" (var)\
: "x0" \
); } while(0)
#define METAL_RMR_ENCODING(mreg, greg) (0xd61f2800 | ((mreg) << 5) | ((greg) << 0))
#define METAL_RMR_GAS(mreg, greg) METAL_GAS_ENCODE(METAL_RMR_ENCODING(mreg,greg))
#define METAL_RMR(reg, var) do { \
__asm__ volatile ( \
METAL_RMR_GAS(reg, AARCH_REG_X0) \
"mov %x0, x0;"\
: "+r" (var) \
: \
: "x0" \
); } while(0)
//
// we need to preserve the stack pointer between mroutine calls
// mroutines never return values using stacks
// and since "mexit"s always occur before "return"s
// the function epilogue is not run before returning
// which may destroy the stack if local variables are defined
//
// we do this using the mroutine stub function to wrap mroutine calls
//
#define METAL_MENTER_ENCODING(mroutine) (0xd61f2000 | ((mroutine) << 0))
#define METAL_MENTER_GAS(mroutine) METAL_GAS_ENCODE(METAL_MENTER_ENCODING(mroutine))
#define METAL_MENTER(mroutine) do { \
__asm__ volatile ( \
METAL_MENTER_GAS(mroutine) \
: \
: \
: \
); \
} while(0)
#define METAL_MEXIT_ENCODING(mreg) (0xd61f2400 | ((mreg) << 0))
#define METAL_MEXIT_GAS(mreg) METAL_GAS_ENCODE(METAL_MEXIT_ENCODING(mreg))
#define METAL_MEXIT(mreg) do { \
__asm__ volatile ( \
METAL_MEXIT_GAS(mreg) \
: \
: \
: \
); } while(0)
#define MEXIT_FLAG_IIM (1 << 2) // this mexit masks instruction intercept for the first instruction after mexit
#define MEXIT_FLAG_RFI (1 << 1) // this mexit is a return from intercept (also restores CPSR from MSPSR)
#define MEXIT_FLAG_ID (1 << 0) // this mexit masks interrupts for the first instruction after mexit
#define MEXIT_FLAG_EIM (1 << 3) // this mexit masks exception intercept for the first instruction after mexit
// do not provide C version of RAR/WAR for the current bank
// can't decide which GP register to use as temp
#define METAL_RAR_ENCODING(idxmreg, dstmreg) (0xd61f3000 | ((idxmreg) << 5) | ((dstmreg) << 0))
#define METAL_RAR_GAS(idxmreg, dstmreg) METAL_GAS_ENCODE(METAL_RAR_ENCODING(idxmreg, dstmreg))
#define METAL_WAR_ENCODING(idxmreg, srcmreg) (0xd61f3400 | ((idxmreg) << 5) | ((srcmreg) << 0))
#define METAL_WAR_GAS(idxmreg, dstmreg) METAL_GAS_ENCODE(METAL_WAR_ENCODING(idxmreg, dstmreg))
#define METAL_RPR_ENCODING(idxmreg, dstmreg) (0xd61f4000 | ((idxmreg) << 5) | ((dstmreg) << 0))
#define METAL_RPR_GAS(idxmreg, dstmreg) METAL_GAS_ENCODE(METAL_RPR_ENCODING(idxmreg, dstmreg))
#define METAL_RPR(idxvar, var) do { __asm__ volatile (\
METAL_RMR_GAS(METAL_REG_MR0, AARCH_REG_X1) \
METAL_RMR_GAS(METAL_REG_MR1, AARCH_REG_X2) \
\
"mov x0, %x1;" \
METAL_WMR_GAS(METAL_REG_MR0, AARCH_REG_X0) \
METAL_RPR_GAS(METAL_REG_MR0, METAL_REG_MR1) \
METAL_RMR_GAS(METAL_REG_MR1, AARCH_REG_X0) \
"mov %x0, x0;" \
\
METAL_WMR_GAS(METAL_REG_MR0, AARCH_REG_X1) \
METAL_WMR_GAS(METAL_REG_MR1, AARCH_REG_X2) \
: "+r" (var) \
: "r" (idxvar)\
: "x0", "x1", "x2" \
); } while(0)
#define METAL_WPR_ENCODING(idxmreg, srcmreg) (0xd61f4400 | ((idxmreg) << 5) | ((srcmreg) << 0))
#define METAL_WPR_GAS(idxmreg, srcmreg) METAL_GAS_ENCODE(METAL_WPR_ENCODING(idxmreg, srcmreg))
#define METAL_WPR(idxvar, var) do { __asm__ volatile (\
METAL_RMR_GAS(METAL_REG_MR0, AARCH_REG_X1) \
METAL_RMR_GAS(METAL_REG_MR1, AARCH_REG_X2) \
\
"mov x0, %x0;" \
METAL_WMR_GAS(METAL_REG_MR0, AARCH_REG_X0) \
"mov x0, %x1;" \
METAL_WMR_GAS(METAL_REG_MR1, AARCH_REG_X0) \
\
METAL_WPR_GAS(METAL_REG_MR0, METAL_REG_MR1) \
\
METAL_WMR_GAS(METAL_REG_MR0, AARCH_REG_X1) \
METAL_WMR_GAS(METAL_REG_MR1, AARCH_REG_X2) \
: \
: "r" (idxvar), "r" (var) \
: "x0", "x1", "x2" \
); } while(0)
#define METAL_MCLI_ENCODING (0xd61f3800)
#define METAL_MCLI_GAS METAL_GAS_ENCODE(METAL_MCLI_ENCODING)
#define METAL_MCLI do { \
__asm__ volatile ( \
METAL_MCLI_GAS \
: \
: \
: \
); } while(0)
#define METAL_MSTI_ENCODING (0xd61f3C00)
#define METAL_MSTI_GAS METAL_GAS_ENCODE(METAL_MSTI_ENCODING)
#define METAL_MSTI do { \
__asm__ volatile ( \
METAL_MSTI_GAS \
: \
: \
: \
); } while(0)
#define _METAL_WTLB_SHIFT_RM (0)
#define _METAL_WTLB_SHIFT_RN (5)
#define _METAL_WTLB_SHIFT_RL (10)
#define _METAL_WTLB_ENCODING(rl, rn, rm) ".word " METAL_STR(0xd63f8000 | (rl << _METAL_WTLB_SHIFT_RL) | (rn << _METAL_WTLB_SHIFT_RN) | (rm << _METAL_WTLB_SHIFT_RM))
#define METAL_WTLB(descreg, inforeg, vaddrreg) do { __asm__ volatile (\
_METAL_WTLB_ENCODING(descreg, vaddrreg, inforeg)); \
} while (0)
#define _METAL_RTLB_SHIFT_RM (0)
#define _METAL_RTLB_SHIFT_RN (5)
#define _METAL_RTLB_SHIFT_RL (10)
#define _METAL_RTLB_ENCODING(rl, rn, rm) ".word " METAL_STR(0xd61f8000 | (rl << _METAL_RTLB_SHIFT_RL) | (rn << _METAL_RTLB_SHIFT_RN) | (rm << _METAL_RTLB_SHIFT_RM))
#define METAL_RTLB(descreg, inforeg, vaddrreg) do { __asm__ volatile (\
_METAL_RTLB_ENCODING(descreg, vaddrreg, inforeg)); \
} while (0)
#define METAL_PMEMOP_MODE_NORMAL (0)
#define METAL_PMEMOP_MODE_PRE (1)
#define METAL_PMEMOP_MODE_POST (2)
// PSTR
#define _METAL_PSTR_TEMPLATE(var, paddr, cmd) do { __asm__ volatile (\
"mov x0, %x0;" \
"mov x1, %x1;" \
"mov x2, #0;" \
cmd \
: \
: "r" (var), "r" (paddr) \
: "x0", "x1", "x2" \
); } while (0)
#define METAL_PSTRR8_ENCODING(dReg, bReg, oReg) (0x8c400000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PSTRR8_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PSTRR8_ENCODING(dReg, bReg, oReg))
#define METAL_PSTR8(var, paddr) _METAL_PSTR_TEMPLATE(var, paddr, METAL_PSTRR8_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))
#define METAL_PSTRR16_ENCODING(dReg, bReg, oReg) (0x8cc00000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PSTRR16_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PSTRR16_ENCODING(dReg, bReg, oReg))
#define METAL_PSTR16(var, paddr) _METAL_PSTR_TEMPLATE(var, paddr, METAL_PSTRR16_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))
#define METAL_PSTRR32_ENCODING(dReg, bReg, oReg) (0x8d400000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PSTRR32_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PSTRR32_ENCODING(dReg, bReg, oReg))
#define METAL_PSTR32(var, paddr) _METAL_PSTR_TEMPLATE(var, paddr, METAL_PSTRR32_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))
#define METAL_PSTRR64_ENCODING(dReg, bReg, oReg) (0x8dc00000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PSTRR64_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PSTRR64_ENCODING(dReg, bReg, oReg))
#define METAL_PSTR64(var, paddr) _METAL_PSTR_TEMPLATE(var, paddr, METAL_PSTRR64_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))
// PLDR
#define _METAL_PLDR_TEMPLATE(var, paddr, cmd) do { __asm__ volatile (\
"mov x1, %x1;" \
"mov x2, #0;" \
cmd \
"mov %x0, x0;" \
: "+r" (var) \
: "r" (paddr) \
: "x0", "x1", "x2" \
); } while (0)
#define METAL_PLDRR8_ENCODING(dReg, bReg, oReg) (0x8c000000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PLDRR8_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PLDRR8_ENCODING(dReg, bReg, oReg))
#define METAL_PLDR8(var, paddr) _METAL_PLDR_TEMPLATE(var, paddr, METAL_PLDRR8_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))
#define METAL_PLDRR16_ENCODING(dReg, bReg, oReg) (0x8c800000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PLDRR16_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PLDRR16_ENCODING(dReg, bReg, oReg))
#define METAL_PLDR16(var, paddr) _METAL_PLDR_TEMPLATE(var, paddr, METAL_PLDRR16_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))
#define METAL_PLDRR32_ENCODING(dReg, bReg, oReg) (0x8d000000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PLDRR32_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PLDRR32_ENCODING(dReg, bReg, oReg))
#define METAL_PLDR32(var, paddr) _METAL_PLDR_TEMPLATE(var, paddr, METAL_PLDRR32_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))
#define METAL_PLDRR64_ENCODING(dReg, bReg, oReg) (0x8d800000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10))
#define METAL_PLDRR64_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PLDRR64_ENCODING(dReg, bReg, oReg))
#define METAL_PLDR64(var, paddr) _METAL_PLDR_TEMPLATE(var, paddr, METAL_PLDRR64_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2))
#include <machine/metalasm.h>
// Mroutine helpers

View File

@ -2,22 +2,38 @@
#ifndef __MACHINE_MP_H__
#define __MACHINE_MP_H__
#include <sys/kconfig.h>
#include <stdint.h>
#include <sys/kassert.h>
#define CPUSTATE_NOT_PRESENT 0
#define CPUSTATE_BOOTED 1
#define CPUSTATE_HALTED 2
#define CPUSTATE_MAX 2
void MP_Init();
void MP_InitAP();
void MP_SetState(int state);
int MP_GetCPUs();
/* Cross Calls */
typedef int (*CrossCallCB)(void *);
void MP_CrossCallTrap();
int MP_CrossCall(CrossCallCB cb, void *arg);
uint32_t LAPIC_CPU();
struct CPUState {
unsigned int id;
uint32_t mpid;
uint32_t gic_redist_id;
volatile int state;
};
register uint64_t _cpu __asm__("x18");
static inline unsigned int CPU(void)
{
return _cpu;
}
void MP_InitCore(void);
void MP_BootAPs(void);
unsigned int MP_GetCPUs(void);
struct CPUState * MP_GetCPUState(void);
#endif /* __MACHINE_MP__ */

19
sys/arm64/include/timer.h Normal file
View File

@ -0,0 +1,19 @@
#pragma once
#include <stdint.h>
#include <machine/cpuop.h>
/*
 * Time_GetTSC --
 *   Return the current value of the Arm generic timer physical counter
 *   (CNTPCT_EL0), which this kernel uses as its timestamp counter.
 */
static inline uint64_t
Time_GetTSC()
{
    const uint64_t count = SYSREG_GET(CNTPCT_EL0);

    return count;
}
/*
 * Time_GetTSCFreq --
 *   Return the counter frequency in Hz as reported by CNTFRQ_EL0.
 */
static inline uint64_t
Time_GetTSCFreq()
{
    const uint64_t freq = SYSREG_GET(CNTFRQ_EL0);

    return freq;
}
/* One-time setup of the Arm generic physical timer (see sys/arm64/timer.c). */
void
PTimer_Init();

View File

@ -2,79 +2,101 @@
#ifndef __TRAP_H__
#define __TRAP_H__
#define T_UNKNOWN 0x00 /* Unknown */
#define T_WFIWFE 0x01 /* WFI/WFE */
#define T_SIMDFP 0x07
#define T_ILLSTATE 0x0e /* Illegal Execution State */
#define T_SYSINST 0x18 /* System Instruction */
#define T_INSTABRT_L 0x20 /* Instruction Abort (EL0) */
#define T_INSTABRT 0x21 /* Instruction Abort */
#define T_PCAC 0x22 /* PC Alignment Check */
#define T_SPAC 0x26 /* SP Alignment Check */
#define T_DATAABRT_L 0x24 /* Data Abort (EL0) */
#include <sys/queue.h>
#include <stdint.h>
// #define T_UNKNOWN 0x00 /* Unknown */
// #define T_WFIWFE 0x01 /* WFI/WFE */
// #define T_SIMDFP 0x07
// #define T_ILLSTATE 0x0e /* Illegal Execution State */
// #define T_SYSINST 0x18 /* System Instruction */
// #define T_INSTABRT_L 0x20 /* Instruction Abort (EL0) */
// #define T_INSTABRT 0x21 /* Instruction Abort */
// #define T_PCAC 0x22 /* PC Alignment Check */
// #define T_SPAC 0x26 /* SP Alignment Check */
// #define T_DATAABRT_L 0x24 /* Data Abort (EL0) */
//#define T_DATAABRT_L 0x25 /* Data Abort (EL0) */
#define T_SERROR 0x2f /* SError */
#define T_DBGBRK_EL0 0x32 /* Breakpoint (EL0) */
#define T_DBGBRK_EL1 0x33 /* Breakpoint (EL1) */
#define T_DBGSTP_EL0 0x32 /* Step (EL0) */
#define T_DBGSTP_EL1 0x33 /* Step (EL1) */
#define T_DBGWP_EL0 0x34 /* Watchpoint (EL0) */
#define T_DBGWP_EL1 0x35 /* Watchpoint (EL1) */
#define T_BRK 0x3c /* Breakpoint */
// #define T_SERROR 0x2f /* SError */
// #define T_DBGBRK_EL0 0x32 /* Breakpoint (EL0) */
// #define T_DBGBRK_EL1 0x33 /* Breakpoint (EL1) */
// #define T_DBGSTP_EL0 0x32 /* Step (EL0) */
// #define T_DBGSTP_EL1 0x33 /* Step (EL1) */
// #define T_DBGWP_EL0 0x34 /* Watchpoint (EL0) */
// #define T_DBGWP_EL1 0x35 /* Watchpoint (EL1) */
// #define T_BRK 0x3c /* Breakpoint */
#define T_CPU_LAST T_BRK
// #define T_CPU_LAST T_BRK
// IRQs
#define T_IRQ_BASE 32
#define T_IRQ_LEN 24
#define T_IRQ_MAX (T_IRQ_BASE + T_IRQ_LEN - 1)
#define T_CROSSCALL 61 /* Cross Call (IPI) */
#define T_DEBUGIPI 62 /* Kernel Debugger Halt (IPI) */
// #define T_CROSSCALL 61 /* Cross Call (IPI) */
// #define T_DEBUGIPI 62 /* Kernel Debugger Halt (IPI) */
//#define T_UNKNOWN 63 /* Unknown Trap */
#define T_MAX 64
// IRQs
#define T_IRQ_MAX (8192)
#define T_TYPE_IRQ (1)
#define T_TYPE_EXC (0)
typedef struct TrapFrame
{
uint64_t r15;
uint64_t r14;
uint64_t r13;
uint64_t r12;
uint64_t r11;
uint64_t r10;
uint64_t r9;
uint64_t type;
uint64_t intid;
uint64_t esr;
uint64_t far;
uint64_t elr;
uint64_t spsr;
uint64_t mi0;
uint64_t mi1;
uint64_t mi2;
uint64_t mi3;
uint64_t mi4;
uint64_t mi5;
uint64_t mr0;
uint64_t mr1;
uint64_t mr2;
uint64_t mr3;
uint64_t mo0;
uint64_t mo1;
uint64_t mo2;
uint64_t mo3;
uint64_t mo4;
uint64_t mo5;
uint64_t r0;
uint64_t r1;
uint64_t r2;
uint64_t r3;
uint64_t r4;
uint64_t r5;
uint64_t r6;
uint64_t r7;
uint64_t r8;
uint64_t rbp;
uint64_t rdi;
uint64_t rsi;
uint64_t rdx;
uint64_t rcx;
uint64_t rbx;
uint64_t ds;
uint64_t rax;
uint64_t vector;
uint32_t errcode;
uint32_t _unused0;
uint64_t rip;
uint16_t cs;
uint16_t _unused1;
uint16_t _unused2;
uint16_t _unused3;
uint64_t rflags;
uint64_t rsp;
uint16_t ss;
uint16_t _unused4;
uint16_t _unused5;
uint16_t _unused6;
uint64_t r9;
uint64_t r10;
uint64_t r11;
uint64_t r12;
uint64_t r13;
uint64_t r14;
uint64_t r15;
uint64_t r16;
uint64_t r17;
uint64_t r18;
uint64_t r19;
uint64_t r20;
uint64_t r21;
uint64_t r22;
uint64_t r23;
uint64_t r24;
uint64_t r25;
uint64_t r26;
uint64_t r27;
uint64_t r28;
uint64_t r29;
uint64_t r30;
uint64_t r31;
} TrapFrame;
void Trap_Init();
void Trap_InitAP();
void Trap_Dump(TrapFrame *tf);
void Trap_Pop(TrapFrame *tf);
#endif /* __TRAP_H__ */

View File

@ -1,53 +0,0 @@
#include <stdint.h>
#include <sys/kassert.h>
#include <sys/irq.h>
#include <machine/trap.h>
// #include <machine/ioapic.h>
LIST_HEAD(IRQHandlerList, IRQHandler);
struct IRQHandlerList handlers[T_IRQ_LEN];
void
IRQ_Init()
{
int i;
for (i = 0; i < T_IRQ_LEN; i++)
{
LIST_INIT(&handlers[i]);
}
}
void
IRQ_Handler(int irq)
{
struct IRQHandler *h;
LIST_FOREACH(h, &handlers[irq], link)
{
h->cb(h->arg);
}
}
void
IRQ_Register(int irq, struct IRQHandler *h)
{
ASSERT(irq < T_IRQ_LEN);
LIST_INSERT_HEAD(&handlers[irq], h, link);
//IOAPIC_Enable(irq);
}
void
IRQ_Unregister(int irq, struct IRQHandler *h)
{
LIST_REMOVE(h, link);
if (LIST_EMPTY(&handlers[irq])) {
//IOAPIC_Disable(irq);
}
}

View File

@ -19,16 +19,15 @@
#include <sys/vfs.h>
#include <sys/elf64.h>
#include "../dev/arm64/gic.h"
#include "../dev/console.h"
#include <machine/cpuop.h>
#include <machine/metal.h>
#include <machine/mrt.h>
#include <machine/pdcache.h>
#include <machine/timer.h>
extern void KTime_Init();
extern void KTimer_Init();
extern void RTC_Init();
extern void PCI_Init();
extern void MachineBoot_AddMem();
extern void Loader_LoadInit();
@ -58,12 +57,14 @@ Machine_SyscallInit()
void
Machine_EarlyInit()
{
MP_InitCore();
Spinlock_EarlyInit();
Critical_Init();
Critical_Enter();
WaitChannel_EarlyInit();
Console_Init();
PAlloc_Init();
kprintf("Boot CPU: id = %d, mpid = 0x%x.\n", CPU(), MP_GetCPUState()->mpid);
}
static void
@ -95,44 +96,30 @@ void Machine_Init()
PAlloc_LateInit();
MachineBoot_AddMem();
// initialize GIC
kprintf("Initializing GIC ...\n");
if (gic_init() != 0) {
PANIC("GIC initialization failed!\n");
}
kprintf("GIC Initialized!\n");
//enable hardware timer
// __asm__ volatile (
// "mrs x1, CNTFRQ_EL0;"
// "msr CNTP_TVAL_EL0, x1;"
// "mov x0, #1;"
// "msr CNTP_CTL_EL0, x0;"
// "msr DAIFClr, #0b1111;"
// :
// :
// : "x0", "x1"
// );
while(1){
hlt();
}
/*
* Initialize Interrupts
*/
IRQ_Init();
/*
* Initialize Time Keeping
*/
KTime_Init();
//RTC_Init(); // Finishes initializing KTime
/*
* Initialize Interrupts
*/
IRQ_Init();
// XXX: disable interrupts here to avoid the timer interrupt firing before KTimer is initialized
// alternatively, break the dependency or use an "initialized" flag in KTimer_Process
disable_interrupts();
PTimer_Init();
KTimer_Init();
enable_interrupts();
while(1){
hlt();
}
// GICv3
Thread_Init();
KTimer_Init(); // Depends on RTC and KTime
/*
* Initialize Additional Processors
*/
@ -181,14 +168,12 @@ void Machine_InitAP()
Critical_Enter();
// Setup CPU state
Trap_InitAP();
//Trap_InitAP();
PMap_InitAP();
Machine_SyscallInit();
// Setup LAPIC
// Boot processor
MP_InitAP();
MP_InitCore();
Thread_InitAP();
Critical_Exit();

View File

@ -2,13 +2,13 @@
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdatomic.h>
#include <sys/kassert.h>
#include <sys/kconfig.h>
#include <sys/kdebug.h>
#include <sys/kmem.h>
#include <sys/ktime.h>
#include <sys/mp.h>
#include <machine/cpu.h>
#include <machine/cpuop.h>
@ -19,9 +19,7 @@
extern uint8_t mpstart_begin[];
extern uint8_t mpstart_end[];
extern AS systemAS;
#define MP_WAITTIME 250000000ULL
// #define MP_WAITTIME 250000000ULL
typedef struct CrossCallFrame {
CrossCallCB cb;
@ -31,163 +29,143 @@ typedef struct CrossCallFrame {
volatile int status[MAX_CPUS];
} CrossCallFrame;
const char *CPUStateToString[] = {
static const char *CPUStateToString[] = {
"NOT PRESENT",
"BOOTED",
"HALTED",
};
typedef struct CPUState {
int state;
UnixEpochNS heartbeat;
CrossCallFrame *frame;
} CPUState;
static struct CPUState cpu_states[MAX_CPUS] = {0};
static unsigned int cpu_count = 0;
volatile static bool booted;
volatile static int lastCPU;
volatile static CPUState cpus[MAX_CPUS];
struct CPUState * MP_GetCPUState(void)
{
ASSERT(CPU() < cpu_count);
return &cpu_states[CPU()];
}
void
MP_Init()
MP_BootAPs(void)
{
int i;
kprintf("Booting on CPU %u\n", CPU());
// Boot APs
NOT_IMPLEMENTED();
}
cpus[CPU()].state = CPUSTATE_BOOTED;
cpus[CPU()].frame = NULL;
for (i = 1; i < MAX_CPUS; i++) {
cpus[i].state = CPUSTATE_NOT_PRESENT;
cpus[i].frame = NULL;
void
MP_InitCore(void)
{
int cpuid = atomic_fetch_add(&cpu_count, 1);
// make sure CPU() macro works
if (cpuid >= MAX_CPUS) {
Panic("Too many CPUs.");
}
/*
* XXX: We really should read from the MP Table, but this appears to be
* reliable for now.
*/
lastCPU = 0;
/*
for (i = 1; i < MAX_CPUS; i++) {
if (MPBootAP(i) < 0)
break;
cpu_states[cpuid].state = CPUSTATE_BOOTED;
cpu_states[cpuid].id = cpuid;
const uint64_t mpidr = SYSREG_GET(mpidr_el1);
const uint32_t mpid = (mpidr & 0xFFFFFF) | ((mpidr >> 8) & 0xFF000000);
cpu_states[cpuid].mpid = mpid;
lastCPU = i;
}
*/
lastCPU++;
// set x18 properly on this cpu
_cpu = cpuid;
// back up to this core's software id register
SYSREG_SET(tpidr_el1, (uint64_t)cpuid);
}
void
MP_InitAP()
{
kprintf("AP %d booted!\n", CPU());
cpus[CPU()].state = CPUSTATE_BOOTED;
booted = 1;
}
void
MP_SetState(int state)
{
ASSERT(state > 0 && state <= CPUSTATE_MAX);
cpus[CPU()].state = state;
}
int
MP_GetCPUs()
{
return lastCPU;
}
void
MP_CrossCallTrap()
{
int c;
// int c;
Critical_Enter();
// Critical_Enter();
for (c = 0; c <= lastCPU; c++) {
CrossCallFrame *frame = cpus[c].frame;
if (frame == NULL)
continue;
// for (c = 0; c <= lastCPU; c++) {
// volatile CrossCallFrame *frame = cpu_states[c].frame;
// if (frame == NULL)
// continue;
if (frame->done[CPU()] == 1)
continue;
// if (frame->done[CPU()] == 1)
// continue;
frame->status[CPU()] = (frame->cb)(frame->arg);
frame->done[CPU()] = 1;
// frame->status[CPU()] = (frame->cb)(frame->arg);
// frame->done[CPU()] = 1;
// Increment
__sync_add_and_fetch(&frame->count, 1);
}
// // Increment
// __sync_add_and_fetch(&frame->count, 1);
// }
Critical_Exit();
// Critical_Exit();
NOT_IMPLEMENTED();
}
// XXX: The thread should not be migrated in the middle of this call.
int
MP_CrossCall(CrossCallCB cb, void *arg)
{
volatile CrossCallFrame frame;
// volatile CrossCallFrame frame;
// Setup frame
memset((void *)&frame, 0, sizeof(frame));
frame.cb = cb;
frame.arg = arg;
frame.count = 1;
// // Setup frame
// memset((void *)&frame, 0, sizeof(frame));
// frame.cb = cb;
// frame.arg = arg;
// frame.count = 1;
Critical_Enter();
// Critical_Enter();
cpus[CPU()].frame = (CrossCallFrame *)&frame;
// cpu_states[CPU()].frame = (volatile CrossCallFrame *)&frame;
/*
if (LAPIC_Broadcast(T_CROSSCALL) < 0)
return -1;
*/
// /*
// if (LAPIC_Broadcast(T_CROSSCALL) < 0)
// return -1;
// */
// Run on the local CPU
frame.status[CPU()] = cb(arg);
frame.done[CPU()] = 1;
// // Run on the local CPU
// frame.status[CPU()] = cb(arg);
// frame.done[CPU()] = 1;
// Wait for all to respond
while (frame.count < lastCPU) {
// Check for timeout
// // Wait for all to respond
// while (frame.count < lastCPU) {
// // Check for timeout
// XXX: Should dump the crosscall frame
}
cpus[CPU()].frame = NULL;
Critical_Exit();
// // XXX: Should dump the crosscall frame
// }
// cpu_states[CPU()].frame = NULL;
// Critical_Exit();
NOT_IMPLEMENTED();
return 0;
}
static int
static int UNUSED
MPPing(UNUSED void *arg)
{
//kprintf("CPU %d Ack\n", CPU());
NOT_IMPLEMENTED();
return 0;
}
static void
static void UNUSED
Debug_CrossCall(UNUSED int argc, UNUSED const char *argv[])
{
int i;
UnixEpochNS startTS, stopTS;
// int i;
// UnixEpochNS startTS, stopTS;
startTS = KTime_GetEpochNS();
for (i = 0; i < 32; i++) {
MP_CrossCall(&MPPing, NULL);
}
stopTS = KTime_GetEpochNS();
// startTS = KTime_GetEpochNS();
// for (i = 0; i < 32; i++) {
// MP_CrossCall(&MPPing, NULL);
// }
// stopTS = KTime_GetEpochNS();
// XXX: Print min and max
kprintf("Average CrossCall Latency: %llu ns\n",
(stopTS - startTS) / 32ULL);
// // XXX: Print min and max
// kprintf("Average CrossCall Latency: %llu ns\n",
// (stopTS - startTS) / 32ULL);
return;
// return;
NOT_IMPLEMENTED();
}
REGISTER_DBGCMD(crosscall, "Ping crosscall", Debug_CrossCall);
//REGISTER_DBGCMD(crosscall, "Ping crosscall", Debug_CrossCall);
static void
Debug_CPUS(UNUSED int argc, UNUSED const char *argv[])
@ -195,8 +173,8 @@ Debug_CPUS(UNUSED int argc, UNUSED const char *argv[])
int c;
for (c = 0; c < MAX_CPUS; c++) {
if (cpus[c].state != CPUSTATE_NOT_PRESENT) {
kprintf("CPU %d: %s\n", c, CPUStateToString[cpus[c].state]);
if (cpu_states[c].state != CPUSTATE_NOT_PRESENT) {
kprintf("CPU %d: %s\n", c, CPUStateToString[cpu_states[c].state]);
}
}
}

View File

@ -70,7 +70,9 @@ void vm_early_paging_init()
// :
// : "x0"
// );
uint64_t zero = 0;
SYSREG_SET(ttbr0_el1, zero);
SYSREG_SET(ttbr1_el1, zero);
// reset page tables
flushtlb();

View File

@ -75,20 +75,20 @@ PMap_Init()
systemAS.xmem_tbl = PMapAllocPageTable();
if (!systemAS.dmap_tbl)
PANIC("Cannot allocate dmap page table");
Panic("Cannot allocate dmap page table");
if (!systemAS.xmem_tbl)
PANIC("Cannot allocate xmem page table");
Panic("Cannot allocate xmem page table");
// Setup system mappings
if(!PMap_SystemLMap(0x0, MEM_DIRECTMAP_BASE,
512, PROT_ALL)) // 128GB RWX
{
PANIC("Cannot setup direct map");
Panic("Cannot setup direct map");
}
if(!PMap_SystemLMap(0x0, MEM_DIRECTMAP_DEV_BASE,
512, PROT_ALL | MAP_NOCACHE)) // 128GB Device
{
PANIC("Cannot setup device map");
Panic("Cannot setup device map");
}
PMap_LoadAS(&systemAS);
@ -176,7 +176,7 @@ PMap_LoadAS(AS *space)
METAL_MENTER(MRT_SET_MPTB_IDX);
METAL_MROUTINE_GETRET(MRT_SET_MPTB_RET_STATUS, ret);
if (ret != 0)
PANIC("Failed to load DMAP page table.");
Panic("Failed to load DMAP page table.");
// set xmem region
idx = MRT_SET_MPTB_XMEM;
@ -186,7 +186,7 @@ PMap_LoadAS(AS *space)
METAL_MENTER(MRT_SET_MPTB_IDX);
METAL_MROUTINE_GETRET(MRT_SET_MPTB_RET_STATUS, ret);
if (ret != 0)
PANIC("Failed to load XMEM page table.");
Panic("Failed to load XMEM page table.");
// set userspace
idx = MRT_SET_MPTB_USER;
@ -196,7 +196,7 @@ PMap_LoadAS(AS *space)
METAL_MENTER(MRT_SET_MPTB_IDX);
METAL_MROUTINE_GETRET(MRT_SET_MPTB_RET_STATUS, ret);
if (ret != 0)
PANIC("Failed to load USER page table.");
Panic("Failed to load USER page table.");
flushtlb();
}
@ -211,7 +211,7 @@ PMap_MapPage(uint64_t phys, uint64_t virt, uint64_t pages, uint32_t pgshift, str
if (pd == NULL) {
// XXX: if this fails and not the first page to be mapped
// the this function inflicts side effects
PANIC("Out of pdcache nodes.\n");
Panic("Out of pdcache nodes.\n");
}
pd->vaddr = (virt & ~((1ull << pgshift) - 1)) + i * (1ull << pgshift);

View File

@ -1,35 +0,0 @@
#include <stdbool.h>
#include <stdint.h>
#include <sys/kassert.h>
#include <sys/kdebug.h>
#include <sys/ktime.h>
#include <machine/cpu.h>
#include <machine/cpuop.h>
uint64_t
Time_GetTSC()
{
uint64_t ui;
__asm__ volatile("mrs %0, CNTVCT_EL0" : "=&r" (ui));
return ui;
}
uint64_t
Time_GetTSCFreq()
{
uint64_t ui;
__asm__ volatile("mrs %0, CNTFRQ_EL0" : "=&r" (ui));
return ui;
}
static void
Debug_ReadTSC(UNUSED int argc, UNUSED const char *argv[])
{
kprintf("RDTSC: %lld\n", Time_GetTSC());
}
REGISTER_DBGCMD(readtsc, "Print current timestamp", Debug_ReadTSC);

72
sys/arm64/timer.c Normal file
View File

@ -0,0 +1,72 @@
#include <stdbool.h>
#include <stdint.h>
#include <sys/kassert.h>
#include <sys/kdebug.h>
#include <sys/ktime.h>
#include <sys/irq.h>
#include <sys/ktimer.h>
#include <machine/gic.h>
#include <machine/cpu.h>
#include <machine/cpuop.h>
#include <machine/timer.h>
#define KTIMER_HZ (1)
#define KTIMER_IRQ (30)
// hardcoded boot wall-clock: 1710390896 = 2024-03-14 04:34:56 UTC (00:34:56 EDT)
#define KTIMER_EPOCH (1710390896ull)
static struct IRQHandler _irqh;
/*
 * PTimer_Reschedule --
 *   Arm the next tick: push the physical timer compare value
 *   (CNTP_CVAL_EL0) forward by one period, i.e. counter-frequency ticks
 *   divided by KTIMER_HZ. Advancing from the previous compare value
 *   (rather than from "now") keeps ticks evenly spaced.
 */
static inline void
PTimer_Reschedule()
{
    const uint64_t period = Time_GetTSCFreq() / KTIMER_HZ;
    const uint64_t prev_cval = SYSREG_GET(CNTP_CVAL_EL0);

    SYSREG_SET(CNTP_CVAL_EL0, prev_cval + period);
}
// Timer interrupt callback, invoked from the IRQ dispatch path.
// Order matters: re-arm the compare register first so the next tick is
// never lost, acknowledge the interrupt at the GIC, then run the
// software timer wheel. The kprintf is bring-up tracing; presumably to
// be removed once the timer path is stable — confirm before shipping.
static void
PTimer_Tick(UNUSED void * arg)
{
// advance CNTP_CVAL_EL0 by one period before doing any other work
PTimer_Reschedule();
// end-of-interrupt for the timer PPI at the GIC CPU interface
gic_send_eoi(KTIMER_IRQ);
// fire any expired kernel timers
KTimer_Process();
kprintf("Hardware timer tick, epoch = %llu, tsc = %llu, CVAL = %llu.\n", KTime_GetEpoch(), Time_GetTSC(), SYSREG_GET(CNTP_CVAL_EL0));
}
/*
 * PTimer_Init --
 *
 *   One-time setup of the Arm generic physical timer on this CPU:
 *   registers the tick handler for the timer interrupt (KTIMER_IRQ,
 *   INTID 30), seeds the compare register from the current counter,
 *   arms the first tick, and enables the timer. Finally publishes the
 *   wall-clock calibration (hardcoded boot epoch, current counter,
 *   counter frequency) to KTime.
 */
void
PTimer_Init()
{
    const uint64_t tsc_freq = Time_GetTSCFreq();

    // hook the timer interrupt
    _irqh.irq = KTIMER_IRQ;
    _irqh.arg = NULL;  // fixed: was `NULL,` — a comma expression that only worked by accident
    _irqh.cb = PTimer_Tick;
    IRQ_Register(KTIMER_IRQ, &_irqh);
    // read the tsc and populate counter with the current value
    const uint64_t tsc = Time_GetTSC();
    SYSREG_SET(CNTP_CVAL_EL0, tsc);
    // reschedule the next tick
    PTimer_Reschedule();
    // enable timer (CNTP_CTL_EL0.ENABLE = 1, IMASK clear)
    const uint64_t cntp_ctl_el0 = 1;
    SYSREG_SET(CNTP_CTL_EL0, cntp_ctl_el0);

    KTime_SetTime(KTIMER_EPOCH, tsc, tsc_freq);
    kprintf("Hardware timer: epoch = %llu, tsc = %llu, tsc freq = %llu.\n", KTIMER_EPOCH, tsc, tsc_freq);
}
/*
 * Debug_ReadTSC --
 *   Debugger command: print the current generic timer counter value.
 */
static void
Debug_ReadTSC(UNUSED int argc, UNUSED const char *argv[])
{
    /* Time_GetTSC() returns uint64_t, so the unsigned conversion %llu is
     * required; the original %lld would misprint values with the top bit
     * set (signed/unsigned format mismatch is undefined behavior). */
    kprintf("RDTSC: %llu\n", Time_GetTSC());
}
REGISTER_DBGCMD(readtsc, "Print current timestamp", Debug_ReadTSC);

View File

@ -1,73 +1,94 @@
#include <stdbool.h>
#include <stdint.h>
#include <sys/kconfig.h>
#include <sys/kassert.h>
#include <sys/kdebug.h>
#include <sys/kmem.h>
#include <sys/ktime.h>
#include <sys/spinlock.h>
#include <sys/irq.h>
#include <sys/syscall.h>
#include <sys/mp.h>
#include <sys/kdebug.h>
#include <machine/cpu.h>
#include <machine/cpuop.h>
#include <machine/gic.h>
#include <machine/trap.h>
#include <machine/mp.h>
#if defined(_aarch64_)
#include <dev/arm64/uart.h>
#endif
#include <sys/thread.h>
extern uint64_t trap_table[T_MAX];
extern void trap_pop(TrapFrame *tf);
extern void Debug_Breakpoint(TrapFrame *tf);
extern void Debug_HaltIPI(TrapFrame *tf);
extern void KTimer_Process();
LIST_HEAD(IRQHandlerList, IRQHandler);
static struct IRQHandlerList handlers[T_IRQ_MAX];
static uint64_t intStats[256];
void
Trap_Init()
IRQ_Init()
{
kprintf("Initializing IRQ ...\n");
if (gic_init() != 0) {
Panic("GIC initialization failed!\n");
}
int i;
kprintf("Initializing IDT... ");
for (i = 0; i < 256; i++) {
for (i = 0; i < T_IRQ_MAX; i++)
{
intStats[i] = 0;
LIST_INIT(&handlers[i]);
}
kprintf("Done!\n");
enable_interrupts();
kprintf("Initialized IRQ!\n");
}
void
Trap_InitAP()
IRQ_Handler(int irq)
{
struct IRQHandler *h;
LIST_FOREACH(h, &handlers[irq], link)
{
h->cb(h->arg);
}
}
void
Trap_Dump(TrapFrame *tf)
IRQ_Register(int irq, struct IRQHandler *h)
{
ASSERT(irq < T_IRQ_MAX);
LIST_INSERT_HEAD(&handlers[irq], h, link);
if (gic_enable_intr(irq) != 0) {
Panic("Failed to enable interrupt.\n");
}
}
void
IRQ_Unregister(int irq, struct IRQHandler *h)
{
LIST_REMOVE(h, link);
if (LIST_EMPTY(&handlers[irq])) {
if (gic_disable_intr(irq) != 0) {
Panic("Failed to disable interrupt.\n");
}
}
}
/*
 * Trap_Dump --
 *
 *	Print minimal trap context (currently only the CPU number); the
 *	x86-style vector/errcode dump is commented out on this port.
 *
 * NOTE(review): the active "Interrupt %d Error Code" kprintf below is the
 * removed pre-change line shown by this rendered diff; the new version is
 * the commented-out pair.
 */
void
Trap_Dump(UNUSED TrapFrame *tf)
{
kprintf("CPU %d\n", CPU());
kprintf("Interrupt %d Error Code: %016llx\n",
tf->vector, tf->errcode);
// kprintf("Interrupt %d Error Code: %016llx\n",
// tf->vector, tf->errcode);
}
/*
 * Trap_StackDump --
 *
 *	Walk the faulting stack up to the next page boundary and print each
 *	quadword.  The body is disabled on this port (TrapFrame no longer has
 *	an rsp field here); the uncommented lines below are the removed
 *	pre-change version shown by this rendered diff.
 */
void
Trap_StackDump(TrapFrame *tf)
Trap_StackDump(UNUSED TrapFrame *tf)
{
uint64_t rsp;
uint64_t *data;
// uint64_t rsp;
// uint64_t *data;
// XXX: This should use safe copy
for (rsp = tf->rsp; (rsp & 0xFFF) != 0; rsp += 8) {
data = (uint64_t *)rsp;
kprintf("%016llx: %016llx\n", rsp, *data);
}
// for (rsp = tf->rsp; (rsp & 0xFFF) != 0; rsp += 8) {
// data = (uint64_t *)rsp;
// kprintf("%016llx: %016llx\n", rsp, *data);
// }
}
extern int copy_unsafe(void *to, void *from, uintptr_t len);
@ -79,28 +100,14 @@ extern void copystr_unsafe_done(void);
extern void copystr_unsafe_fault(void);
/*
 * trap_entry --
 *
 *	Common C-level entry for all traps, called from the locore stubs with
 *	a pointer to the saved TrapFrame.  Synchronous exceptions
 *	(tf->type == T_TYPE_EXC) are fatal; IRQs are logged and dispatched to
 *	the registered handlers via IRQ_Handler(tf->intid).
 *
 * NOTE(review): this span is a rendered diff; the removed pre-change body
 * (the inline ICC_IAR1_EL1 read, the "Unhandled irq" print, and the id==30
 * timer re-arm/EOI sequence) is interleaved with the new dispatch code.
 */
void
trap_entry(void)
trap_entry(TrapFrame * tf)
{
uint64_t id;
__asm__ volatile (
"mrs %x0, ICC_IAR1_EL1;"
: "=r" (id)
:
:
);
kprintf("Unhandled irq: 0x%x\n", id);
if (id == 30) {
__asm__ volatile (
"mrs x1, CNTFRQ_EL0;"
"msr CNTP_TVAL_EL0, x1;"
"msr ICC_EOIR1_EL1, %x0;"
"dsb sy;"
:
: "r" (id)
: "x1"
);
if (tf->type == T_TYPE_EXC) {
Panic("Unexpected exception: Type = 0x%llx, IntID = 0x%llx, ESR = 0x%llx, FAR = 0x%llx, ELR = 0x%llx, SPSR = 0x%llx.\n", tf->type, tf->intid, tf->esr, tf->far, tf->elr, tf->spsr);
} else {
kprintf("IRQ: Type = 0x%llx, IntID = 0x%llx, ESR = 0x%llx, FAR = 0x%llx, ELR = 0x%llx, SPSR = 0x%llx.\n", tf->type, tf->intid, tf->esr, tf->far, tf->elr, tf->spsr);
// irq
IRQ_Handler(tf->intid);
}
}
@ -110,11 +117,11 @@ Debug_Traps(UNUSED int argc, UNUSED const char *argv[])
int i;
kprintf("Trap Interrupts Trap Interrupts\n");
for (i = 0; i < T_MAX / 2; i++)
for (i = 0; i < T_IRQ_MAX / 2; i++)
{
kprintf("%-4d %-12d %-4d %-12d\n",
i, intStats[i],
T_MAX / 2 + i, intStats[T_MAX / 2 + i]);
T_IRQ_MAX / 2 + i, intStats[T_IRQ_MAX / 2 + i]);
}
}

View File

@ -1,13 +1,9 @@
#include <machine/metalasm.h>
/*
* Trap Handlers
*/
.extern trap_entry
.text
trap_locore_entry:
stp lr, lr, [sp, #-16]!
.macro trapframe_save_common type
stp x30, x31, [sp, #-16]!
stp x28, x29, [sp, #-16]!
stp x26, x27, [sp, #-16]!
stp x24, x25, [sp, #-16]!
@ -23,16 +19,95 @@ trap_locore_entry:
stp x4, x5, [sp, #-16]!
stp x2, x3, [sp, #-16]!
stp x0, x1, [sp, #-16]!
; mrs x0, spsr_el1
; mrs x1, elr_el1
; stp x0, x1, [sp, #-16]!
.extern trap_entry
bl trap_entry
// Metal Registers
MASM_RMR(METAL_REG_MO4, AARCH_REG_X0)
MASM_RMR(METAL_REG_MO5, AARCH_REG_X1)
stp x0, x1, [sp, #-16]!
MASM_RMR(METAL_REG_MO2, AARCH_REG_X0)
MASM_RMR(METAL_REG_MO3, AARCH_REG_X1)
stp x0, x1, [sp, #-16]!
MASM_RMR(METAL_REG_MO0, AARCH_REG_X0)
MASM_RMR(METAL_REG_MO1, AARCH_REG_X1)
stp x0, x1, [sp, #-16]!
MASM_RMR(METAL_REG_MR2, AARCH_REG_X0)
MASM_RMR(METAL_REG_MR3, AARCH_REG_X1)
stp x0, x1, [sp, #-16]!
MASM_RMR(METAL_REG_MR0, AARCH_REG_X0)
MASM_RMR(METAL_REG_MR1, AARCH_REG_X1)
stp x0, x1, [sp, #-16]!
MASM_RMR(METAL_REG_MI4, AARCH_REG_X0)
MASM_RMR(METAL_REG_MI5, AARCH_REG_X1)
stp x0, x1, [sp, #-16]!
MASM_RMR(METAL_REG_MI2, AARCH_REG_X0)
MASM_RMR(METAL_REG_MI3, AARCH_REG_X1)
stp x0, x1, [sp, #-16]!
MASM_RMR(METAL_REG_MI0, AARCH_REG_X0)
MASM_RMR(METAL_REG_MI1, AARCH_REG_X1)
stp x0, x1, [sp, #-16]!
// ELR and SPSR
mrs x0, elr_el1
mrs x1, spsr_el1
stp x0, x1, [sp, #-16]!
// FAR and ESR
mrs x0, esr_el1
mrs x1, far_el1
stp x0, x1, [sp, #-16]!
// IAR and type
mov x0, #\type
mrs x1, ICC_IAR1_EL1
stp x0, x1, [sp, #-16]!
.endm
.macro trapframe_restore_common
add sp, sp, #32 // skip status, IAR, FAR and ESR
ldp x0, x1, [sp], #16
msr elr_el1, x0
msr spsr_el1, x1
ldp x0, x1, [sp], #16
MASM_WMR(METAL_REG_MI0, AARCH_REG_X0)
MASM_WMR(METAL_REG_MI1, AARCH_REG_X1)
ldp x0, x1, [sp], #16
MASM_WMR(METAL_REG_MI2, AARCH_REG_X0)
MASM_WMR(METAL_REG_MI3, AARCH_REG_X1)
ldp x0, x1, [sp], #16
MASM_WMR(METAL_REG_MI4, AARCH_REG_X0)
MASM_WMR(METAL_REG_MI5, AARCH_REG_X1)
ldp x0, x1, [sp], #16
MASM_WMR(METAL_REG_MR0, AARCH_REG_X0)
MASM_WMR(METAL_REG_MR1, AARCH_REG_X1)
ldp x0, x1, [sp], #16
MASM_WMR(METAL_REG_MR2, AARCH_REG_X0)
MASM_WMR(METAL_REG_MR3, AARCH_REG_X1)
ldp x0, x1, [sp], #16
MASM_WMR(METAL_REG_MO0, AARCH_REG_X0)
MASM_WMR(METAL_REG_MO1, AARCH_REG_X1)
ldp x0, x1, [sp], #16
MASM_WMR(METAL_REG_MO2, AARCH_REG_X0)
MASM_WMR(METAL_REG_MO3, AARCH_REG_X1)
ldp x0, x1, [sp], #16
MASM_WMR(METAL_REG_MO4, AARCH_REG_X0)
MASM_WMR(METAL_REG_MO5, AARCH_REG_X1)
; ldp x0, x1, [sp], #16
; msr spsr_el1, x0
; msr elr_el1, x1
ldp x0, x1, [sp], #16
ldp x2, x3, [sp], #16
ldp x4, x5, [sp], #16
@ -48,8 +123,25 @@ trap_locore_entry:
ldp x24, x25, [sp], #16
ldp x26, x27, [sp], #16
ldp x28, x29, [sp], #16
ldp lr, lr, [sp], #16
ldp x30, x31, [sp], #16
// daif bits are stored in PSTATE and are automatically re-enabled by eret
.endm
.extern trap_entry
.text
// Low-level vector targets.  Both save a full TrapFrame on the stack via
// trapframe_save_common (the immediate becomes tf->type: 1 = IRQ,
// 0 = synchronous exception), hand the frame pointer (sp) to the C handler,
// then restore the frame and eret back to the interrupted context.
interrupt_locore_entry:
trapframe_save_common 1
mov x0, sp                      // x0 = &TrapFrame for trap_entry(tf)
bl trap_entry
trapframe_restore_common
eret
exception_locore_entry:
trapframe_save_common 0
mov x0, sp                      // x0 = &TrapFrame for trap_entry(tf)
bl trap_entry
trapframe_restore_common
eret
.global _evt
@ -66,9 +158,10 @@ b _halt
// same exception level, spX: sync, irq, fiq, serror
.balign 0x80
b exception_locore_entry
b _halt
.balign 0x80
b trap_locore_entry
b interrupt_locore_entry
b _halt
.balign 0x80
b _halt

View File

@ -1,29 +0,0 @@
#pragma once
#include <stdint.h>
uint32_t
gic_get_affinity(void);
void
gic_set_prio_mask(unsigned int mask);
int
gic_set_intr_prio(uint32_t rdid, uint32_t intid, uint8_t prio);
void
gic_send_eoi(int intr);
unsigned int
gic_ack_intr(void);
int
gic_init(void);
int
gic_enable_intr(uint32_t rdid, uint32_t intid);
int
gic_disable_intr(uint32_t rd, uint32_t intid);
int
gic_get_redist_id(uint32_t affinity, unsigned int *id);

View File

@ -90,7 +90,7 @@ static void
uart_init(struct uart *dev)
{
if (!uart_discover(dev)) {
PANIC("Failed to discover uart");
Panic("Failed to discover uart");
}
uint32_t cr = uart_readreg(dev, UART_CR_OFFSET);
@ -157,4 +157,5 @@ void
Serial_Init(void)
{
uart_init(&g_uart0);
kprintf("Initialized PL011 UART, base = 0x%llx.\n", g_uart0.base);
}

View File

@ -100,10 +100,16 @@ Console_EnqueueKey(char key)
#if defined(__aarch64__)
/*
 * Panic (aarch64) --
 *
 *	Format and print a fatal printf-style message via kvprintf, then spin
 *	in Halt() forever.  Never returns.
 *
 * NOTE(review): the uncommented Serial_Send line and the old fixed-string
 * signature below are the removed pre-change version shown by this rendered
 * diff; the variadic form with kvprintf is the replacement.
 */
void
Panic(const char *str)
Panic(const char *str, ...)
{
Serial_Send(str, strlen(str));
va_list args;
va_start(args, str);
kvprintf(str, args);
va_end(args);
// Serial_Send(str, strlen(str));
/* Do not return: park the CPU permanently. */
while (true) {
Halt();
}
}
#endif

View File

@ -14,6 +14,7 @@
#define NO_RETURN __attribute__((noreturn))
#define UNREACHABLE __builtin_unreachable
#define PRINTF_LIKE __attribute__((format(printf, 1, 2)))
#define UNUSED __attribute__((unused))

View File

@ -4,6 +4,7 @@
#include <sys/cdefs.h>
#include <sys/sysctl.h>
#include <stdarg.h>
#define ASSERT(_x) \
if (!(_x)) { \
@ -15,9 +16,8 @@
Debug_Assert("NOT_IMPLEMENTED(): %s %s:%d\n", \
__FUNCTION__, __FILE__, __LINE__); \
}
#define PANIC Panic
NO_RETURN void Panic(const char *str);
NO_RETURN void Panic(const char *str, ...);
#if defined(__aarch64__)
NO_RETURN extern void
@ -31,6 +31,7 @@ Halt(void)
#endif
int kprintf(const char *fmt, ...);
int kvprintf(const char *fmt, va_list ap);
NO_RETURN void Debug_Assert(const char *fmt, ...);
#define static_assert _Static_assert

View File

@ -2,7 +2,9 @@
#ifndef __SYS_KTIME_H__
#define __SYS_KTIME_H__
uint64_t Time_GetTSC();
//XXX: breaks x86
#include <machine/timer.h>
#include <stdint.h>
typedef struct KTime {
int sec;

View File

@ -7,11 +7,7 @@ uint32_t LAPIC_CPU();
#define CPU LAPIC_CPU
#elif defined(__aarch64__)
static inline unsigned int ARM_CPU()
{
return 0;
}
#define CPU ARM_CPU
#include <machine/mp.h>
#endif
#endif /* __MP_H__ */

View File

@ -2,6 +2,7 @@
#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdatomic.h>
#include <sys/kassert.h>
#include <sys/queue.h>
@ -61,14 +62,14 @@ void
KTimer_Retain(KTimerEvent *evt)
{
ASSERT(evt->refCount != 0);
__sync_fetch_and_add(&evt->refCount, 1);
atomic_fetch_add(&evt->refCount, 1);
}
void
KTimer_Release(KTimerEvent *evt)
{
ASSERT(evt->refCount != 0);
if (__sync_fetch_and_sub(&evt->refCount, 1) == 1) {
if (atomic_fetch_sub(&evt->refCount, 1) == 1) {
KTimerEvent_Free(evt);
}
}

View File

@ -75,7 +75,8 @@ static void printnum(void (*func)(int, void*),void *handle,
}
}
int kvprintf(char const *fmt, void (*func)(int,void *), void *handle, va_list ap)
static int
_kvprintf(char const *fmt, void (*func)(int,void *), void *handle, va_list ap)
{
const char *p;
int ch;
@ -199,7 +200,8 @@ again:
return 0;
}
void consoleputc(int c,void* handle)
static void
consoleputc(int c,void* handle)
{
if (c == '\n') {
Console_Putc('\r');
@ -207,13 +209,20 @@ void consoleputc(int c,void* handle)
Console_Putc(c);
}
int kprintf(const char *fmt, ...)
/*
 * kvprintf --
 *
 *	va_list form of kprintf: format 'fmt' with the file-local _kvprintf
 *	engine, emitting characters through consoleputc (NULL handle).
 *	Returns whatever _kvprintf returns.
 */
int
kvprintf(const char *fmt, va_list ap)
{
return _kvprintf(fmt, consoleputc, 0, ap);
}
int
kprintf(const char *fmt, ...)
{
int ret;
va_list ap;
va_start(ap, fmt);
ret = kvprintf(fmt, consoleputc, 0, ap);
ret = kvprintf(fmt, ap);
va_end(ap);
return ret;
@ -224,7 +233,7 @@ void Debug_Assert(const char *fmt, ...)
va_list ap;
va_start(ap, fmt);
kvprintf(fmt, consoleputc, 0, ap);
kvprintf(fmt, ap);
va_end(ap);
#if 0

View File

@ -6,6 +6,7 @@
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdatomic.h>
#include <errno.h>
#include <sys/syscall.h>
@ -171,7 +172,7 @@ void
Process_Retain(Process *proc)
{
ASSERT(proc->refCount != 0);
__sync_fetch_and_add(&proc->refCount, 1);
atomic_fetch_add(&proc->refCount, 1);
}
/**
@ -185,7 +186,7 @@ void
Process_Release(Process *proc)
{
ASSERT(proc->refCount != 0);
if (__sync_fetch_and_sub(&proc->refCount, 1) == 1) {
if (atomic_fetch_sub(&proc->refCount, 1) == 1) {
Process_Destroy(proc);
}
}

View File

@ -7,6 +7,7 @@
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdatomic.h>
#include <sys/kassert.h>
#include <sys/kconfig.h>
@ -15,7 +16,6 @@
#include <sys/mp.h>
#include <sys/spinlock.h>
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <machine/metal.h>
#include <machine/cpuop.h>
@ -82,7 +82,7 @@ Spinlock_Lock(Spinlock *lock) __NO_LOCK_ANALYSIS
Critical_Enter();
startTSC = Time_GetTSC();
while (atomic_swap_uint64(&lock->lock, 1) == 1)
while (atomic_exchange(&lock->lock, 1) == 1)
{
if (lock->type == SPINLOCK_TYPE_RECURSIVE && lock->cpu == CPU()) {
break;
@ -120,7 +120,7 @@ Spinlock_Unlock(Spinlock *lock) __NO_LOCK_ANALYSIS
if (lock->rCount == 0) {
lock->cpu = 0;
lock->lockTime += Time_GetTSC() - lock->lockedTSC;
atomic_set_uint64(&lock->lock, 0);
atomic_store(&lock->lock, 0);
}
Critical_Exit();

View File

@ -6,6 +6,7 @@
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdatomic.h>
#include <errno.h>
#include <sys/syscall.h>
@ -254,7 +255,7 @@ void
Thread_Retain(Thread *thr)
{
ASSERT(thr->refCount != 0);
__sync_fetch_and_add(&thr->refCount, 1);
atomic_fetch_add(&thr->refCount, 1);
}
/**
@ -266,7 +267,7 @@ void
Thread_Release(Thread *thr)
{
ASSERT(thr->refCount != 0);
if (__sync_fetch_and_sub(&thr->refCount, 1) == 1) {
if (atomic_fetch_sub(&thr->refCount, 1) == 1) {
Thread_Destroy(thr);
}
}