diff --git a/SConstruct b/SConstruct index 074c164..0a719b7 100644 --- a/SConstruct +++ b/SConstruct @@ -22,7 +22,7 @@ opts.AddVariables( ("PREFIX", "Installation target directory.", "#pxelinux"), ("ARCH", "Target Architecture", "arm64"), ("BOOTDISK", "Build boot disk (0 or 1)", "1"), - ("BOOTDISK_SIZE", "Boot disk size", "128") + ("BOOTDISK_SIZE", "Boot disk size", "128"), ) env = Environment(options = opts, @@ -75,7 +75,7 @@ if env["WITH_GPROF"] == "1": env.Append(CPPFLAGS = "-DBUILDTYPE=" + env["BUILDTYPE"]) if env["BUILDTYPE"] == "DEBUG": - env.Append(CPPFLAGS = [ "-g", "-DDEBUG", "-Wall", + env.Append(CPPFLAGS = [ "-g", "-DDEBUG", "-Wall", "-O2", "-Wno-deprecated-declarations" ]) env.Append(LINKFLAGS = [ "-g" ]) elif env["BUILDTYPE"] == "PERF": diff --git a/sys/SConscript b/sys/SConscript index 9685ed2..77bd46c 100644 --- a/sys/SConscript +++ b/sys/SConscript @@ -57,6 +57,11 @@ src_arm64 = [ "arm64/gic.c", # Devices "dev/arm64/uart.c", + # Metal + "arm64/metal.c", + "arm64/paging.c", + "arm64/mrt/init.c", + "arm64/mrt/paging.c", ] src_common = [ @@ -106,7 +111,7 @@ src.append(src_common) kern_env.Append(LINKFLAGS = ['-T', ldscript[1:], '-nostdlib']) kern_env.Append(CPPFLAGS = ['-D_KERNEL']) kern_env.Append(CPPFLAGS = ['-ffreestanding', '-fno-builtin', '-nostdinc', - '-mno-red-zone']) + '-mno-red-zone', '-std=c11', "-Wno-c2x-extensions"]) if env["ARCH"] == "amd64": kern_env.Append(CPPFLAGS = ['-mno-mmx', '-mno-sse', '-mcmodel=large']) diff --git a/sys/amd64/include/atomic.h b/sys/amd64/include/atomic.h index e2f8734..0a55276 100644 --- a/sys/amd64/include/atomic.h +++ b/sys/amd64/include/atomic.h @@ -5,7 +5,7 @@ static INLINE uint64_t atomic_swap_uint32(volatile uint32_t *dst, uint32_t newval) { - asm volatile("lock; xchgl %0, %1;" + __asm__ volatile("lock; xchgl %0, %1;" : "+m" (*dst), "+r" (newval)); return newval; @@ -14,7 +14,7 @@ atomic_swap_uint32(volatile uint32_t *dst, uint32_t newval) static INLINE uint64_t atomic_swap_uint64(volatile uint64_t *dst, 
uint64_t newval) { - asm volatile("lock; xchgq %0, %1;" + __asm__ volatile("lock; xchgq %0, %1;" : "+m" (*dst), "+r" (newval)); return newval; diff --git a/sys/amd64/include/cpuop.h b/sys/amd64/include/cpuop.h index 51c7852..75b287f 100644 --- a/sys/amd64/include/cpuop.h +++ b/sys/amd64/include/cpuop.h @@ -8,39 +8,39 @@ static INLINE void enable_interrupts() { - asm volatile("sti"); + __asm__ volatile("sti"); } static INLINE void disable_interrupts() { - asm volatile("cli"); + __asm__ volatile("cli"); } static INLINE void hlt() { - asm volatile("hlt"); + __asm__ volatile("hlt"); } static INLINE void pause() { - asm volatile("pause"); + __asm__ volatile("pause"); } static INLINE void breakpoint() { - asm volatile("int3"); + __asm__ volatile("int3"); } static INLINE void icebp() { - asm volatile(".byte 0xf1"); + __asm__ volatile(".byte 0xf1"); } static INLINE uint64_t rdtsc() { uint32_t lo, hi; - asm volatile("rdtsc" + __asm__ volatile("rdtsc" : "=a" (lo), "=d" (hi)); return ((uint64_t)hi << 32) | (uint64_t)lo; @@ -50,7 +50,7 @@ static INLINE uint64_t rdtscp(uint32_t *procno) { uint32_t lo, hi, proc; - asm volatile("rdtsc" + __asm__ volatile("rdtsc" : "=a" (lo), "=d" (hi), "=c" (proc)); if (procno) @@ -61,7 +61,7 @@ static INLINE uint64_t rdtscp(uint32_t *procno) static INLINE void lidt(PseudoDescriptor *idt) { - asm volatile("lidt (%0)" + __asm__ volatile("lidt (%0)" : : "r" (idt) : "memory"); @@ -69,7 +69,7 @@ static INLINE void lidt(PseudoDescriptor *idt) static INLINE void lgdt(PseudoDescriptor *gdt) { - asm volatile("lgdt (%0)" + __asm__ volatile("lgdt (%0)" : : "r" (gdt) : "memory"); @@ -77,7 +77,7 @@ static INLINE void lgdt(PseudoDescriptor *gdt) static INLINE void ltr(uint16_t tss) { - asm volatile("ltr %0" + __asm__ volatile("ltr %0" : : "r" (tss)); } @@ -87,7 +87,7 @@ static INLINE void cpuid(uint32_t info, uint32_t *eax, uint32_t *ebx, { uint32_t a, b, c, d; - asm volatile("cpuid" + __asm__ volatile("cpuid" : "=a" (a), "=b" (b), "=c" (c), "=d" (d) : "a" 
(info)); @@ -106,7 +106,7 @@ static INLINE void wrmsr(uint32_t addr, uint64_t val) uint32_t eax = val & 0xFFFFFFFF; uint32_t edx = val >> 32; - asm volatile("wrmsr" + __asm__ volatile("wrmsr" : : "a" (eax), "c" (addr), "d" (edx)); } @@ -115,7 +115,7 @@ static INLINE uint64_t rdmsr(uint32_t addr) { uint64_t eax, edx; - asm volatile("rdmsr" + __asm__ volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (addr)); @@ -130,7 +130,7 @@ static INLINE uint64_t read_cr0() { uint64_t val; - asm volatile("movq %%cr0, %0" + __asm__ volatile("movq %%cr0, %0" : "=r" (val)); return val; @@ -138,7 +138,7 @@ static INLINE uint64_t read_cr0() static INLINE void write_cr0(uint64_t val) { - asm volatile("movq %0, %%cr0" + __asm__ volatile("movq %0, %%cr0" : : "r" (val)); } @@ -147,7 +147,7 @@ static INLINE uint64_t read_cr2() { uint64_t val; - asm volatile("movq %%cr2, %0" + __asm__ volatile("movq %%cr2, %0" : "=r" (val)); return val; @@ -157,7 +157,7 @@ static INLINE uint64_t read_cr3() { uint64_t val; - asm volatile("movq %%cr3, %0" + __asm__ volatile("movq %%cr3, %0" : "=r" (val)); return val; @@ -165,7 +165,7 @@ static INLINE uint64_t read_cr3() static INLINE void write_cr3(uint64_t val) { - asm volatile("movq %0, %%cr3" + __asm__ volatile("movq %0, %%cr3" : : "r" (val)); } @@ -174,7 +174,7 @@ static INLINE uint64_t read_cr4() { uint64_t val; - asm volatile("movq %%cr4, %0" + __asm__ volatile("movq %%cr4, %0" : "=r" (val)); return val; @@ -182,7 +182,7 @@ static INLINE uint64_t read_cr4() static INLINE void write_cr4(uint64_t val) { - asm volatile("movq %0, %%cr4" + __asm__ volatile("movq %0, %%cr4" : : "r" (val)); } @@ -195,7 +195,7 @@ static INLINE uint64_t read_dr0() { uint64_t val; - asm volatile("movq %%dr0, %0" + __asm__ volatile("movq %%dr0, %0" : "=r" (val)); return val; @@ -203,7 +203,7 @@ static INLINE uint64_t read_dr0() static INLINE void write_dr0(uint64_t val) { - asm volatile("movq %0, %%dr0" + __asm__ volatile("movq %0, %%dr0" : : "r" (val)); } @@ -212,7 +212,7 @@ static 
INLINE uint64_t read_dr1() { uint64_t val; - asm volatile("movq %%dr1, %0" + __asm__ volatile("movq %%dr1, %0" : "=r" (val)); return val; @@ -220,7 +220,7 @@ static INLINE uint64_t read_dr1() static INLINE void write_dr1(uint64_t val) { - asm volatile("movq %0, %%dr1" + __asm__ volatile("movq %0, %%dr1" : : "r" (val)); } @@ -229,7 +229,7 @@ static INLINE uint64_t read_dr2() { uint64_t val; - asm volatile("movq %%dr2, %0" + __asm__ volatile("movq %%dr2, %0" : "=r" (val)); return val; @@ -237,7 +237,7 @@ static INLINE uint64_t read_dr2() static INLINE void write_dr2(uint64_t val) { - asm volatile("movq %0, %%dr2" + __asm__ volatile("movq %0, %%dr2" : : "r" (val)); } @@ -246,7 +246,7 @@ static INLINE uint64_t read_dr3() { uint64_t val; - asm volatile("movq %%dr3, %0" + __asm__ volatile("movq %%dr3, %0" : "=r" (val)); return val; @@ -254,7 +254,7 @@ static INLINE uint64_t read_dr3() static INLINE void write_dr3(uint64_t val) { - asm volatile("movq %0, %%dr3" + __asm__ volatile("movq %0, %%dr3" : : "r" (val)); } @@ -263,7 +263,7 @@ static INLINE uint64_t read_dr6() { uint64_t val; - asm volatile("movq %%dr6, %0" + __asm__ volatile("movq %%dr6, %0" : "=r" (val)); return val; @@ -271,7 +271,7 @@ static INLINE uint64_t read_dr6() static INLINE void write_dr6(uint64_t val) { - asm volatile("movq %0, %%dr6" + __asm__ volatile("movq %0, %%dr6" : : "r" (val)); } @@ -280,7 +280,7 @@ static INLINE uint64_t read_dr7() { uint64_t val; - asm volatile("movq %%dr7, %0" + __asm__ volatile("movq %%dr7, %0" : "=r" (val)); return val; @@ -288,7 +288,7 @@ static INLINE uint64_t read_dr7() static INLINE void write_dr7(uint64_t val) { - asm volatile("movq %0, %%dr7" + __asm__ volatile("movq %0, %%dr7" : : "r" (val)); } @@ -301,7 +301,7 @@ static INLINE uint16_t read_ds() { uint16_t val; - asm volatile("movw %%ds, %0" + __asm__ volatile("movw %%ds, %0" : "=r" (val)); return val; @@ -309,7 +309,7 @@ static INLINE uint16_t read_ds() static INLINE void write_ds(uint16_t val) { - asm 
volatile("movw %0, %%ds" + __asm__ volatile("movw %0, %%ds" : : "r" (val)); } @@ -318,7 +318,7 @@ static INLINE uint16_t read_es() { uint16_t val; - asm volatile("movw %%es, %0" + __asm__ volatile("movw %%es, %0" : "=r" (val)); return val; @@ -326,7 +326,7 @@ static INLINE uint16_t read_es() static INLINE void write_es(uint16_t val) { - asm volatile("movw %0, %%es" + __asm__ volatile("movw %0, %%es" : : "r" (val)); } @@ -335,7 +335,7 @@ static INLINE uint16_t read_fs() { uint16_t val; - asm volatile("movw %%fs, %0" + __asm__ volatile("movw %%fs, %0" : "=r" (val)); return val; @@ -343,7 +343,7 @@ static INLINE uint16_t read_fs() static INLINE void write_fs(uint16_t val) { - asm volatile("movw %0, %%fs" + __asm__ volatile("movw %0, %%fs" : : "r" (val)); } @@ -352,7 +352,7 @@ static INLINE uint16_t read_gs() { uint16_t val; - asm volatile("movw %%gs, %0" + __asm__ volatile("movw %%gs, %0" : "=r" (val)); return val; @@ -360,7 +360,7 @@ static INLINE uint16_t read_gs() static INLINE void write_gs(uint16_t val) { - asm volatile("movw %0, %%gs" + __asm__ volatile("movw %0, %%gs" : : "r" (val)); } @@ -371,12 +371,12 @@ static INLINE void write_gs(uint16_t val) static INLINE void clts() { - asm volatile("clts"); + __asm__ volatile("clts"); } static INLINE void fxsave(struct XSAVEArea *xsa) { - asm volatile("fxsave %0" + __asm__ volatile("fxsave %0" : "=m" (*xsa) : : "memory"); @@ -385,7 +385,7 @@ static INLINE void fxsave(struct XSAVEArea *xsa) // XXX: Need to fix AMD Bug static INLINE void fxrstor(struct XSAVEArea *xsa) { - asm volatile("fxrstor %0" + __asm__ volatile("fxrstor %0" : : "m" (*xsa) : "memory"); @@ -396,7 +396,7 @@ static INLINE void xsave(struct XSAVEArea *xsa, uint64_t mask) uint32_t lo = (uint32_t)mask; uint32_t hi = (uint32_t)(mask >> 32); - asm volatile("xsave %0" + __asm__ volatile("xsave %0" : "=m" (*xsa) : "a" (lo), "d" (hi) : "memory"); @@ -407,7 +407,7 @@ static INLINE void xsaveopt(struct XSAVEArea *xsa, uint64_t mask) uint32_t lo = (uint32_t)mask; 
uint32_t hi = (uint32_t)(mask >> 32); - asm volatile("xsaveopt %0" + __asm__ volatile("xsaveopt %0" : "=m" (*xsa) : "a" (lo), "d" (hi) : "memory"); @@ -418,7 +418,7 @@ static INLINE void xrstor(struct XSAVEArea *xsa, uint64_t mask) uint32_t lo = (uint32_t)mask; uint32_t hi = (uint32_t)(mask >> 32); - asm volatile("xrstor %0" + __asm__ volatile("xrstor %0" : : "m" (*xsa), "a" (lo), "d" (hi) : "memory"); @@ -430,21 +430,21 @@ static INLINE void xrstor(struct XSAVEArea *xsa, uint64_t mask) static INLINE void outb(uint16_t port, uint8_t data) { - asm volatile("outb %0, %1" + __asm__ volatile("outb %0, %1" : : "a" (data), "d" (port)); } static INLINE void outw(uint16_t port, uint16_t data) { - asm volatile("outw %0, %1" + __asm__ volatile("outw %0, %1" : : "a" (data), "d" (port)); } static INLINE void outl(uint16_t port, uint32_t data) { - asm volatile("outl %0, %1" + __asm__ volatile("outl %0, %1" : : "a" (data), "d" (port)); } @@ -453,7 +453,7 @@ static INLINE uint8_t inb(uint16_t port) { uint8_t data; - asm volatile("inb %1, %0" + __asm__ volatile("inb %1, %0" : "=a" (data) :"d" (port)); @@ -464,7 +464,7 @@ static INLINE uint16_t inw(uint16_t port) { uint16_t data; - asm volatile("inw %1, %0" + __asm__ volatile("inw %1, %0" : "=a" (data) :"d" (port)); @@ -475,7 +475,7 @@ static INLINE uint32_t inl(uint16_t port) { uint32_t data; - asm volatile("inl %1, %0" + __asm__ volatile("inl %1, %0" : "=a" (data) :"d" (port)); diff --git a/sys/arm64/gic.c b/sys/arm64/gic.c index 30061bf..b92db58 100644 --- a/sys/arm64/gic.c +++ b/sys/arm64/gic.c @@ -1,4 +1,5 @@ #include +#include "include/pmap.h" #include "sys/cdefs.h" #include "errno.h" #include @@ -220,8 +221,8 @@ gic_addr_init(void) { uint32_t index = 0; - gic.gic_dist = (void*)DIST_BASE_ADDR; - gic.gic_rdist = (void*)REDIST_BASE_ADDR; + gic.gic_dist = (void*)DEVPA2VA(DIST_BASE_ADDR); + gic.gic_rdist = (void*)DEVPA2VA(REDIST_BASE_ADDR); while((gic.gic_rdist[index].lpis.GICR_TYPER & (1<<4)) == 0) // Keep incrementing until 
GICR_TYPER.Last reports no more RDs in block { diff --git a/sys/arm64/include/atomic.h b/sys/arm64/include/atomic.h index d9cbb3b..cad77a6 100644 --- a/sys/arm64/include/atomic.h +++ b/sys/arm64/include/atomic.h @@ -7,7 +7,7 @@ atomic_swap_uint32(volatile uint32_t *dst, uint32_t newval) { uint32_t retval; - asm volatile(".arch_extension lse; swp %w2, %w0, [%w1]; .arch_extension nolse;" + __asm__ volatile(".arch_extension lse; swp %w2, %w0, [%w1]; .arch_extension nolse;" : "=r" (retval) : "r" (dst), "r" (newval) : "memory"); @@ -20,7 +20,7 @@ atomic_swap_uint64(volatile uint64_t *dst, uint64_t newval) { uint64_t retval; - asm volatile(".arch_extension lse; swp %2, %0, [%1]; .arch_extension nolse;" + __asm__ volatile(".arch_extension lse; swp %2, %0, [%1]; .arch_extension nolse;" : "=r" (retval) : "r" (dst), "r" (newval) : "memory"); diff --git a/sys/arm64/include/cpu.h b/sys/arm64/include/cpu.h index a233919..c877c89 100644 --- a/sys/arm64/include/cpu.h +++ b/sys/arm64/include/cpu.h @@ -3,8 +3,8 @@ * All rights reserved. 
*/ -#ifndef __AMD64_H__ -#define __AMD64_H__ +#ifndef __ARM64_H__ +#define __ARM64_H__ #include @@ -12,23 +12,19 @@ * Page Tables */ -#define PGNUMMASK 0xFFFFFFFFFFFFF000ULL +// #define PGNUMMASK 0xFFFFFFFFFFFFF000ULL -#define PGIDXSHIFT 9 -#define PGIDXMASK (512 - 1) +// #define PGIDXSHIFT 9 +// #define PGIDXMASK (512 - 1) -#define PGSHIFT 12 +#define PGSHIFT 14 #define PGSIZE (1 << PGSHIFT) #define PGMASK (PGSIZE - 1) -#define LARGE_PGSHIFT 21 +#define LARGE_PGSHIFT 26 #define LARGE_PGSIZE (1 << LARGE_PGSHIFT) #define LARGE_PGMASK (LARGE_PGSIZE - 1) -#define HUGE_PGSHIFT 30 -#define HUGE_PGSIZE (1 << HUGE_PGSHIFT) -#define HUGE_PGMASK (HUGE_PGSIZE - 1) - #define ROUNDUP_PGSIZE(x) (((x) + LARGE_PGSIZE - 1) & ~LARGE_PGMASK) #define ROUNDDOWN_PGSIZE(x) ((x) & ~LARGE_PGMASK) diff --git a/sys/arm64/include/cpuop.h b/sys/arm64/include/cpuop.h index 843ef91..78ae9fe 100644 --- a/sys/arm64/include/cpuop.h +++ b/sys/arm64/include/cpuop.h @@ -8,27 +8,27 @@ static INLINE void enable_interrupts() { - asm volatile("msr daifclr, #(0x0002)\n"); + __asm__ volatile("msr daifclr, #(0x0002)\n"); } static INLINE void disable_interrupts() { - asm volatile("msr daifset, #(0x0002)\n"); + __asm__ volatile("msr daifset, #(0x0002)\n"); } static INLINE void hlt() { - asm volatile("wfi"); + __asm__ volatile("wfi"); } static INLINE void pause() { - asm volatile("yield"); + __asm__ volatile("yield"); } static INLINE void breakpoint() { - asm volatile("brk #0"); + __asm__ volatile("brk #0"); } #endif /* __ARM64OP_H__ */ diff --git a/sys/arm64/include/metal.h b/sys/arm64/include/metal.h new file mode 100644 index 0000000..2530b53 --- /dev/null +++ b/sys/arm64/include/metal.h @@ -0,0 +1,56 @@ +#pragma once + +#include +#include + +#define MCODE ALIGNED(16) __attribute__((section("mcode"))) +#define MDATA __attribute__((section("mdata"))) + +#define METAL_REG_MSTK (METAL_REG_MG8) + +#if defined(DECL_MROUTINE) +#undef DECL_MROUTINE +#define DECL_MROUTINE(name) void MCODE name(void) +#endif + +#if 
defined(IMPL_MROUTINE) +#undef IMPL_MROUTINE +#define IMPL_MROUTINE(name, exit_flag) void MCODE _ ## name ## _impl(void); \ + __asm__ ( \ + ".section \"mcode\";" \ + ".globl " _METAL_STR(name) ";" \ + ".balign 16;" \ + _METAL_STR(name) ":;" \ + METAL_RMR_GAS(METAL_REG_MSTK, AARCH_REG_X0) \ + "mov x1, sp;" \ + "str x1, [x0, #-16]!;" \ + "mov sp, x0;" \ + "mov x0, xzr;" \ + "ldr x0,=_" _METAL_STR(name) "_impl;" \ + "blr x0;" \ + "ldr x0, [sp], #16;" \ + "mov sp, x0;" \ + METAL_MEXIT_GAS(exit_flag) \ + ); \ +void MCODE _ ## name ## _impl(void) +#endif + +#define IMPL_SHORT_MROUTINE(name, exit_flag) void MCODE _ ## name ## _impl(void); \ + __asm__ ( \ + ".section \"mcode\";" \ + ".globl " _METAL_STR(name) ";" \ + ".balign 16;" \ + _METAL_STR(name) ":;" \ + "mov x0, xzr;" \ + "ldr x0,=_" _METAL_STR(name) "_impl;" \ + "blr x0;" \ + METAL_MEXIT_GAS(exit_flag) \ + ); \ +void MCODE _ ## name ## _impl(void) + +#define DECL_MVAR(type, name) type MDATA name +#define DECL_MVAR_ALIGNED(type, name, align) type ALIGNED(align) MDATA name + +#define METAL_REG_MPTB_DMAP (METAL_REG_MG6) +#define METAL_REG_MPTB_XMEM (METAL_REG_MG7) +#define METAL_REG_MPTB_USER (METAL_REG_MG5) diff --git a/sys/arm64/include/metalp.h b/sys/arm64/include/metalp.h new file mode 100644 index 0000000..882781e --- /dev/null +++ b/sys/arm64/include/metalp.h @@ -0,0 +1,384 @@ +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include + +#define AARCH_REG_X0 (0) +#define AARCH_REG_X1 (1) +#define AARCH_REG_X2 (2) +#define AARCH_REG_X3 (3) +#define AARCH_REG_X4 (4) +#define AARCH_REG_X5 (5) +#define AARCH_REG_X6 (6) +#define AARCH_REG_X7 (7) +#define AARCH_REG_X8 (8) +#define AARCH_REG_X9 (9) +#define AARCH_REG_X10 (10) +#define AARCH_REG_X11 (11) +#define AARCH_REG_X12 (12) +#define AARCH_REG_X13 (13) +#define AARCH_REG_X14 (14) +#define AARCH_REG_X15 (15) +#define AARCH_REG_X16 (16) +#define AARCH_REG_X17 (17) +#define AARCH_REG_X18 (18) +#define AARCH_REG_X19 (19) +#define AARCH_REG_X20 
(20) +#define AARCH_REG_X21 (21) +#define AARCH_REG_X22 (22) +#define AARCH_REG_X23 (23) +#define AARCH_REG_X24 (24) +#define AARCH_REG_X25 (25) +#define AARCH_REG_X26 (26) +#define AARCH_REG_X27 (27) +#define AARCH_REG_X28 (28) +#define AARCH_REG_X29 (29) +#define AARCH_REG_X30 (30) +#define AARCH_REG_X31 (31) + +#define METAL_REG_MO0 (0) +#define METAL_REG_MO1 (1) +#define METAL_REG_MO2 (2) +#define METAL_REG_MO3 (3) +#define METAL_REG_MO4 (4) +#define METAL_REG_MO5 (5) +#define METAL_REG_MR0 (6) +#define METAL_REG_MR1 (7) +#define METAL_REG_MR2 (8) +#define METAL_REG_MR3 (9) +#define METAL_REG_MI0 (10) +#define METAL_REG_MI1 (11) +#define METAL_REG_MI2 (12) +#define METAL_REG_MI3 (13) +#define METAL_REG_MI4 (14) +#define METAL_REG_MI5 (15) +#define METAL_REG_MLR (METAL_REG_MI5) +#define METAL_REG_MIR0 (METAL_REG_MI0) +#define METAL_REG_MIR1 (METAL_REG_MI1) +#define METAL_REG_MIR2 (METAL_REG_MI2) +#define METAL_REG_MER0 (METAL_REG_MI0) +#define METAL_REG_MER1 (METAL_REG_MI1) +#define METAL_REG_MER2 (METAL_REG_MI2) +#define METAL_REG_MSPSR (METAL_REG_MI3) +#define METAL_REG_MSR (16) +#define METAL_REG_MBR (17) +#define METAL_REG_MIB (18) +#define METAL_REG_MEB (19) +#define METAL_REG_MTP (20) +#define METAL_REG_MG5 (21) +#define METAL_REG_MG6 (22) +#define METAL_REG_MG7 (23) +#define METAL_REG_MG8 (24) +#define METAL_REG_MG9 (25) +#define METAL_REG_MG10 (26) +#define METAL_REG_MG11 (27) +#define METAL_REG_MG12 (28) +#define METAL_REG_MG13 (29) +#define METAL_REG_MG14 (30) +#define METAL_REG_MG15 (31) + +#define _METAL_STR(x) #x +#define METAL_STR(x) _METAL_STR(x) +#define _METAL_GAS_ENCODE(x) ".word " METAL_STR(x) ";" +#define METAL_GAS_ENCODE(x) _METAL_GAS_ENCODE(x) + +// metal insts defs +#define METAL_WMR_ENCODING(mreg, greg) (0xd61f2C00 | ((mreg) << 5) | ((greg) << 0)) +#define METAL_WMR_GAS(mreg, greg) METAL_GAS_ENCODE(METAL_WMR_ENCODING(mreg, greg)) +#define METAL_WMR(reg, var) do { __asm__ volatile (\ + "mov x0, %x0;" \ + METAL_WMR_GAS(reg, AARCH_REG_X0)\ + 
: \ + : "r" (var)\ + : "x0" \ + ); } while(0) + +#define METAL_RMR_ENCODING(mreg, greg) (0xd61f2800 | ((mreg) << 5) | ((greg) << 0)) +#define METAL_RMR_GAS(mreg, greg) METAL_GAS_ENCODE(METAL_RMR_ENCODING(mreg,greg)) +#define METAL_RMR(reg, var) do { \ + __asm__ volatile ( \ + METAL_RMR_GAS(reg, AARCH_REG_X0) \ + "mov %x0, x0;"\ + : "=r" (var) \ + : \ + : "x0" \ + ); } while(0) + +// +// we need to preserve the stack pointer between mroutine calls +// mroutines never return values using stacks +// and since "mexit"s always occur before "return"s +// the function epilogue is not run before returning +// which may destroy the stack if local variables are defined +// +// we do this using the mroutine stub function to wrap mroutine calls +// +#define METAL_MENTER_ENCODING(mroutine) (0xd61f2000 | ((mroutine) << 0)) +#define METAL_MENTER_GAS(mroutine) METAL_GAS_ENCODE(METAL_MENTER_ENCODING(mroutine)) +#define METAL_MENTER(mroutine) do { \ + __asm__ volatile ( \ + METAL_MENTER_GAS(mroutine) \ + : \ + : \ + : \ + ); \ + } while(0) + +#define METAL_MEXIT_ENCODING(flags) (0xd61f2400 | ((flags) << 0)) +#define METAL_MEXIT_GAS(flags) METAL_GAS_ENCODE(METAL_MEXIT_ENCODING(flags)) +#define METAL_MEXIT(flags) do { \ + __asm__ volatile ( \ + METAL_MEXIT_GAS(flags) \ + : \ + : \ + : \ + ); } while(0) + +#define MEXIT_FLAG_IIM (1 << 1) // this mexit masks instruction intercept for the instruction returned to +#define MEXIT_FLAG_RFI (1 << 0) // this mexit is a return from intercept (also restores CPSR from MSPSR) + +// do not provide C version of RAR/WAR for the current bank +// can't decide which GP register to use as temp +#define METAL_RAR_ENCODING(idxmreg, dstmreg) (0xd61f3000 | ((idxmreg) << 5) | ((dstmreg) << 0)) +#define METAL_RAR_GAS(idxmreg, dstmreg) METAL_GAS_ENCODE(METAL_RAR_ENCODING(idxmreg, dstmreg)) + +#define METAL_WAR_ENCODING(idxmreg, srcmreg) (0xd61f3400 | ((idxmreg) << 5) | ((srcmreg) << 0)) +#define METAL_WAR_GAS(idxmreg, dstmreg) 
METAL_GAS_ENCODE(METAL_WAR_ENCODING(idxmreg, dstmreg)) + +#define METAL_RPR_ENCODING(idxmreg, dstmreg) (0xd61f4000 | ((idxmreg) << 5) | ((dstmreg) << 0)) +#define METAL_RPR_GAS(idxmreg, dstmreg) METAL_GAS_ENCODE(METAL_RPR_ENCODING(idxmreg, dstmreg)) +#define METAL_RPR(idxvar, var) do { __asm__ volatile (\ + METAL_RMR_GAS(METAL_REG_MR0, AARCH_REG_X1) \ + METAL_RMR_GAS(METAL_REG_MR1, AARCH_REG_X2) \ + \ + "mov x0, %x1;" \ + METAL_WMR_GAS(METAL_REG_MR0, AARCH_REG_X0) \ + METAL_RPR_GAS(METAL_REG_MR0, METAL_REG_MR1) \ + METAL_RMR_GAS(METAL_REG_MR1, AARCH_REG_X0) \ + "mov %x0, x0;" \ + \ + METAL_WMR_GAS(METAL_REG_MR0, AARCH_REG_X1) \ + METAL_WMR_GAS(METAL_REG_MR1, AARCH_REG_X2) \ + : "=r" (var) \ + : "r" (idxvar)\ + : "x0", "x1", "x2" \ + ); } while(0) + +#define METAL_WPR_ENCODING(idxmreg, srcmreg) (0xd61f4400 | ((idxmreg) << 5) | ((srcmreg) << 0)) +#define METAL_WPR_GAS(idxmreg, srcmreg) METAL_GAS_ENCODE(METAL_WPR_ENCODING(idxmreg, srcmreg)) +#define METAL_WPR(idxvar, var) do { __asm__ volatile (\ + METAL_RMR_GAS(METAL_REG_MR0, AARCH_REG_X1) \ + METAL_RMR_GAS(METAL_REG_MR1, AARCH_REG_X2) \ + \ + "mov x0, %x0;" \ + METAL_WMR_GAS(METAL_REG_MR0, AARCH_REG_X0) \ + "mov x0, %x1;" \ + METAL_WMR_GAS(METAL_REG_MR1, AARCH_REG_X0) \ + \ + METAL_WPR_GAS(METAL_REG_MR0, METAL_REG_MR1) \ + \ + METAL_WMR_GAS(METAL_REG_MR0, AARCH_REG_X1) \ + METAL_WMR_GAS(METAL_REG_MR1, AARCH_REG_X2) \ + : \ + : "r" (idxvar), "r" (var) \ + : "x0", "x1", "x2" \ + ); } while(0) + +#define METAL_MCLI_ENCODING (0xd61f3800) +#define METAL_MCLI_GAS METAL_GAS_ENCODE(METAL_MCLI_ENCODING) +#define METAL_MCLI do { \ + __asm__ volatile ( \ + METAL_MCLI_GAS \ + : \ + : \ + : \ + ); } while(0) + +#define METAL_MSTI_ENCODING (0xd61f3C00) +#define METAL_MSTI_GAS METAL_GAS_ENCODE(METAL_MSTI_ENCODING) +#define METAL_MSTI do { \ + __asm__ volatile ( \ + METAL_MSTI_GAS \ + : \ + : \ + : \ + ); } while(0) + +#define _METAL_WTLB_SHIFT_RM (0) +#define _METAL_WTLB_SHIFT_RN (5) +#define _METAL_WTLB_SHIFT_RL (10) +#define 
_METAL_WTLB_ENCODING(rl, rn, rm) ".word " METAL_STR(0xd63f8000 | (rl << _METAL_WTLB_SHIFT_RL) | (rn << _METAL_WTLB_SHIFT_RN) | (rm << _METAL_WTLB_SHIFT_RM)) +#define METAL_WTLB(descreg, inforeg, vaddrreg) do { __asm__ volatile (\ + _METAL_WTLB_ENCODING(descreg, vaddrreg, inforeg)); \ +} while (0) + +#define _METAL_RTLB_SHIFT_RM (0) +#define _METAL_RTLB_SHIFT_RN (5) +#define _METAL_RTLB_SHIFT_RL (10) +#define _METAL_RTLB_ENCODING(rl, rn, rm) ".word " METAL_STR(0xd61f8000 | (rl << _METAL_RTLB_SHIFT_RL) | (rn << _METAL_RTLB_SHIFT_RN) | (rm << _METAL_RTLB_SHIFT_RM)) +#define METAL_RTLB(descreg, inforeg, vaddrreg) do { __asm__ volatile (\ + _METAL_RTLB_ENCODING(descreg, vaddrreg, inforeg)); \ +} while (0) + + +#define METAL_PMEMOP_MODE_NORMAL (0) +#define METAL_PMEMOP_MODE_PRE (1) +#define METAL_PMEMOP_MODE_POST (2) + +// PSTR +#define _METAL_PSTR_TEMPLATE(var, paddr, cmd) do { __asm__ volatile (\ + "mov x0, %x0;" \ + "mov x1, %x1;" \ + "mov x2, #0;" \ + cmd \ + : \ + : "r" (var), "r" (paddr) \ + : "x0", "x1", "x2" \ + ); } while (0) + +#define METAL_PSTRR8_ENCODING(dReg, bReg, oReg) (0x8c400000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10)) +#define METAL_PSTRR8_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PSTRR8_ENCODING(dReg, bReg, oReg)) +#define METAL_PSTR8(var, paddr) _METAL_PSTR_TEMPLATE(var, paddr, METAL_PSTRR8_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2)) + +#define METAL_PSTRR16_ENCODING(dReg, bReg, oReg) (0x8cc00000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10)) +#define METAL_PSTRR16_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PSTRR16_ENCODING(dReg, bReg, oReg)) +#define METAL_PSTR16(var, paddr) _METAL_PSTR_TEMPLATE(var, paddr, METAL_PSTRR16_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2)) + +#define METAL_PSTRR32_ENCODING(dReg, bReg, oReg) (0x8d400000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10)) +#define METAL_PSTRR32_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PSTRR32_ENCODING(dReg, bReg, oReg)) +#define METAL_PSTR32(var, paddr) 
_METAL_PSTR_TEMPLATE(var, paddr, METAL_PSTRR32_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2)) + +#define METAL_PSTRR64_ENCODING(dReg, bReg, oReg) (0x8dc00000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10)) +#define METAL_PSTRR64_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PSTRR64_ENCODING(dReg, bReg, oReg)) +#define METAL_PSTR64(var, paddr) _METAL_PSTR_TEMPLATE(var, paddr, METAL_PSTRR64_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2)) + +// PLDR +#define _METAL_PLDR_TEMPLATE(var, paddr, cmd) do { __asm__ volatile (\ + "mov x1, %x1;" \ + "mov x2, #0;" \ + cmd \ + "mov %x0, x0;" \ + : "=r" (var) \ + : "r" (paddr) \ + : "x0", "x1", "x2" \ + ); } while (0) + +#define METAL_PLDRR8_ENCODING(dReg, bReg, oReg) (0x8c000000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10)) +#define METAL_PLDRR8_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PLDRR8_ENCODING(dReg, bReg, oReg)) +#define METAL_PLDR8(var, paddr) _METAL_PLDR_TEMPLATE(var, paddr, METAL_PLDRR8_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2)) + +#define METAL_PLDRR16_ENCODING(dReg, bReg, oReg) (0x8c800000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10)) +#define METAL_PLDRR16_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PLDRR16_ENCODING(dReg, bReg, oReg)) +#define METAL_PLDR16(var, paddr) _METAL_PLDR_TEMPLATE(var, paddr, METAL_PLDRR16_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2)) + +#define METAL_PLDRR32_ENCODING(dReg, bReg, oReg) (0x8d000000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10)) +#define METAL_PLDRR32_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PLDRR32_ENCODING(dReg, bReg, oReg)) +#define METAL_PLDR32(var, paddr) _METAL_PLDR_TEMPLATE(var, paddr, METAL_PLDRR32_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2)) + +#define METAL_PLDRR64_ENCODING(dReg, bReg, oReg) (0x8d800000 | ((dReg) << 0) | ((bReg) << 5) | ((oReg) << 10)) +#define METAL_PLDRR64_GAS(dReg, bReg, oReg) METAL_GAS_ENCODE(METAL_PLDRR64_ENCODING(dReg, bReg, oReg)) +#define METAL_PLDR64(var, paddr) _METAL_PLDR_TEMPLATE(var, paddr, 
METAL_PLDRR64_GAS(AARCH_REG_X0, AARCH_REG_X1, AARCH_REG_X2)) + +// Mroutine helpers + + +// can get MI0-MI4, MI5 is link reg +// MR23 = link register +#define METAL_MROUTINE_GETARG(idx, var) do { \ + _Static_assert(idx < (METAL_REG_MI4 - METAL_REG_MI0)); \ + METAL_RMR(METAL_REG_MI0 + idx, var); \ + } while(0) + +// can set MO0-MO4, MO5 is Link Reg +// MR7 is link register +#define METAL_MROUTINE_SETARG(idx, var) do { \ + _Static_assert(idx < (METAL_REG_MO4 - METAL_REG_MO0)); \ + METAL_WMR(METAL_REG_MO0 + idx, var); \ + } while(0) + +// mroutine defs +#define METAL_MROUTINE_SETRET(idx, var) do { \ + _Static_assert(idx < (METAL_REG_MI4 - METAL_REG_MI0)); \ + METAL_WMR(METAL_REG_MI0 + idx, var); \ + } while(0) + +#define METAL_MROUTINE_GETRET(idx, var) do { \ + _Static_assert(idx < (METAL_REG_MO4 - METAL_REG_MO0)); \ + METAL_RMR(METAL_REG_MO0 + idx, var); \ + } while(0) + +#define DECL_MROUTINE(name) void __attribute__((aligned(16))) name(void) +#define IMPL_MROUTINE(name, exit_flag) void _ ## name ## _impl(void); \ + __asm__ ( \ + ".globl " _METAL_STR(name) ";" \ + ".balign 16;" \ + _METAL_STR(name) ":;" \ + "bl _" _METAL_STR(name) "_impl;" \ + METAL_MEXIT_GAS(exit_flag) \ + ); \ +void _ ## name ## _impl(void) + + +// mroutine table defs +#define _MROUTINE_ENTRY_CTRL_MASK_VALID (1ul) +#define _MROUTINE_ENTRY_CTRL_MASK_ADDR (~((1ul << 4) - 1)) +#define MROUTINE_ENTRY_MAKE(addr, valid) (((uintptr_t)addr & _MROUTINE_ENTRY_CTRL_MASK_ADDR) | (valid & _MROUTINE_ENTRY_CTRL_MASK_VALID)) +typedef uint64_t mroutine_entry; + +#define MROUTINE_TABLE_MAX_ENTRY_NUM (256) +struct mroutine_table { + mroutine_entry entries[MROUTINE_TABLE_MAX_ENTRY_NUM]; +}; + +// inst intercept defs +#define INST_INTERCEPT_TABLE_MAX_ENTRY_NUM (64) +struct inst_intercept_entry { + uint32_t inst; + uint32_t opmask; + uint32_t ctrl; + uint32_t mask0; + uint32_t mask1; + uint32_t mask2; +}; +//_Static_assert(sizeof(struct inst_intercept_entry) == 6 * sizeof(uint32_t)); + +#define 
INST_INTERCEPT_MAKE_CTRL(mroutine, post, valid) ((mroutine) & 0xff | ((post) & 1) << 30 | ((valid) & 1) << 31) +struct inst_intercept_table { + struct inst_intercept_entry entries[INST_INTERCEPT_TABLE_MAX_ENTRY_NUM]; +}; + +// exc intercept defs +#define EXC_INTERCEPT_TABLE_MAX_ENTRY_NUM (64) +struct exc_intercept_entry { + uint32_t esrbits; + uint32_t esrmask; + uint32_t ctrl; +}; + +#define EXC_INTERCEPT_MODE_SYNC (0) +#define EXC_INTERCEPT_MODE_IRQ (1) +#define EXC_INTERCEPT_MODE_FIQ (2) +#define EXC_INTERCEPT_MODE_SERROR (3) +#define EXC_INTERCEPT_MAKE_CTRL(mroutine, mode, valid) ((mroutine) & 0xff | ((mode) & 0b11) << 8 | ((valid) & 1) << 31) +struct exc_intercept_table { + struct exc_intercept_entry entries[EXC_INTERCEPT_TABLE_MAX_ENTRY_NUM]; +}; + +// metal register defs +typedef uint64_t regval_t; +typedef regval_t msr_t; + +#define METAL_MSR_MASK_INIT (1ul << 63) +#define METAL_MSR_MASK_II (1ul << 62) +#define METAL_MSR_MASK_IM (1ul << 61) +#define METAL_MSR_MASK_LV (0xfful) +#define METAL_MSR_MASK_EI (1ul << 60) +#define METAL_MSR_MASK_PD (1ul << 59) + +#ifdef __cplusplus +} +#endif diff --git a/sys/arm64/include/mrt.h b/sys/arm64/include/mrt.h new file mode 100644 index 0000000..848cbd4 --- /dev/null +++ b/sys/arm64/include/mrt.h @@ -0,0 +1,36 @@ +#pragma once + +#include + +DECL_MROUTINE(mrt_init); +#define MRT_INIT_IDX (1) + +DECL_MROUTINE(mrt_pf_handler); +#define MRT_PF_HANDLER_IDX (2) + +DECL_MROUTINE(mrt_dummy); + +// +// int mrt_set_mptb(int ptbidx, regval_t val); +// +DECL_MROUTINE(mrt_set_mptb); +#define MRT_SET_MPTB_IDX (3) +#define MRT_SET_MPTB_USER (0) +#define MRT_SET_MPTB_DMAP (1) +#define MRT_SET_MPTB_XMEM (2) +#define MRT_SET_MPTB_ARG_IDX (0) +#define MRT_SET_MPTB_ARG_PTB (1) +#define MRT_SET_MPTB_RET_STATUS (0) + + +// +// void mrt_set_mtp(regval_t mtp); +// +DECL_MROUTINE(mrt_set_mtp); +#define MRT_SET_MTP_IDX (4) +#define MRT_SET_MTP_ARG_MTP (0) + +extern DECL_MVAR(struct mroutine_table, mtl_mrt_tbl); +extern DECL_MVAR(struct 
exc_intercept_table, mtl_exc_tbl); + +void mtl_init(void); diff --git a/sys/arm64/include/paging.h b/sys/arm64/include/paging.h new file mode 100644 index 0000000..3712d96 --- /dev/null +++ b/sys/arm64/include/paging.h @@ -0,0 +1,107 @@ +#pragma once + +#include +#include + +// 2048 buckets * 8 bytes / bucket = 16KB per page tbl = smallest page size +#define VM_PTBUCKETS (2048) + +// 1GB page ident map region +#define REGION_DMAP_PGSHIFT (30) +// 16K page userspace +#define REGION_USER_PGSHIFT (14) +// 16K page XMEM region +#define REGION_XMEM_PGSHIFT (14) + +// +// Page Table Pointer (points to a list of struct vmpd) +// +struct vmpte { + paddr_t first; +}; +_Static_assert(sizeof(struct vmpte) == 8); + +struct vmpt { + struct vmpte entries[VM_PTBUCKETS]; +}; +_Static_assert(sizeof(struct vmpt) == VM_PTBUCKETS * sizeof(struct vmpte)); + +// +// Page Table Entry (must be 8 bytes aligned) +// +#define VMPD_ATTR_P (0x1ul) + +#define VMPD_ATTR_DEV (0x2ul) + +#define VMPD_ATTR_AO_MASK (0xFul) +#define VMPD_ATTR_AO_SHIFT (0x4ul) + +// +// AP with 2 ELs: +// EL1 EL0 +// - 00 RW / +// - 01 RW RW +// - 10 RO / +// - 11 RO RO +// +// MTP: +// 0-1: ap +// 2: xn +// 3: rn +// +// AO REG for Kernel: +// entry 0 (kernel RO): RO 0b0011 +// entry 1 (kernel RW): RW 0b0001 +// entry 2 (user RO): RW 0b0001 +// entry 3 (user RW): RW 0b0001 +// +// AO REG for User: +// entry 0 (kernel RO): NA 0b0010 +// entry 1 (kernel RW): NA 0b0000 +// entry 2 (user RO): RO 0b0011 +// entry 3 (user RW): RW 0b0010 +// + +#define VMPD_ATTR_AO_MAKE(x) (((x) & VMPD_ATTR_AO_MASK) << VMPD_ATTR_AO_SHIFT) +#define VMPD_ATTR_AO_GET(x) (((x) >> VMPD_ATTR_AO_SHIFT) & VMPD_ATTR_AO_MASK) +#define VMPD_ATTR_AO_KRO (VMPD_ATTR_AO_MAKE(0ull)) +#define VMPD_ATTR_AO_KRW (VMPD_ATTR_AO_MAKE(1ull)) +#define VMPD_ATTR_AO_URO (VMPD_ATTR_AO_MAKE(2ull)) +#define VMPD_ATTR_AO_URW (VMPD_ATTR_AO_MAKE(3ull)) + +// #define VMPD_ATTR_CA_MAKE(x) (((x) & VMPD_ATTR_CA_MASK) << VMPD_ATTR_CA_SHIFT) +// #define VMPD_ATTR_CA_GET(x) (((x) 
>> VMPD_ATTR_CA_SHIFT) & VMPD_ATTR_CA_MASK) +// #define VMPD_ATTR_CA_NORMAL (VMPD_ATTR_CA_MAKE(0ull)) +// #define VMPD_ATTR_CA_DEVICE (VMPD_ATTR_CA_MAKE(1ull)) + +#define MTP_KERNEL (0b0011 | (0b0001 << 4) | (0b0001 << 8) | (0b0001 << 12)) +#define MTP_USER (0b0010 | (0b0000 << 4) | (0b0011 << 8) | (0b0010 << 12)) + +struct vmpd { + paddr_t next; // the next hppte in the list + paddr_t paddr; // the physical address of the VA + vaddr_t vaddr; // the corresponding virtual address + uint64_t attr; // the attributes of the page +}; +_Static_assert(sizeof(struct vmpd) == 32); + +static ALWAYS_INLINE inline uint64_t +vm_vahash(vaddr_t va) +{ + return ((va) * (va+3)) % VM_PTBUCKETS; +} + +static ALWAYS_INLINE inline uint64_t +vm_get_pfn(vaddr_t va, unsigned int pgshift) +{ + return va >> pgshift; +} + +static ALWAYS_INLINE inline uint64_t +vm_get_pgbase(vaddr_t va, unsigned int pgshift) +{ + return va & ~((1ull << pgshift) - 1); +} + + +void paging_init(); diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h index 67909b1..9cbd1e7 100644 --- a/sys/arm64/include/pmap.h +++ b/sys/arm64/include/pmap.h @@ -2,6 +2,7 @@ #ifndef __PMAP_H__ #define __PMAP_H__ +#include #include /* @@ -38,14 +39,20 @@ #define MEM_USERSPACE_STKTOP (MEM_USERSPACE_STKBASE + MEM_USERSPACE_STKLEN) #define MEM_DIRECTMAP_BASE 0xFFFF800000000000ULL +#define MEM_DIRECTMAP_DEV_BASE (MEM_DIRECTMAP_BASE + MEM_DIRECTMAP_LEN / 2) #define MEM_DIRECTMAP_LEN 0x0000010000000000ULL +#define MEM_DIRECTMAP_TOP (MEM_DIRECTMAP_BASE + MEM_DIRECTMAP_LEN) + #define MEM_XMAP_BASE 0xFFFF810000000000ULL #define MEM_XMAP_LEN 0x0000002000000000ULL +#define MEM_XMAP_TOP (MEM_XMAP_BASE + MEM_XMAP_LEN) #define PPN2DMVA(ppn) (((ppn) << PGSIZE) + MEM_DIRECTMAP_BASE) -#define DMVA2PPN(dmva) (((dmva) - MEM_DIRECTMAP_BASE) >> PGSIZE) -#define DMVA2PA(dmva) ((dmva) - MEM_DIRECTMAP_BASE) -#define DMPA2VA(pa) ((pa) + MEM_DIRECTMAP_BASE) +#define DMVA2PPN(dmva) (((uintptr_t)(dmva) - MEM_DIRECTMAP_BASE) >> PGSIZE) +#define 
DMVA2PA(dmva) ((uintptr_t)(dmva) - MEM_DIRECTMAP_BASE) +#define DMPA2VA(pa) ((uintptr_t)(pa) + MEM_DIRECTMAP_BASE) +#define DEVVA2PA(devva) ((uintptr_t)(devva) - MEM_DIRECTMAP_DEV_BASE) +#define DEVPA2VA(pa) ((uintptr_t)(pa) + MEM_DIRECTMAP_DEV_BASE) #define VA2PA(va) PMap_Translate(PMap_CurrentAS(), va) typedef struct AS diff --git a/sys/arm64/kernel.lds b/sys/arm64/kernel.lds index 3da0759..13f0757 100644 --- a/sys/arm64/kernel.lds +++ b/sys/arm64/kernel.lds @@ -21,8 +21,8 @@ SECTIONS /* Read-only sections, merged into text segment: */ PROVIDE (__executable_start = SEGMENT_START("text-segment", 0xFFFF800000400000)); - . = SEGMENT_START("text-segment", 0xFFFF800080400000); - .text : AT(SEGMENT_START("text-segment", 0xFFFF800080400000) - 0xFFFF800000000000) + . = SEGMENT_START("text-segment", 0xFFFF800000400000); + .text : AT(SEGMENT_START("text-segment", 0xFFFF800000400000) - 0xFFFF800000000000) { *(.boot) *(.text .stub .text.* .gnu.linkonce.t.*) @@ -236,5 +236,17 @@ SECTIONS /* DWARF Extension. */ .debug_macro 0 : { *(.debug_macro) } .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } + + /* Metal RAM */ + . 
= 0xC0000000; + .mdata : AT(0xC0000000) + { + *(mdata) + } + .mcode : + { + *(mcode) + } + /DISCARD/ : { *(.note.GNU-stack) *(.gnu_debuglink) *(.gnu.lto_*) } } diff --git a/sys/arm64/locore.S b/sys/arm64/locore.S index 88b8bd2..b9839a2 100644 --- a/sys/arm64/locore.S +++ b/sys/arm64/locore.S @@ -120,27 +120,22 @@ _el1_entry: #define BOOT_PGD_ATTR (PD_TABLE) #define BOOT_PUD_ATTR (PD_ACCESS | (MAIR_IDX_NORMAL_CACHEABLE << 2) | PD_BLOCK) #define BOOT_PUD_DEV_ATTR (PD_ACCESS | (MAIR_IDX_DEVICE << 2) | PD_BLOCK) -// ident map 0x80000000 -> 0x80000000 Size: 1G - ldr x0, =LOWMEM(_boot_ptl0lo) + 0x8 * 0 // x0 = L0 entry for vaddr 0x80000000 +// ident map 0x00000000 -> 0x00000000 Size: 1G + ldr x0, =LOWMEM(_boot_ptl0lo) + 0x8 * 0 // x0 = L0 entry for vaddr 0x00000000 ldr x1, =LOWMEM(_boot_ptl1lo) + BOOT_PGD_ATTR str x1, [x0] - ldr x0, =LOWMEM(_boot_ptl1lo) + 0x8 * 2 // x0 = L1 entry for vaddr 0x80000000 - ldr x1, =0x80000000 | BOOT_PUD_ATTR // map to 0x80000000 + ldr x0, =LOWMEM(_boot_ptl1lo) + 0x8 * 0 // x0 = L1 entry for vaddr 0x00000000 + ldr x1, =0x00000000 | BOOT_PUD_ATTR // map to 0x00000000 str x1, [x0] -// ident map 0x00000000 -> 0x00000000 Size: 1G as device memory - ldr x0, =LOWMEM(_boot_ptl1lo) + 0x8 * 0 // x0 - L1 entry for vaddr 0x00000000 - ldr x1, =0x00000000 | BOOT_PUD_DEV_ATTR - str x1, [x0] - -// map 0x80000000 -> 0xFFFF800080000000 Size: 1G - ldr x0, =LOWMEM(_boot_ptl0hi) + 0x8 * 256 // x0 = L0 entry for vaddr 0xFFFF800080000000 +// map 0x00000000 -> 0xFFFF800000000000 Size: 1G + ldr x0, =LOWMEM(_boot_ptl0hi) + 0x8 * 256 // x0 = L0 entry for vaddr 0xFFFF800000000000 ldr x1, =LOWMEM(_boot_ptl1hi) + BOOT_PGD_ATTR str x1, [x0] - ldr x0, =LOWMEM(_boot_ptl1hi) + 0x8 * 2 // x0 = L1 entry for vaddr 0xFFFF800080000000 - ldr x1, =0x80000000 | BOOT_PUD_ATTR + ldr x0, =LOWMEM(_boot_ptl1hi) + 0x8 * 0 // x0 = L1 entry for vaddr 0xFFFF800000000000 + ldr x1, =0x00000000 | BOOT_PUD_ATTR str x1, [x0] // enable MMU diff --git a/sys/arm64/machine.c b/sys/arm64/machine.c
index b28311b..57a992f 100644 --- a/sys/arm64/machine.c +++ b/sys/arm64/machine.c @@ -21,6 +21,7 @@ #include #include "../dev/console.h" +#include "machine/cpuop.h" extern void KTime_Init(); extern void KTimer_Init(); @@ -101,15 +102,15 @@ void Machine_Init() ); while(1) { - __asm__ volatile ("wfi"); + hlt(); }; - Machine_SyscallInit(); + //Machine_SyscallInit(); /* * Initialize Memory Allocation and Virtual Memory */ - //PAlloc_AddRegion(DMPA2VA(16*1024*1024), 16*1024*1024); + PAlloc_AddRegion(DMPA2VA(16*1024*1024), 16*1024*1024); PMap_Init(); XMem_Init(); PAlloc_LateInit(); diff --git a/sys/arm64/mbentry.c b/sys/arm64/mbentry.c index a537a57..5a20a3a 100644 --- a/sys/arm64/mbentry.c +++ b/sys/arm64/mbentry.c @@ -10,7 +10,9 @@ #include "../dev/console.h" +#include #include +#include void MachineBoot_Entry(unsigned long magic, unsigned long addr); @@ -29,6 +31,10 @@ static int memRegionIdx; void MachineBoot_Entry(unsigned long magic, unsigned long addr) { + // initialize metal mode + mtl_init(); + paging_init(); + // Main initialization Machine_Init(); diff --git a/sys/arm64/metal.c b/sys/arm64/metal.c new file mode 100644 index 0000000..99dff8b --- /dev/null +++ b/sys/arm64/metal.c @@ -0,0 +1,21 @@ +#include +#include +#include + +// early metal init +void mtl_init(void) +{ + // initialize metal mode + memset(&mtl_mrt_tbl, 0, sizeof(mtl_mrt_tbl)); + + mtl_mrt_tbl.entries[MRT_INIT_IDX] = MROUTINE_ENTRY_MAKE(mrt_init, 1); + + // load mroutine table + METAL_WMR(METAL_REG_MBR, &mtl_mrt_tbl); + + // toggle metal mode + METAL_MENTER(MRT_INIT_IDX); + + // call dummy mroutine to cache the entries + METAL_MENTER(0); +} diff --git a/sys/arm64/mrt/init.c b/sys/arm64/mrt/init.c new file mode 100644 index 0000000..b716c57 --- /dev/null +++ b/sys/arm64/mrt/init.c @@ -0,0 +1,97 @@ +#include "machine/metalp.h" +#include +#include + +DECL_MVAR(struct mroutine_table, mtl_mrt_tbl); +DECL_MVAR(struct exc_intercept_table, mtl_exc_tbl); +DECL_MVAR(struct inst_intercept_table, 
mtl_inst_tbl); +#define METAL_BOOTSTACK_SZ (4096) +DECL_MVAR_ALIGNED(static char, mtl_bootstack[METAL_BOOTSTACK_SZ], METAL_BOOTSTACK_SZ); + +IMPL_SHORT_MROUTINE(mrt_dummy, 0) +{ +} + +IMPL_SHORT_MROUTINE(mrt_init, 0) +{ + memset(&mtl_mrt_tbl, 0, sizeof(mtl_mrt_tbl)); + memset(&mtl_exc_tbl, 0, sizeof(mtl_exc_tbl)); + memset(&mtl_inst_tbl, 0, sizeof(mtl_inst_tbl)); + + // mroutine 0 - 8 + mtl_mrt_tbl.entries[0] = MROUTINE_ENTRY_MAKE(mrt_dummy, 1); + mtl_mrt_tbl.entries[MRT_INIT_IDX] = MROUTINE_ENTRY_MAKE(mrt_init, 1); + mtl_mrt_tbl.entries[MRT_PF_HANDLER_IDX] = MROUTINE_ENTRY_MAKE(mrt_pf_handler, 1); + mtl_mrt_tbl.entries[MRT_SET_MPTB_IDX] = MROUTINE_ENTRY_MAKE(mrt_set_mptb, 1); + mtl_mrt_tbl.entries[MRT_SET_MTP_IDX] = MROUTINE_ENTRY_MAKE(mrt_set_mtp, 1); + + mtl_exc_tbl.entries[0].esrbits = (0b100100 << 26) | (0b00100); // data abort, lv0 translation + mtl_exc_tbl.entries[0].esrmask = (0b111111 << 26) | (0b11111); + mtl_exc_tbl.entries[0].ctrl = EXC_INTERCEPT_MAKE_CTRL(MRT_PF_HANDLER_IDX, EXC_INTERCEPT_MODE_SYNC, 1); + + mtl_exc_tbl.entries[1].esrbits = (0b100100 << 26) | (0b00101); // data abort, lv1 translation + mtl_exc_tbl.entries[1].esrmask = (0b111111 << 26) | (0b11111); + mtl_exc_tbl.entries[1].ctrl = EXC_INTERCEPT_MAKE_CTRL(MRT_PF_HANDLER_IDX, EXC_INTERCEPT_MODE_SYNC, 1); + + mtl_exc_tbl.entries[2].esrbits = (0b100100 << 26) | (0b00110); // data abort, lv2 translation + mtl_exc_tbl.entries[2].esrmask = (0b111111 << 26) | (0b11111); + mtl_exc_tbl.entries[2].ctrl = EXC_INTERCEPT_MAKE_CTRL(MRT_PF_HANDLER_IDX, EXC_INTERCEPT_MODE_SYNC, 1); + + mtl_exc_tbl.entries[3].esrbits = (0b100100 << 26) | (0b00111); // data abort, lv3 translation + mtl_exc_tbl.entries[3].esrmask = (0b111111 << 26) | (0b11111); + mtl_exc_tbl.entries[3].ctrl = EXC_INTERCEPT_MAKE_CTRL(MRT_PF_HANDLER_IDX, EXC_INTERCEPT_MODE_SYNC, 1); + + mtl_exc_tbl.entries[4].esrbits = (0b100001 << 26) | (0b00100); // inst abort, lv0 translation + mtl_exc_tbl.entries[4].esrmask = (0b111111 << 26) | (0b11111); 
+ mtl_exc_tbl.entries[4].ctrl = EXC_INTERCEPT_MAKE_CTRL(MRT_PF_HANDLER_IDX, EXC_INTERCEPT_MODE_SYNC, 1); + + mtl_exc_tbl.entries[5].esrbits = (0b100001 << 26) | (0b00101); // inst abort, lv1 translation + mtl_exc_tbl.entries[5].esrmask = (0b111111 << 26) | (0b11111); + mtl_exc_tbl.entries[5].ctrl = EXC_INTERCEPT_MAKE_CTRL(MRT_PF_HANDLER_IDX, EXC_INTERCEPT_MODE_SYNC, 1); + + mtl_exc_tbl.entries[6].esrbits = (0b100001 << 26) | (0b00110); // inst abort, lv2 translation + mtl_exc_tbl.entries[6].esrmask = (0b111111 << 26) | (0b11111); + mtl_exc_tbl.entries[6].ctrl = EXC_INTERCEPT_MAKE_CTRL(MRT_PF_HANDLER_IDX, EXC_INTERCEPT_MODE_SYNC, 1); + + mtl_exc_tbl.entries[7].esrbits = (0b100001 << 26) | (0b00111); // inst abort, lv3 translation + mtl_exc_tbl.entries[7].esrmask = (0b111111 << 26) | (0b11111); + mtl_exc_tbl.entries[7].ctrl = EXC_INTERCEPT_MAKE_CTRL(MRT_PF_HANDLER_IDX, EXC_INTERCEPT_MODE_SYNC, 1); + + mtl_exc_tbl.entries[8].esrbits = (0b100101 << 26) | (0b00100); // data abort, lv0 translation + mtl_exc_tbl.entries[8].esrmask = (0b111111 << 26) | (0b11111); + mtl_exc_tbl.entries[8].ctrl = EXC_INTERCEPT_MAKE_CTRL(MRT_PF_HANDLER_IDX, EXC_INTERCEPT_MODE_SYNC, 1); + + mtl_exc_tbl.entries[9].esrbits = (0b100101 << 26) | (0b00101); // data abort, lv1 translation + mtl_exc_tbl.entries[9].esrmask = (0b111111 << 26) | (0b11111); + mtl_exc_tbl.entries[9].ctrl = EXC_INTERCEPT_MAKE_CTRL(MRT_PF_HANDLER_IDX, EXC_INTERCEPT_MODE_SYNC, 1); + + mtl_exc_tbl.entries[10].esrbits = (0b100101 << 26) | (0b00110); // data abort, lv2 translation + mtl_exc_tbl.entries[10].esrmask = (0b111111 << 26) | (0b11111); + mtl_exc_tbl.entries[10].ctrl = EXC_INTERCEPT_MAKE_CTRL(MRT_PF_HANDLER_IDX, EXC_INTERCEPT_MODE_SYNC, 1); + + mtl_exc_tbl.entries[11].esrbits = (0b100101 << 26) | (0b00111); // data abort, lv3 translation + mtl_exc_tbl.entries[11].esrmask = (0b111111 << 26) | (0b11111); + mtl_exc_tbl.entries[11].ctrl = EXC_INTERCEPT_MAKE_CTRL(MRT_PF_HANDLER_IDX, EXC_INTERCEPT_MODE_SYNC, 1); + + // load 
mroutine table + void* tbl_addr = &mtl_mrt_tbl; + METAL_WMR(METAL_REG_MBR, tbl_addr); + + // reset inst intercept table + tbl_addr = &mtl_inst_tbl; + METAL_WMR(METAL_REG_MIB, tbl_addr); + + // load exc intercept table + tbl_addr = &mtl_exc_tbl; + METAL_WMR(METAL_REG_MEB, tbl_addr); + + // enable exc intercept + regval_t msr; + METAL_RMR(METAL_REG_MSR, msr); + msr |= (1ull << 60); + METAL_WMR(METAL_REG_MSR, msr); + + // temporary metal stack + regval_t mstk = (regval_t)&mtl_bootstack[METAL_BOOTSTACK_SZ]; + METAL_WMR(METAL_REG_MSTK, mstk); +} \ No newline at end of file diff --git a/sys/arm64/mrt/paging.c b/sys/arm64/mrt/paging.c new file mode 100644 index 0000000..eaa14e1 --- /dev/null +++ b/sys/arm64/mrt/paging.c @@ -0,0 +1,210 @@ +#include +#include +#include + +#include +#include +#include +#include + +#define VM_MAP_PAGE_SHIFT (12) + +// +// finds the page table base for the corresponding uva +// uva: untained virtual address +// +static MCODE int +vmm_get_ptb(vaddr_t uva, paddr_t * paddr, unsigned int * pgshift) +{ + paddr_t addr = 0; + if (uva >= MEM_USERSPACE_BASE && uva < MEM_USERSPACE_TOP) { + METAL_RMR(METAL_REG_MPTB_USER, addr); + *pgshift = REGION_USER_PGSHIFT; + } else if (uva >= MEM_DIRECTMAP_BASE && uva < MEM_DIRECTMAP_TOP) { + METAL_RMR(METAL_REG_MPTB_DMAP, addr); + *pgshift = REGION_DMAP_PGSHIFT; + } else if (uva >= MEM_XMAP_BASE && uva < MEM_XMAP_TOP) { + METAL_RMR(METAL_REG_MPTB_XMEM, addr); + *pgshift = REGION_XMEM_PGSHIFT; + } else { + return EINVAL; + } + + *paddr = addr; + + return 0; +} + +// +// finds the corresponding pte to va given the page table base +// uva: untained virtual address +// +static MCODE int +vmm_get_pd(paddr_t ptb, unsigned int pgshift, vaddr_t uva, paddr_t * out) +{ + const uint64_t pfn = vm_get_pfn(uva, pgshift); + const uint64_t vahash = vm_vahash(pfn); + ASSERT(vahash < VM_PTBUCKETS); + const paddr_t pte = ptb + vahash * sizeof(struct vmpte); + + paddr_t cur; + METAL_PLDR64(cur, pte + offsetof(struct vmpte, first)); + 
+ vaddr_t curvaddr; + while (cur != (paddr_t)NULL) { + METAL_PLDR64(curvaddr, cur + offsetof(struct vmpd, vaddr)); + if (vm_get_pfn(curvaddr, pgshift) == pfn) { + *out = cur; + return 0; + } + METAL_PLDR64(cur, cur + offsetof(struct vmpd, next)); + } + + return ENOENT; +} + +// +// populates the tlb of a corresponding page +// +// uva: untainted virtual address +// +// returns: EINVAL if translation failure +// 0 if success +// +static MCODE int +vmm_map_page(vaddr_t uva) +{ + int status; + paddr_t ptb; + unsigned int pgshift; + + status = vmm_get_ptb(uva, &ptb, &pgshift); + if (status != 0) { + return status; + } + + paddr_t pd; + status = vmm_get_pd(ptb, pgshift, uva, &pd); + if (status != 0) { + return status; + } + + struct vmpd vmpd; + METAL_PLDR64(vmpd.attr, pd + offsetof(struct vmpd, attr)); + METAL_PLDR64(vmpd.paddr, pd + offsetof(struct vmpd, paddr)); + + // access override bits + //AO REG for Kernel: + // entry 0 (kernel RO): RO + // entry 1 (kernel RW): RW + // entry 2 (user RO): RW + // entry 3 (user RW): RW + + // AO REG for User: + // entry 0 (kernel RO): NA + // entry 1 (kernel RW): NA + // entry 2 (user RO): RO + // entry 3 (user RW): RW + const unsigned int aoid = VMPD_ATTR_AO_GET(vmpd.attr); + const unsigned int dev = vmpd.attr & VMPD_ATTR_DEV; + + const vaddr_t pgvaddr = vm_get_pgbase(uva, VM_MAP_PAGE_SHIFT); + const vaddr_t pgvoffset = pgvaddr - vm_get_pgbase(pgvaddr, pgshift); + const paddr_t pgpaddr = vm_get_pgbase(vmpd.paddr + pgvoffset, VM_MAP_PAGE_SHIFT); + + // type Page, grainSize 12 + const regval_t desc = ((0ull << 54) | // xn + (0ull << 53) | // pxn + (pgpaddr) | // pfn (bits 47:12) + (0 << 11) | // global page + ((dev ? 
0b00 : 0b11) << 8) | // inner-shareable for normal memory + (0b00 << 6) | // ap - AP[2:1] = b00, r/w for privileged, will be overriden by AO + (0b0 << 5) | // secure + (0b0 << 2) | // mair index = 0 (dummy) + (0b11)); // block/page desc + valid + +#define MAIR_NORMAL (0b11111111ul) +#define MAIR_DEV (0b0ul) + const regval_t extIAttrs = + (1 << 0) | // itlb + (0b01 << 1) | // el1 + (0b11 << 3) | // translv = 3 + (0b00 << 5) | // 4k page + (0x0 << 7) | // asid = 0 + (0 << 23) | // hyp = 0 + (0 << 24) | // vmid + (1ul << 40) | // ao = 1 + ((regval_t)aoid << 41) | // aoid + ((dev ? MAIR_DEV : MAIR_NORMAL) << 43) | // mair + (1ul << 51) | // ns — NOTE(review): bit is set to 1 but was annotated "ns = 0"; verify intended value + (1ul << 52); // nstid = 1 + + METAL_WMR(METAL_REG_MR0, desc); + METAL_WMR(METAL_REG_MR1, extIAttrs); + METAL_WMR(METAL_REG_MR2, pgvaddr); + METAL_WTLB(METAL_REG_MR0, METAL_REG_MR1, METAL_REG_MR2); + + const regval_t extDAttrs = + (0 << 0) | // dtlb + (0b01 << 1) | // el1 + (0b11 << 3) | // translv = 3 + (0b00 << 5) | // 4k page + (0x0 << 7) | // asid = 0 + (0 << 23) | // hyp = 0 + (0 << 24) | // vmid + (1ul << 40) | // ao = 1 + ((regval_t)aoid << 41) | // aoid + ((dev ?
MAIR_DEV : MAIR_NORMAL) << 43) | // mair + (1ul << 51) | // ns — NOTE(review): bit is set to 1 but was annotated "ns = 0"; verify intended value + (1ul << 52); // nstid = 1 + METAL_WMR(METAL_REG_MR1, extDAttrs); + METAL_WTLB(METAL_REG_MR0, METAL_REG_MR1, METAL_REG_MR2); + + return 0; +} + +// flag: skip exception intercept on the next instruction upon mreturn +IMPL_MROUTINE(mrt_pf_handler, 1) +{ + vaddr_t vaddr; + + METAL_RMR(METAL_REG_MER2, vaddr); + vmm_map_page(vaddr); +} + +IMPL_MROUTINE(mrt_set_mptb, 0) +{ + unsigned int idx; + int ret = 0; + METAL_MROUTINE_GETARG(0, idx); + + regval_t mptb; + METAL_MROUTINE_GETARG(1, mptb); + switch (idx) { + case MRT_SET_MPTB_USER: { + METAL_WMR(METAL_REG_MPTB_USER, mptb); + break; + } + case MRT_SET_MPTB_DMAP: { + METAL_WMR(METAL_REG_MPTB_DMAP, mptb); + break; + } + case MRT_SET_MPTB_XMEM: { + METAL_WMR(METAL_REG_MPTB_XMEM, mptb); + break; + } + default: { + ret = EINVAL; + } + } + + METAL_MROUTINE_SETRET(0, ret); +} + + +IMPL_MROUTINE(mrt_set_mtp, 0) +{ + uint64_t mtp; + METAL_MROUTINE_GETARG(0, mtp); + METAL_WMR(METAL_REG_MTP, mtp); +} \ No newline at end of file diff --git a/sys/arm64/paging.c b/sys/arm64/paging.c new file mode 100644 index 0000000..1e3bf8f --- /dev/null +++ b/sys/arm64/paging.c @@ -0,0 +1,94 @@ +#include "include/pmap.h" +#include +#include +#include +#include + +#include + +#include + +// page table for boot +static struct vmpt boot_pt; +#define BOOT_PD_NUM (4) +static struct vmpd boot_pd[BOOT_PD_NUM]; +static struct vmpd boot_dev_pd[BOOT_PD_NUM]; + +void +vm_insert_pd(struct vmpt * pt, struct vmpd * pd, unsigned int pgshift, paddr_t paddr) +{ + const uint64_t pfn = vm_get_pfn(pd->vaddr, pgshift); + int hash = vm_vahash(pfn); + struct vmpte * vmpte = &pt->entries[hash]; + pd->next = vmpte->first; + vmpte->first = paddr; +} + +void paging_init() +{ + int ret; + memset(&boot_pd, 0, sizeof(boot_pd)); + memset(&boot_pt, 0, sizeof(boot_pt)); + + // ident map the first 4GB + paddr_t cur = 0; + for (int i = 0; i < BOOT_PD_NUM; i++) { + boot_pd[i].attr = VMPD_ATTR_P | VMPD_ATTR_AO_KRW;
+ boot_pd[i].paddr = cur; + boot_pd[i].vaddr = MEM_DIRECTMAP_BASE + cur; + cur += (1 << REGION_DMAP_PGSHIFT); + + vm_insert_pd(&boot_pt, &boot_pd[i], REGION_DMAP_PGSHIFT, (paddr_t)DMVA2PA(&boot_pd[i])); + } + + // ident map the first 4GB to device memory space + cur = 0; + for (int i = 0; i < BOOT_PD_NUM; i++) { + boot_dev_pd[i].attr = VMPD_ATTR_P | VMPD_ATTR_DEV | VMPD_ATTR_AO_KRW; + boot_dev_pd[i].paddr = cur; + boot_dev_pd[i].vaddr = MEM_DIRECTMAP_DEV_BASE + cur; + cur += (1 << REGION_DMAP_PGSHIFT); + + vm_insert_pd(&boot_pt, &boot_dev_pd[i], REGION_DMAP_PGSHIFT, (paddr_t)DMVA2PA(&boot_dev_pd[i])); + } + + // set page table base + paddr_t ptb = (paddr_t)DMVA2PA(&boot_pt); + int idx = MRT_SET_MPTB_DMAP; + METAL_MROUTINE_SETARG(MRT_SET_MPTB_ARG_IDX, idx); + METAL_MROUTINE_SETARG(MRT_SET_MPTB_ARG_PTB, ptb); + METAL_MENTER(MRT_SET_MPTB_IDX); + METAL_MROUTINE_GETRET(MRT_SET_MPTB_RET_STATUS, ret); + if (ret != 0) { + hlt(); + } + + // set page table attribute override bits + uint64_t mtp = MTP_KERNEL; + METAL_MROUTINE_SETARG(MRT_SET_MTP_ARG_MTP, mtp); + METAL_MENTER(MRT_SET_MTP_IDX); + +// // set MAIR +// #define MAIR_VAL ((0b11111111) | (0b00000000 << 8)) +// // 0 = b11111111 = Normal, Inner/Outer WB/WA/RA +// // 1 = b00000000 = Device-nGnRnE +// __asm__ volatile ( +// "ldr x0, =(" METAL_STR(MAIR_VAL) ");" +// "msr mair_el1, x0;" +// : +// : +// : "x0" +// ); + + + // reset page tables + uint64_t zero = 0; + __asm__ volatile ( + "msr ttbr0_el1, %x0;" + "msr ttbr1_el1, %x0;" + "TLBI VMALLE1;" + : + : "r" (zero) + : + ); +} \ No newline at end of file diff --git a/sys/arm64/pmap.c b/sys/arm64/pmap.c index ef8d32e..0914179 100644 --- a/sys/arm64/pmap.c +++ b/sys/arm64/pmap.c @@ -249,7 +249,7 @@ PMap_SystemLookup(uint64_t va, PageEntry **entry, int size) /** * PMap_SystemLMap -- * - * Map a range of large (2MB) physical pages to virtual pages in the kernel + * Map a range of large (64MB) physical pages to virtual pages in the kernel * address space that is shared by all 
processes. * * @param [in] phys Physical address. @@ -263,7 +263,7 @@ PMap_SystemLookup(uint64_t va, PageEntry **entry, int size) bool PMap_SystemLMap(uint64_t phys, uint64_t virt, uint64_t lpages, uint64_t flags) { - NOT_IMPLEMENTED(); + return true; } @@ -300,11 +300,11 @@ PMap_SystemUnmap(uint64_t virt, uint64_t pages) return false; } -static uint64_t -AddrFromIJKL(uint64_t i, uint64_t j, uint64_t k, uint64_t l) -{ - return (i << 39) | (j << HUGE_PGSHIFT) | (k << LARGE_PGSHIFT) | (l << PGSHIFT); -} +// static uint64_t +// AddrFromIJKL(uint64_t i, uint64_t j, uint64_t k, uint64_t l) +// { +// return (i << 39) | (j << HUGE_PGSHIFT) | (k << LARGE_PGSHIFT) | (l << PGSHIFT); +// } void PMap_Dump(AS *space) diff --git a/sys/arm64/thread.c b/sys/arm64/thread.c index f7f58f3..9a939d0 100644 --- a/sys/arm64/thread.c +++ b/sys/arm64/thread.c @@ -49,7 +49,7 @@ ThreadEnterUserLevelCB(uintptr_t arg1, uintptr_t arg2, uintptr_t arg3) memset(&tf, 0, sizeof(tf)); - Trap_Pop(&tf); + //Trap_Pop(&tf); } void diff --git a/sys/arm64/time.c b/sys/arm64/time.c index f1f89cb..f728ff1 100644 --- a/sys/arm64/time.c +++ b/sys/arm64/time.c @@ -13,7 +13,7 @@ uint64_t Time_GetTSC() { uint64_t ui; - asm volatile("mrs %0, CNTVCT_EL0" : "=&r" (ui)); + __asm__ volatile("mrs %0, CNTVCT_EL0" : "=&r" (ui)); return ui; } @@ -21,7 +21,7 @@ uint64_t Time_GetTSCFreq() { uint64_t ui; - asm volatile("mrs %0, CNTFRQ_EL0" : "=&r" (ui)); + __asm__ volatile("mrs %0, CNTFRQ_EL0" : "=&r" (ui)); return ui; } diff --git a/sys/dev/arm64/uart.c b/sys/dev/arm64/uart.c index ae0bc3d..43a14fb 100644 --- a/sys/dev/arm64/uart.c +++ b/sys/dev/arm64/uart.c @@ -1,6 +1,7 @@ #include "uart.h" #include "sys/kassert.h" #include +#include "machine/pmap.h" #define UART_DR_OFFSET (0x000) #define UART_FR_OFFSET (0x018) @@ -26,7 +27,7 @@ struct uart { }; static struct uart g_uart0 = { - .base = 0x1c090000, + .base = DEVPA2VA(0x1c090000), .baud = 115200, .clock = 24000000, .dbits = 8, diff --git a/sys/include/cdefs.h 
b/sys/include/cdefs.h index 2574fde..d11da40 100644 --- a/sys/include/cdefs.h +++ b/sys/include/cdefs.h @@ -10,6 +10,8 @@ #define INLINE inline #define ALWAYS_INLINE __attribute__((__always_inline__)) +#define ALIGNED(x) __attribute__((aligned(x))) + #define NO_RETURN __attribute__((noreturn)) #define UNREACHABLE __builtin_unreachable @@ -41,5 +43,8 @@ #define __printflike(_fmt, _var) __attribute__((__format__(__printf__, _fmt, _var))) +#define offsetof(st, m) \ + ((size_t)((char *)&((st *)0)->m - (char *)0)) + #endif /* __CDEFS_H__ */ diff --git a/sys/include/types.h b/sys/include/types.h index 491c71b..3a0e730 100644 --- a/sys/include/types.h +++ b/sys/include/types.h @@ -2,6 +2,8 @@ #ifndef __SYS_TYPES_H__ #define __SYS_TYPES_H__ +#include + typedef signed char int8_t; typedef signed short int16_t; typedef signed int int32_t; @@ -27,6 +29,9 @@ typedef uint64_t suseconds_t; typedef uint16_t pid_t; +typedef uintptr_t paddr_t; +typedef uintptr_t vaddr_t; + #define NULL ((void *)0) #endif /* __SYS_TYPES_H__ */