large TLB entries

quackerd 2024-10-18 04:53:11 -04:00
parent c156515ce0
commit 48bb727881
7 changed files with 133 additions and 78 deletions

View File

@@ -74,7 +74,7 @@ _Static_assert(sizeof(struct vmpt) == PGSIZE);
// #define VMPD_ATTR_CA_NORMAL (VMPD_ATTR_CA_MAKE(0ull))
// #define VMPD_ATTR_CA_DEVICE (VMPD_ATTR_CA_MAKE(1ull))
#define MTP_KERNEL (0b0010 | (0b0000 << 4) | (0b0010 << 8) | (0b0000 << 12))
#define MTP_KERNEL (0b0010 | (0b0000 << 4) | (0b0000 << 8) | (0b0000 << 12))
#define MTP_USER (0b0010 | (0b0000 << 4) | (0b0011 << 8) | (0b0010 << 12))
struct vmpd {
@@ -103,5 +103,15 @@ vm_get_pgbase(vaddr_t va, unsigned int pgshift)
return va & ~((1ull << pgshift) - 1);
}
static ALWAYS_INLINE inline void
vm_insert_pd(struct vmpt * pt, struct vmpd * pd, unsigned int pgshift, paddr_t paddr)
{
const uint64_t pfn = vm_get_pfn(pd->vaddr, pgshift);
int hash = vm_vahash(pfn);
struct vmpte * vmpte = &pt->entries[hash];
pd->next = vmpte->first;
vmpte->first = paddr;
}
void paging_init();
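
Note: the commit moves vm_insert_pd into the header as an always-inline push onto a hash bucket keyed by the page frame number. For orientation, a lookup counterpart might look like the sketch below. It is not part of this commit; it assumes an empty bucket stores 0 in first and that DMPA2VA maps the stored physical next pointers back into the direct map.

// Hypothetical lookup counterpart to vm_insert_pd (a sketch, not in this
// commit): hash the PFN the same way, then walk the bucket's singly
// linked chain of descriptors until one covers the requested address.
static ALWAYS_INLINE inline struct vmpd *
vm_lookup_pd(struct vmpt * pt, vaddr_t va, unsigned int pgshift)
{
const uint64_t pfn = vm_get_pfn(va, pgshift);
struct vmpte * vmpte = &pt->entries[vm_vahash(pfn)];
paddr_t cur = vmpte->first;
while (cur != 0) { // assumes 0 terminates the chain
struct vmpd * pd = (struct vmpd *)DMPA2VA(cur);
if (vm_get_pfn(pd->vaddr, pgshift) == pfn)
return pd;
cur = pd->next;
}
return NULL; // no descriptor for this PFN
}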

View File

@@ -90,29 +90,35 @@ void Machine_Init()
kprintf("Initialized GIC.\n");
// enable hardware timer
__asm__ volatile (
"mrs x1, CNTFRQ_EL0;"
"msr CNTP_TVAL_EL0, x1;"
"mov x0, #1;"
"msr CNTP_CTL_EL0, x0;"
"msr DAIFClr, #0b1111;"
:
:
: "x0", "x1"
);
// __asm__ volatile (
// "mrs x1, CNTFRQ_EL0;"
// "msr CNTP_TVAL_EL0, x1;"
// "mov x0, #1;"
// "msr CNTP_CTL_EL0, x0;"
// "msr DAIFClr, #0b1111;"
// :
// :
// : "x0", "x1"
// );
while(1){
hlt();
}
// while(1){
// hlt();
// }
Machine_SyscallInit();
//Machine_SyscallInit();
/*
* Initialize Memory Allocation and Virtual Memory
*/
PAlloc_AddRegion(DMPA2VA(16*1024*1024), 16*1024*1024);
pdcache_init();
while(1){
hlt();
}
PMap_Init();
while(1){
hlt();
}
XMem_Init();
PAlloc_LateInit();
MachineBoot_AddMem();

View File

@@ -28,6 +28,17 @@ static uintptr_t memRegionStart[MAX_REGIONS];
static uintptr_t memRegionLen[MAX_REGIONS];
static int memRegionIdx;
static void
machineboot_init_mem_regions()
{
// kernel loaded at 4MB
// 2GB to 3GB
memRegionStart[0] = 0x80000000;
// 1GB
memRegionLen[0] = 0x40000000;
memRegionIdx = 1;
}
void
MachineBoot_Entry(UNUSED unsigned long magic, UNUSED unsigned long addr)
{
@@ -35,6 +46,9 @@ MachineBoot_Entry(UNUSED unsigned long magic, UNUSED unsigned long addr)
mtl_init();
paging_init();
// initialize memory regions
machineboot_init_mem_regions();
// Main initialization
Machine_Init();

View File

@@ -1,3 +1,4 @@
#include "machine/cpu.h"
#include <machine/metal.h>
#include <machine/mrt.h>
#include <machine/paging.h>
@@ -35,6 +36,24 @@ vmm_get_ptb(vaddr_t uva, paddr_t * paddr, unsigned int * pgshift)
return 0;
}
static inline MCODE void
vmm_calc_tlb_bits(unsigned int pgshift, unsigned int * extlowbits, unsigned int * tlblowbits)
{
if (pgshift >= 30) {
// 1GB
*extlowbits = (0b00ull << 5) | (0b01ull << 3); // 4k granule + lv1 translv (1GB)
*tlblowbits = 0b01; // block
} else if (pgshift >= 21) {
// 2MB
*extlowbits = (0b00ull << 5) | (0b10ull << 3); // 4k granule + lv2 translv (2MB)
*tlblowbits = 0b01; // block
} else {
// 4k
*extlowbits = (0b00ull << 5) | (0b11ull << 3); // 4k granule + lv3 translv (4KB)
*tlblowbits = 0b11; // page
}
}
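
The thresholds mirror the 4 KiB-granule translation geometry: level-1 blocks cover 1 GiB (pgshift >= 30), level-2 blocks cover 2 MiB (pgshift >= 21), and everything smaller falls back to a level-3 4 KiB page. Illustrative calls (output values read directly off the branches above):

// Illustrative use (values follow the branches above):
unsigned int extlowbits, tlblowbits;
vmm_calc_tlb_bits(30, &extlowbits, &tlblowbits); // 1GB: lv1, block (tlblowbits == 0b01)
vmm_calc_tlb_bits(21, &extlowbits, &tlblowbits); // 2MB: lv2, block (tlblowbits == 0b01)
vmm_calc_tlb_bits(12, &extlowbits, &tlblowbits); // 4KB: lv3, page (tlblowbits == 0b11)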
//
// finds the corresponding pte to va given the page table base
// uva: untainted virtual address
@@ -115,6 +134,8 @@ vmm_map_page(vaddr_t uva, int itlb)
const vaddr_t pgvaddr = vm_get_pgbase(uva, VM_MAP_PAGE_SHIFT);
const vaddr_t pgvoffset = pgvaddr - vm_get_pgbase(pgvaddr, pgshift);
const paddr_t pgpaddr = vm_get_pgbase(vmpd.paddr + pgvoffset, VM_MAP_PAGE_SHIFT);
unsigned int extbits, tlbbits;
vmm_calc_tlb_bits(pgshift, &extbits, &tlbbits);
// type Page, grainSize 12
const regval_t desc = ((0ull << 54) | // xn
@@ -125,26 +146,25 @@ vmm_map_page(vaddr_t uva, int itlb)
(0b00 << 6) | // ap - AP[2:1] = b00, r/w for privileged, will be overridden by AO
(0b0 << 5) | // secure
(0b0 << 2) | // mair index = 0 (dummy)
(0b11)); // block/page desc + valid
tlbbits); // calculated page / block
#define MAIR_NORMAL (0b11111111ul)
#define MAIR_DEV (0b0ul)
const regval_t extIAttrs =
((itlb & 0x1) << 0) | // itlb
const regval_t extAttrs =
((itlb & 0x1) << 0) | // itlb or dtlb
(0b01 << 1) | // el1
(0b11 << 3) | // translv = 3
(0b00 << 5) | // 4k page
extbits | // calculated page size
(0x0 << 7) | // asid = 0
(0 << 23) | // hyp = 0
(0 << 24) | // vmid
(1ul << 40) | // ao = 1
(1ull << 40) | // ao = 1
((regval_t)aoid << 41) | // aoid
((dev ? MAIR_DEV : MAIR_NORMAL) << 43) | // mair
(1ul << 51) | // ns = 0
(1ul << 52); // nstid = 1
(1ull << 51) | // ns = 0
(1ull << 52); // nstid = 1
METAL_WMR(METAL_REG_MR0, desc);
METAL_WMR(METAL_REG_MR1, extIAttrs);
METAL_WMR(METAL_REG_MR1, extAttrs);
METAL_WMR(METAL_REG_MR2, pgvaddr);
METAL_WTLB(METAL_REG_MR0, METAL_REG_MR1, METAL_REG_MR2);
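
To read the rewritten fields together (a sketch, not part of the commit): for a 2 MiB mapping, vmm_calc_tlb_bits hands back bits that land in the two registers as follows.

// Hypothetical trace for a 2MB mapping, using the values computed above:
unsigned int extbits, tlbbits;
vmm_calc_tlb_bits(21, &extbits, &tlbbits);
// desc: low bits become tlbbits == 0b01, a valid *block* descriptor,
// where the old code always wrote 0b11 (valid page descriptor)
// extAttrs: extbits == (0b00ull << 5) | (0b10ull << 3) selects the 4k
// granule and lv2 translv, replacing the removed hard-coded
// "translv = 3" and "4k page" fields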

View File

@@ -1,4 +1,3 @@
#include "include/pmap.h"
#include <machine/metalp.h>
#include <machine/paging.h>
#include <machine/pmap.h>
@@ -14,15 +13,6 @@ static struct vmpt boot_pt;
static struct vmpd boot_pd[BOOT_PD_NUM];
static struct vmpd boot_dev_pd[BOOT_PD_NUM];
void
vm_insert_pd(struct vmpt * pt, struct vmpd * pd, unsigned int pgshift, paddr_t paddr)
{
const uint64_t pfn = vm_get_pfn(pd->vaddr, pgshift);
int hash = vm_vahash(pfn);
struct vmpte * vmpte = &pt->entries[hash];
pd->next = vmpte->first;
vmpte->first = paddr;
}
void paging_init()
{

View File

@@ -8,6 +8,7 @@
#include <sys/thread.h>
#include <sys/mutex.h>
#include <sys/spinlock.h>
#include <machine/pdcache.h>
struct pdcache_info {
void * next;

View File

@@ -8,6 +8,7 @@
#include <sys/kdebug.h>
#include <sys/kmem.h>
#include <sys/mp.h>
#include <sys/mman.h>
#include <machine/cpu.h>
#include <machine/cpuop.h>
@@ -20,6 +21,27 @@
AS systemAS;
AS *currentAS[MAX_CPUS];
static uint64_t
PMapProtToPdattr(uint64_t flags, int priv)
{
uint64_t pdattr = 0;
ASSERT(flags != 0);
if ((flags & PROT_READ) && (flags & PROT_WRITE)) {
pdattr |= VMPD_ATTR_P | (priv ? VMPD_ATTR_AO_KRW : VMPD_ATTR_AO_URW);
} else if (flags & PROT_READ) {
pdattr |= VMPD_ATTR_P | (priv ? VMPD_ATTR_AO_KRO : VMPD_ATTR_AO_URO);
}
ASSERT(pdattr != 0);
if (flags & MAP_NOCACHE)
pdattr |= VMPD_ATTR_DEV;
return pdattr;
}
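
Illustrative translations (flag names from sys/mman.h as included above; call sites invented for this note): a write-only or exec-only request leaves pdattr at 0 and trips the second ASSERT, since only R and R/W combinations are handled.

// Hypothetical examples of the PROT/MAP -> pdattr translation:
uint64_t krw = PMapProtToPdattr(PROT_READ | PROT_WRITE, 1); // VMPD_ATTR_P | VMPD_ATTR_AO_KRW
uint64_t uro = PMapProtToPdattr(PROT_READ, 0); // VMPD_ATTR_P | VMPD_ATTR_AO_URO
uint64_t dev = PMapProtToPdattr(PROT_READ | PROT_WRITE | MAP_NOCACHE, 1); // adds VMPD_ATTR_DEV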
/**
* PMapAllocPageTable --
*
@@ -58,9 +80,9 @@ PMap_Init()
// Setup system mappings
PMap_SystemLMap(0x0, MEM_DIRECTMAP_BASE,
128, VMPD_ATTR_P | VMPD_ATTR_AO_KRW); // 128GB RWX
128, PROT_ALL); // 128GB RWX
PMap_SystemLMap(0x0, MEM_DIRECTMAP_DEV_BASE,
128, VMPD_ATTR_P | VMPD_ATTR_AO_KRW | VMPD_ATTR_DEV); // 128GB Device
128, PROT_ALL | MAP_NOCACHE); // 128GB Device
PMap_LoadAS(&systemAS);
@@ -166,6 +188,29 @@ PMap_LoadAS(AS *space)
flushtlb();
}
static bool
PMap_MapPage(uint64_t phys, uint64_t virt, uint64_t pages, uint32_t pgshift, struct vmpt * tbl, uint64_t flags)
{
unsigned int i = 0;
while (i < pages) {
// allocate a new pd
struct vmpd * pd = pdcache_alloc();
if (pd == NULL) {
// XXX: if this fails partway through, pages mapped by earlier
// iterations are left in place (side effects)
return false;
}
pd->vaddr = (virt & ~((1ull << pgshift) - 1)) + i * (1ull << pgshift);
pd->paddr = (phys & ~((1ull << pgshift) - 1)) + i * (1ull << pgshift);
pd->attr = flags;
vm_insert_pd(tbl, pd, pgshift, DMVA2PA(pd));
i++;
}
return true;
}
/**
* PMap_Translate --
@@ -197,10 +242,13 @@ PMap_Translate(UNUSED AS *space, UNUSED uintptr_t va)
* @retval false On failure
*/
bool
PMap_Map(UNUSED AS *as, UNUSED uint64_t phys, UNUSED uint64_t virt, UNUSED uint64_t pages, UNUSED uint64_t flags)
PMap_Map(AS *as, uint64_t phys, uint64_t virt, uint64_t pages, uint64_t flags)
{
NOT_IMPLEMENTED();
return true;
// virt address must be within the usermap region
if (virt < MEM_USERSPACE_BASE || virt + pages * (1ull << PGSHIFT) > MEM_USERSPACE_TOP) {
return false;
}
return PMap_MapPage(phys, virt, pages, PGSHIFT, as->user_tbl, PMapProtToPdattr(flags, false));
}
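
A hypothetical call for illustration (the physical address, virtual offset, and page count are invented): mapping 16 small pages read/write into a user address space; failure means either an out-of-range virt or pdcache exhaustion partway through (see the XXX note in PMap_MapPage).

// Hypothetical usage (illustrative values only):
uint64_t va = MEM_USERSPACE_BASE + 0x10000;
if (!PMap_Map(as, /* phys */ 0x80200000ull, va, /* pages */ 16,
PROT_READ | PROT_WRITE)) {
// range rejected, or pdcache_alloc failed partway through
}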
/**
@@ -278,27 +326,10 @@ bool
PMap_SystemLMap(uint64_t phys, uint64_t virt, uint64_t lpages, uint64_t flags)
{
// virt address must be within the dmap (l map) region
if (virt < MEM_DIRECTMAP_BASE && virt >= MEM_DIRECTMAP_TOP) {
if (virt < MEM_DIRECTMAP_BASE || virt + lpages * (1ull << LARGE_PGSHIFT) > MEM_DIRECTMAP_TOP) {
return false;
}
unsigned int i = 0;
while (i < lpages) {
// allocate a new pd
struct vmpd * pd = pdcache_alloc();
if (pd == NULL) {
// XXX: if this fails partway through, pages mapped by earlier
// iterations are left in place (side effects)
return false;
}
pd->vaddr = virt & ~((1ull << REGION_DMAP_PGSHIFT) - 1) + i * (1ull << REGION_DMAP_PGSHIFT);
pd->paddr = phys & ~((1ull << REGION_DMAP_PGSHIFT) - 1) + i * (1ull << REGION_DMAP_PGSHIFT);
pd->attr = flags;
// vm_insert_pd(systemAS.dmap_tbl, pd, REGION_DMAP_PGSHIFT, DMVA2PA(pd));
}
return true;
return PMap_MapPage(phys, virt, lpages, LARGE_PGSHIFT, systemAS.dmap_tbl, PMapProtToPdattr(flags, true));
}
/**
@@ -319,28 +350,11 @@ bool
PMap_SystemMap(uint64_t phys, uint64_t virt, uint64_t pages, uint64_t flags)
{
// virt address must be within the dmap (l map) region
if (virt < MEM_XMAP_BASE && virt >= MEM_XMAP_TOP) {
if (virt < MEM_XMAP_BASE || virt + pages * (1ull << PGSHIFT) > MEM_XMAP_TOP) {
return false;
}
unsigned int i = 0;
while (i < pages) {
// allocate a new pd
struct vmpd * pd = pdcache_alloc();
if (pd == NULL) {
// XXX: if this fails partway through, pages mapped by earlier
// iterations are left in place (side effects)
return false;
}
pd->vaddr = virt & ~((1ull << REGION_XMEM_PGSHIFT) - 1) + i * (1ull << REGION_XMEM_PGSHIFT);
pd->paddr = phys & ~((1ull << REGION_XMEM_PGSHIFT) - 1) + i * (1ull << REGION_XMEM_PGSHIFT);
pd->attr = flags;
// vm_insert_pd(systemAS.xmem_tbl, pd, REGION_XMEM_PGSHIFT, DMVA2PA(pd));
}
return true;
return PMap_MapPage(phys, virt, pages, PGSHIFT, systemAS.xmem_tbl, PMapProtToPdattr(flags, true));
}
/**