pmap stuff
parent 48bb727881
commit a7ed1c3ffd
@@ -12,19 +12,10 @@
  * Page Tables
  */
 
-// #define PGNUMMASK 0xFFFFFFFFFFFFF000ULL
-
-// #define PGIDXSHIFT 9
-// #define PGIDXMASK (512 - 1)
-
-#define PGSHIFT 14
+#define PGSHIFT (14)
 #define PGSIZE (1 << PGSHIFT)
 #define PGMASK (PGSIZE - 1)
 
 #define LARGE_PGSHIFT 26
 #define LARGE_PGSIZE (1 << LARGE_PGSHIFT)
 #define LARGE_PGMASK (LARGE_PGSIZE - 1)
 
 #define ROUNDUP_PGSIZE(x) (((x) + LARGE_PGSIZE - 1) & ~LARGE_PGMASK)
 #define ROUNDDOWN_PGSIZE(x) ((x) & ~LARGE_PGMASK)
 
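Note: with the values in this hunk (PGSHIFT 14, LARGE_PGSHIFT 26), base pages are 16 KiB and large pages are 64 MiB, and ROUNDUP_PGSIZE/ROUNDDOWN_PGSIZE align to the large-page size despite their names. A small standalone sketch of that arithmetic, for illustration only, not part of the commit:

    #include <assert.h>

    /* Mirrors the macros above: PGSHIFT = 14 (16 KiB base pages) and
     * LARGE_PGSHIFT = 26 (64 MiB large pages). Illustration only. */
    #define PGSHIFT (14)
    #define PGSIZE (1 << PGSHIFT)
    #define LARGE_PGSHIFT 26
    #define LARGE_PGSIZE (1 << LARGE_PGSHIFT)
    #define LARGE_PGMASK (LARGE_PGSIZE - 1)
    #define ROUNDUP_PGSIZE(x) (((x) + LARGE_PGSIZE - 1) & ~LARGE_PGMASK)
    #define ROUNDDOWN_PGSIZE(x) ((x) & ~LARGE_PGMASK)

    int main(void)
    {
        assert(PGSIZE == 16 * 1024);              /* 16 KiB base pages  */
        assert(LARGE_PGSIZE == 64 * 1024 * 1024); /* 64 MiB large pages */
        /* Both ROUND* macros align to the large-page boundary. */
        assert(ROUNDUP_PGSIZE(1) == LARGE_PGSIZE);
        assert(ROUNDDOWN_PGSIZE(LARGE_PGSIZE + 5) == LARGE_PGSIZE);
        return 0;
    }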
@@ -47,8 +47,6 @@
 #define MEM_XMAP_LEN 0x0000002000000000ULL
 #define MEM_XMAP_TOP (MEM_XMAP_BASE + MEM_XMAP_LEN)
 
-#define PPN2DMVA(ppn) (((ppn) << PGSIZE) + MEM_DIRECTMAP_BASE)
-#define DMVA2PPN(dmva) (((uintptr_t)(dmva) - MEM_DIRECTMAP_BASE) >> PGSIZE)
 #define DMVA2PA(dmva) ((uintptr_t)(dmva) - MEM_DIRECTMAP_BASE)
 #define DMPA2VA(pa) ((uintptr_t)(pa) + MEM_DIRECTMAP_BASE)
 #define DEVVA2PA(devva) ((uintptr_t)(devva) - MEM_DIRECTMAP_DEV_BASE)
 
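Note: DMVA2PA and DMPA2VA are inverses; they translate between a physical address and its alias in the direct-map window by subtracting or adding MEM_DIRECTMAP_BASE. A minimal sketch, assuming a placeholder base value (the real constant is defined elsewhere in this header):

    #include <assert.h>
    #include <stdint.h>

    /* Placeholder base address, chosen only for the illustration. */
    #define MEM_DIRECTMAP_BASE 0xFFFF800000000000ULL

    #define DMVA2PA(dmva) ((uintptr_t)(dmva) - MEM_DIRECTMAP_BASE)
    #define DMPA2VA(pa)   ((uintptr_t)(pa) + MEM_DIRECTMAP_BASE)

    int main(void)
    {
        uintptr_t pa = 0x1000000;   /* 16 MiB physical address      */
        uintptr_t va = DMPA2VA(pa); /* its direct-map virtual alias */
        assert(DMVA2PA(va) == pa);  /* the two macros are inverses  */
        return 0;
    }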
@@ -112,9 +112,6 @@ void Machine_Init()
      */
     PAlloc_AddRegion(DMPA2VA(16*1024*1024), 16*1024*1024);
     pdcache_init();
-    while(1){
-        hlt();
-    }
     PMap_Init();
     while(1){
         hlt();
@@ -79,7 +79,7 @@ pdcache_grow()
     pdc.first = first;
     pdc.total += free;
 
-    kprintf("Growing pdcache: +%d nodes, total: %d nodes.\n", free, pdc.total);
+    kprintf("\nGrowing pdcache: +%d nodes, first: 0x%llx, free: %d nodes, total: %d nodes.\n", free, pdc.first, pdc.free, pdc.total);
 
     return 0;
 }
@@ -99,6 +99,7 @@ pdcache_alloc()
     ASSERT(pdc.free > 0 && pdc.first != NULL);
     struct pdcache_info * cur = pdc.first;
     pdc.first = cur->next;
+    pdc.free--;
     Spinlock_Unlock(&pdc.lock);
 
     memset(cur, 0, sizeof(struct vmpd));
@@ -112,7 +113,7 @@ pdcache_free(struct vmpd * pd)
 {
     Spinlock_Lock(&pdc.lock);
     struct pdcache_info * ret = (struct pdcache_info *)pd;
-    struct pdcache_info * old = pdc.first->next;
+    struct pdcache_info * old = pdc.first;
 
     pdc.first = ret;
     ret->next = old;
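Note on the pdcache_free change above: the node being returned must link to the current list head; taking pdc.first->next instead would drop the old head (and dereference NULL on an empty list). A self-contained sketch of the intended push/pop behaviour, using a simplified stand-in for struct pdcache_info:

    #include <assert.h>
    #include <stddef.h>

    /* Simplified stand-in for struct pdcache_info and the pdc free list. */
    struct node { struct node *next; };
    static struct node *first = NULL;

    static void cache_free(struct node *ret)
    {
        struct node *old = first; /* old head, as in the fixed line         */
        first = ret;              /* the returned node becomes the new head */
        ret->next = old;          /* and links to the previous head         */
    }

    static struct node *cache_alloc(void)
    {
        struct node *cur = first; /* pop the head node */
        first = cur->next;
        return cur;
    }

    int main(void)
    {
        struct node a, b;
        cache_free(&a);
        cache_free(&b);              /* list is now b -> a */
        assert(cache_alloc() == &b);
        assert(cache_alloc() == &a);
        assert(first == NULL);
        return 0;
    }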
@@ -79,10 +79,16 @@ PMap_Init()
         PANIC("Cannot allocate xmem page table");
 
     // Setup system mappings
-    PMap_SystemLMap(0x0, MEM_DIRECTMAP_BASE,
-                    128, PROT_ALL); // 128GB RWX
-    PMap_SystemLMap(0x0, MEM_DIRECTMAP_DEV_BASE,
-                    128, PROT_ALL | MAP_NOCACHE); // 128GB Device
+    if(!PMap_SystemLMap(0x0, MEM_DIRECTMAP_BASE,
+                        4, PROT_ALL)) // 128GB RWX
+    {
+        PANIC("Cannot setup direct map");
+    }
+    if(!PMap_SystemLMap(0x0, MEM_DIRECTMAP_DEV_BASE,
+                        4, PROT_ALL | MAP_NOCACHE)) // 128GB Device
+    {
+        PANIC("Cannot setup device map");
+    }
 
     PMap_LoadAS(&systemAS);
 
@@ -162,24 +168,30 @@ PMap_LoadAS(AS *space)
 
     int ret;
     // set dmap region
-    METAL_MROUTINE_SETARG(MRT_SET_MPTB_ARG_IDX, MRT_SET_MPTB_DMAP);
-    METAL_MROUTINE_SETARG(MRT_SET_MPTB_ARG_IDX, space->dmap_tbl);
+    int idx = MRT_SET_MPTB_DMAP;
+    paddr_t paddr = DMVA2PA(space->dmap_tbl);
+    METAL_MROUTINE_SETARG(MRT_SET_MPTB_ARG_IDX, idx);
+    METAL_MROUTINE_SETARG(MRT_SET_MPTB_ARG_PTB, paddr);
     METAL_MENTER(MRT_SET_MPTB_IDX);
     METAL_MROUTINE_GETRET(MRT_SET_MPTB_RET_STATUS, ret);
     if (ret != 0)
         PANIC("Failed to load DMAP page table.");
 
     // set xmem region
-    METAL_MROUTINE_SETARG(MRT_SET_MPTB_ARG_IDX, MRT_SET_MPTB_XMEM);
-    METAL_MROUTINE_SETARG(MRT_SET_MPTB_ARG_IDX, space->xmem_tbl);
+    idx = MRT_SET_MPTB_XMEM;
+    paddr = DMVA2PA(space->xmem_tbl);
+    METAL_MROUTINE_SETARG(MRT_SET_MPTB_ARG_IDX, idx);
+    METAL_MROUTINE_SETARG(MRT_SET_MPTB_ARG_PTB, paddr);
     METAL_MENTER(MRT_SET_MPTB_IDX);
     METAL_MROUTINE_GETRET(MRT_SET_MPTB_RET_STATUS, ret);
     if (ret != 0)
         PANIC("Failed to load XMEM page table.");
 
     // set userspace
-    METAL_MROUTINE_SETARG(MRT_SET_MPTB_ARG_IDX, MRT_SET_MPTB_USER);
-    METAL_MROUTINE_SETARG(MRT_SET_MPTB_ARG_IDX, space->user_tbl);
+    idx = MRT_SET_MPTB_USER;
+    paddr = DMVA2PA(space->user_tbl);
+    METAL_MROUTINE_SETARG(MRT_SET_MPTB_ARG_IDX, idx);
+    METAL_MROUTINE_SETARG(MRT_SET_MPTB_ARG_PTB, paddr);
     METAL_MENTER(MRT_SET_MPTB_IDX);
     METAL_MROUTINE_GETRET(MRT_SET_MPTB_RET_STATUS, ret);
     if (ret != 0)
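Note on the PMap_LoadAS changes above: the old code stored both the region index and the table pointer into the same argument slot (MRT_SET_MPTB_ARG_IDX), and passed a direct-map virtual address where a physical one is needed; the new code uses separate ARG_IDX/ARG_PTB slots and converts with DMVA2PA. A toy, standalone model of the argument-slot part (slot names and values here are made up for the illustration, not the kernel's):

    #include <assert.h>
    #include <stdint.h>

    /* Toy model of the metal-routine argument slots. */
    enum { ARG_IDX = 0, ARG_PTB = 1, NARGS = 2 };
    static uint64_t args[NARGS];

    #define SETARG(slot, val) (args[(slot)] = (uint64_t)(val))

    int main(void)
    {
        uint64_t dmap_index  = 3;        /* stand-in for MRT_SET_MPTB_DMAP */
        uint64_t dmap_tbl_pa = 0x200000; /* stand-in for DMVA2PA(dmap_tbl) */

        /* Old pattern: both writes hit the same slot, so the index is lost. */
        SETARG(ARG_IDX, dmap_index);
        SETARG(ARG_IDX, dmap_tbl_pa);
        assert(args[ARG_IDX] == dmap_tbl_pa);

        /* Fixed pattern: index and page-table base land in separate slots. */
        SETARG(ARG_IDX, dmap_index);
        SETARG(ARG_PTB, dmap_tbl_pa);
        assert(args[ARG_IDX] == dmap_index && args[ARG_PTB] == dmap_tbl_pa);
        return 0;
    }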
@@ -188,7 +200,7 @@ PMap_LoadAS(AS *space)
     flushtlb();
 }
 
-static bool
+static void
 PMap_MapPage(uint64_t phys, uint64_t virt, uint64_t pages, uint32_t pgshift, struct vmpt * tbl, uint64_t flags)
 {
     unsigned int i = 0;
@@ -198,17 +210,19 @@ PMap_MapPage(uint64_t phys, uint64_t virt, uint64_t pages, uint32_t pgshift, struct vmpt * tbl, uint64_t flags)
         if (pd == NULL) {
             // XXX: if this fails and not the first page to be mapped
             // the this function inflicts side effects
-            return false;
+            PANIC("Out of pdcache nodes.\n");
         }
 
-        pd->vaddr = virt & ~((1ull << pgshift) - 1) + i * (1ull << pgshift);
-        pd->paddr = phys & ~((1ull << pgshift) - 1) + i * (1ull << pgshift);
+        pd->vaddr = (virt & ~((1ull << pgshift) - 1)) + i * (1ull << pgshift);
+        pd->paddr = (phys & ~((1ull << pgshift) - 1)) + i * (1ull << pgshift);
         pd->attr = flags;
 
         vm_insert_pd(tbl, pd, pgshift, DMVA2PA(pd));
         }
 
-        return true;
+        i++;
+
+        //kprintf("mapping 0x%llx to 0x%llx, attr: 0x%llx\n", pd->vaddr, pd->paddr, pd->attr);
     }
 }
 
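Note: the added parentheses in PMap_MapPage are a real fix, not style. In C, '+' binds tighter than '&', so virt & ~mask + offset parses as virt & (~mask + offset) rather than (virt & ~mask) + offset. A standalone demonstration with arbitrary illustrative values:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t pgshift = 14;              /* 16 KiB pages, as above    */
        uint64_t i = 1;                     /* second page in the run    */
        uint64_t virt = 0x12345678ULL;      /* arbitrary example address */
        uint64_t mask = (1ull << pgshift) - 1;

        /* Old form: parses as virt & (~mask + i * pagesize); here
         * ~mask + 0x4000 wraps to 0, so the whole expression collapses to 0. */
        uint64_t buggy = virt & ~mask + i * (1ull << pgshift);

        /* Fixed form: round virt down to a page boundary, then add the offset. */
        uint64_t fixed = (virt & ~mask) + i * (1ull << pgshift);

        printf("buggy = 0x%llx, fixed = 0x%llx\n",
               (unsigned long long)buggy, (unsigned long long)fixed);
        assert(buggy != fixed);
        return 0;
    }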
@@ -245,10 +259,13 @@ bool
 PMap_Map(AS *as, uint64_t phys, uint64_t virt, uint64_t pages, uint64_t flags)
 {
     // virt address must be within the usermap region
-    if (virt < MEM_USERSPACE_BASE || virt + pages * (1ull << PGSHIFT) > MEM_USERSPACE_TOP) {
+    if (virt < MEM_USERSPACE_BASE || virt + pages * (1ull << REGION_USER_PGSHIFT) > MEM_USERSPACE_TOP) {
         return false;
     }
-    return PMap_MapPage(phys, virt, pages, PGSHIFT,as->user_tbl, PMapProtToPdattr(flags, false));
+
+    PMap_MapPage(phys, virt, pages, REGION_USER_PGSHIFT,as->user_tbl, PMapProtToPdattr(flags, false));
+
+    return true;
 }
 
 /**
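Note: PMap_Map above and the two system variants below all guard the request the same way, rejecting it unless [virt, virt + pages * pagesize) lies inside the target region. A standalone sketch of that check; the BASE/TOP/shift values are placeholders, not the kernel's:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Placeholder region bounds and page shift, for illustration only. */
    #define REGION_BASE    0x0000400000000000ULL
    #define REGION_TOP     0x0000500000000000ULL
    #define REGION_PGSHIFT 14

    /* Mirrors the range check used by PMap_Map / PMap_SystemLMap / PMap_SystemMap. */
    static bool range_ok(uint64_t virt, uint64_t pages)
    {
        if (virt < REGION_BASE ||
            virt + pages * (1ull << REGION_PGSHIFT) > REGION_TOP) {
            return false;
        }
        return true;
    }

    int main(void)
    {
        assert(range_ok(REGION_BASE, 1));       /* first page fits         */
        assert(!range_ok(REGION_BASE - 1, 1));  /* starts below the region */
        assert(!range_ok(REGION_TOP - (1ull << REGION_PGSHIFT), 2)); /* overruns */
        return 0;
    }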
@@ -326,10 +343,12 @@ bool
 PMap_SystemLMap(uint64_t phys, uint64_t virt, uint64_t lpages, uint64_t flags)
 {
     // virt address must be within the dmap (l map) region
-    if (virt < MEM_DIRECTMAP_BASE || virt + lpages * (1ull << LARGE_PGSHIFT) > MEM_DIRECTMAP_TOP) {
+    if (virt < MEM_DIRECTMAP_BASE || virt + lpages * (1ull << REGION_DMAP_PGSHIFT) > MEM_DIRECTMAP_TOP) {
         return false;
     }
-    return PMap_MapPage(phys, virt, lpages, LARGE_PGSHIFT, systemAS.dmap_tbl, PMapProtToPdattr(flags, true));
+
+    PMap_MapPage(phys, virt, lpages, REGION_DMAP_PGSHIFT, systemAS.dmap_tbl, PMapProtToPdattr(flags, true));
+    return true;
 }
 
 /**
@@ -349,12 +368,13 @@ PMap_SystemLMap(uint64_t phys, uint64_t virt, uint64_t lpages, uint64_t flags)
 bool
 PMap_SystemMap(uint64_t phys, uint64_t virt, uint64_t pages, uint64_t flags)
 {
-    // virt address must be within the dmap (l map) region
-    if (virt < MEM_XMAP_BASE || virt + pages * (1ull << PGSHIFT) > MEM_XMAP_TOP) {
+    // virt address must be within the xmem region
+    if (virt < MEM_XMAP_BASE || virt + pages * (1ull << REGION_XMEM_PGSHIFT) > MEM_XMAP_TOP) {
         return false;
     }
 
-    return PMap_MapPage(phys, virt, pages, PGSHIFT, systemAS.xmem_tbl, PMapProtToPdattr(flags, true));
+    PMap_MapPage(phys, virt, pages, REGION_XMEM_PGSHIFT, systemAS.xmem_tbl, PMapProtToPdattr(flags, true));
+    return true;
 }
 
 /**