/* metal-cos/sys/arm64/pmap.c */

#include "include/cpu.h"
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <sys/kconfig.h>
#include <sys/kassert.h>
#include <sys/kdebug.h>
#include <sys/kmem.h>
#include <sys/mp.h>
#include <sys/mman.h>
#include <machine/cpu.h>
#include <machine/cpuop.h>
#include <machine/pmap.h>
#include <machine/paging.h>
#include <machine/pdcache.h>
#include <machine/mrt.h>

AS systemAS;			/* System address space shared by all processes */
AS *currentAS[MAX_CPUS];	/* Per-CPU currently loaded address space */

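/**
 * PMapProtToPdattr --
 *
 * Convert PROT_* protection bits and MAP_* flags into page descriptor
 * attributes.
 *
 * @param [in] flags Protection and mapping flags.
 * @param [in] priv Nonzero for privileged (kernel) mappings.
 *
 * @return Page descriptor attribute bits.
 */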
static uint64_t
PMapProtToPdattr(uint64_t flags, int priv)
{
	uint64_t pdattr = 0;

	ASSERT(flags != 0);
	if ((flags & PROT_READ) && (flags & PROT_WRITE)) {
		pdattr |= VMPD_ATTR_P | (priv ? VMPD_ATTR_AO_KRW : VMPD_ATTR_AO_URW);
	} else if (flags & PROT_READ) {
		pdattr |= VMPD_ATTR_P | (priv ? VMPD_ATTR_AO_KRO : VMPD_ATTR_AO_URO);
	}
	ASSERT(pdattr != 0);

	if (flags & MAP_NOCACHE)
		pdattr |= VMPD_ATTR_DEV;

	return pdattr;
}

/**
 * PMapAllocPageTable --
 *
 * Allocates and initializes a page table.
 *
 * @return Newly created page table, or NULL on allocation failure.
 */
static struct vmpt *
PMapAllocPageTable(void)
{
	struct vmpt *vmpt = PAlloc_AllocPage();

	if (vmpt) {
		memset(vmpt, 0, sizeof(struct vmpt));
	}

	return vmpt;
}

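/**
 * PMap_Init --
 *
 * Initializes the physical map subsystem: clears the per-CPU current
 * address space pointers, allocates the system page tables, establishes
 * the direct and device mappings, and loads the system address space on
 * the boot CPU.
 */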
void
PMap_Init(void)
{
	kprintf("Initializing PMAP ... ");

	// Setup global state
	for (int i = 0; i < MAX_CPUS; i++) {
		currentAS[i] = NULL;
	}

	// Allocate system page tables
	systemAS.dmap_tbl = PMapAllocPageTable();
	systemAS.xmem_tbl = PMapAllocPageTable();
	if (!systemAS.dmap_tbl)
		Panic("Cannot allocate dmap page table");
	if (!systemAS.xmem_tbl)
		Panic("Cannot allocate xmem page table");

	// Setup system mappings
	if (!PMap_SystemLMap(0x0, MEM_DIRECTMAP_BASE,
	    512, PROT_ALL)) // 128GB RWX
	{
		Panic("Cannot setup direct map");
	}
	if (!PMap_SystemLMap(0x0, MEM_DIRECTMAP_DEV_BASE,
	    512, PROT_ALL | MAP_NOCACHE)) // 128GB Device
	{
		Panic("Cannot setup device map");
	}

	PMap_LoadAS(&systemAS);
	kprintf("Done!\n");
}

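/**
 * PMap_InitAP --
 *
 * Initializes the MMU state of an application processor by loading the
 * system address space.
 */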
void
PMap_InitAP(void)
{
	PMap_LoadAS(&systemAS);
}

/**
 * PMap_NewAS --
 *
 * Create a new address space. The kernel dmap and xmem page tables are
 * shared with the system address space; only the user table is private.
 *
 * @return Newly created address space, or NULL on allocation failure.
 */
AS *
PMap_NewAS(void)
{
	AS *as = PAlloc_AllocPage();

	if (!as)
		return NULL;

	as->dmap_tbl = systemAS.dmap_tbl;
	as->xmem_tbl = systemAS.xmem_tbl;
	as->user_tbl = PMapAllocPageTable();
	if (!as->user_tbl) {
		PAlloc_Release(as);
		return NULL;
	}

	return as;
}

/**
 * PMap_DestroyAS --
 *
 * Destroys an address space and releases the physical pages.
 *
 * @param [in] space Address space to destroy.
 */
void
PMap_DestroyAS(AS *space)
{
	// XXX: free the user page tables
	UNUSED struct vmpt *pt = space->user_tbl;

	// release the address space itself
	PAlloc_Release(space);

	// XXX: release all pdcache entries
	NOT_IMPLEMENTED();
}

/**
 * PMap_CurrentAS --
 *
 * Get the current address space on this CPU.
 *
 * @return Current address space.
 */
AS *
PMap_CurrentAS(void)
{
	return currentAS[CPU()];
}

/**
 * PMap_LoadAS --
 *
 * Load an address space into the CPU. Installs the DMAP, XMEM, and user
 * page table bases through the monitor (MRT) interface and flushes the
 * TLB entries.
 *
 * @param [in] space Address space to load.
 */
void
PMap_LoadAS(AS *space)
{
	int ret;

	currentAS[CPU()] = space;

	// set dmap region
	int idx = MRT_SET_MPTB_DMAP;
	paddr_t paddr = DMVA2PA(space->dmap_tbl);
	MRT_SETARG(MRT_SET_MPTB_ARG_IDX, idx);
	MRT_SETARG(MRT_SET_MPTB_ARG_PTB, paddr);
	MENTER(MRT_SET_MPTB_IDX);
	MRT_GETRET(MRT_SET_MPTB_RET_STATUS, ret);
	if (ret != 0)
		Panic("Failed to load DMAP page table.");

	// set xmem region
	idx = MRT_SET_MPTB_XMEM;
	paddr = DMVA2PA(space->xmem_tbl);
	MRT_SETARG(MRT_SET_MPTB_ARG_IDX, idx);
	MRT_SETARG(MRT_SET_MPTB_ARG_PTB, paddr);
	MENTER(MRT_SET_MPTB_IDX);
	MRT_GETRET(MRT_SET_MPTB_RET_STATUS, ret);
	if (ret != 0)
		Panic("Failed to load XMEM page table.");

	// set user region
	idx = MRT_SET_MPTB_USER;
	paddr = DMVA2PA(space->user_tbl);
	MRT_SETARG(MRT_SET_MPTB_ARG_IDX, idx);
	MRT_SETARG(MRT_SET_MPTB_ARG_PTB, paddr);
	MENTER(MRT_SET_MPTB_IDX);
	MRT_GETRET(MRT_SET_MPTB_RET_STATUS, ret);
	if (ret != 0)
		Panic("Failed to load USER page table.");

	flushtlb();
}

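/**
 * PMap_MapPage --
 *
 * Internal helper that allocates page descriptors mapping the virtual
 * region starting at virt to the physical region starting at phys and
 * inserts them into the given page table.
 *
 * @param [in] phys Physical address.
 * @param [in] virt Virtual address.
 * @param [in] pages Number of pages to map.
 * @param [in] pgshift Log2 of the page size.
 * @param [in] tbl Page table to insert the descriptors into.
 * @param [in] flags Page descriptor attributes to apply.
 */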
static void
PMap_MapPage(uint64_t phys, uint64_t virt, uint64_t pages, uint32_t pgshift,
    struct vmpt *tbl, uint64_t flags)
{
	uint64_t i = 0;

	while (i < pages) {
		// allocate a new pd
		struct vmpd *pd = pdcache_alloc();
		if (pd == NULL) {
			// XXX: if this fails after the first page has been
			// mapped, this function has already inflicted side
			// effects
			Panic("Out of pdcache nodes.\n");
		}
		pd->vaddr = (virt & ~((1ull << pgshift) - 1)) + i * (1ull << pgshift);
		pd->paddr = (phys & ~((1ull << pgshift) - 1)) + i * (1ull << pgshift);
		pd->attr = flags;
		vm_insert_pd(tbl, pd, pgshift, DMVA2PA(pd));
		i++;
		//kprintf("mapping 0x%llx to 0x%llx, attr: 0x%llx\n", pd->vaddr, pd->paddr, pd->attr);
	}
}

/**
 * PMap_Translate --
 *
 * Translates a virtual address to a physical address in a given address
 * space.
 *
 * @param [in] space Address space we wish to look up a mapping in.
 * @param [in] va Virtual address we wish to translate.
 *
 * @return Translated physical address.
 */
uintptr_t
PMap_Translate(UNUSED AS *space, UNUSED uintptr_t va)
{
	NOT_IMPLEMENTED();
	return 0;
}

/**
 * PMap_Map --
 *
 * Map a physical to virtual mapping in an address space.
 *
 * @param [in] as Address space.
 * @param [in] phys Physical address.
 * @param [in] virt Virtual address.
 * @param [in] pages Pages to map in.
 * @param [in] flags Flags to apply to the mapping.
 *
 * @retval true On success
 * @retval false On failure
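 *
 * A usage sketch (frame_pa is a hypothetical physical frame address):
 * @code
 * PMap_Map(as, frame_pa, MEM_USERSPACE_BASE, 1, PROT_READ | PROT_WRITE);
 * @endcode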
 */
bool
PMap_Map(AS *as, uint64_t phys, uint64_t virt, uint64_t pages, uint64_t flags)
{
	// virt address must be within the usermap region
	if (virt < MEM_USERSPACE_BASE || virt + pages * PGSIZE > MEM_USERSPACE_TOP) {
		return false;
	}

	PMap_MapPage(phys, virt, pages, PGSHIFT, as->user_tbl,
	    PMapProtToPdattr(flags, false));

	return true;
}

/**
 * PMap_Unmap --
 *
 * Unmap a range of addresses.
 *
 * @param [in] as Address space.
 * @param [in] va Virtual address.
 * @param [in] pages Pages to unmap.
 *
 * @retval true On success
 * @retval false On failure
 */
bool
PMap_Unmap(UNUSED AS *as, UNUSED uint64_t va, UNUSED uint64_t pages)
{
	NOT_IMPLEMENTED();
	return true;
}

/**
 * PMap_AllocMap --
 *
 * Create a virtual mapping in an address space backed by newly allocated
 * memory.
 *
 * @param [in] as Address space.
 * @param [in] virt Virtual address.
 * @param [in] len Length of the mapping.
 * @param [in] flags Flags to apply to the mapping.
 *
 * @retval true On success
 * @retval false On failure
 */
bool
PMap_AllocMap(UNUSED AS *as, UNUSED uint64_t virt, UNUSED uint64_t len, UNUSED uint64_t flags)
{
	NOT_IMPLEMENTED();
	return true;
}

/**
 * PMap_SystemLookup --
 *
 * Lookup a kernel virtual address in a page table and return a pointer to
 * the page entry. This function allocates page tables as necessary to fill
 * in the 4-level hierarchy.
 *
 * @param [in] va Virtual address to lookup.
 * @param [out] entry Pointer will point to the PageEntry.
 * @param [in] size Page size we want to use.
 */
// void
// PMap_SystemLookup(uint64_t va, PageEntry **entry, int size)
// {
//	PMapLookupEntry(&systemAS, va, entry, size);
// }

/**
 * PMap_SystemLMap --
 *
 * Map a range of large (LARGE_PGSIZE) physical pages to virtual pages in
 * the kernel address space that is shared by all processes.
 *
 * @param [in] phys Physical address.
 * @param [in] virt Virtual address.
 * @param [in] lpages Large pages to map in.
 * @param [in] flags Flags to apply to the mapping.
 *
 * @retval true On success
 * @retval false On failure
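 *
 * Example (the direct map established in PMap_Init):
 * @code
 * PMap_SystemLMap(0x0, MEM_DIRECTMAP_BASE, 512, PROT_ALL);
 * @endcode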
 */
bool
PMap_SystemLMap(uint64_t phys, uint64_t virt, uint64_t lpages, uint64_t flags)
{
	// virt address must be within the dmap (lmap) region
	if (virt < MEM_DIRECTMAP_BASE || virt + lpages * LARGE_PGSIZE > MEM_DIRECTMAP_TOP) {
		return false;
	}

	PMap_MapPage(phys, virt, lpages, LARGE_PGSHIFT, systemAS.dmap_tbl,
	    PMapProtToPdattr(flags, true));

	return true;
}

/**
 * PMap_SystemMap --
 *
 * Map a range of physical pages to virtual pages in the kernel address
 * space that is shared by all processes.
 *
 * @param [in] phys Physical address.
 * @param [in] virt Virtual address.
 * @param [in] pages Pages to map in.
 * @param [in] flags Flags to apply to the mapping.
 *
 * @retval true On success
 * @retval false On failure
 */
bool
PMap_SystemMap(uint64_t phys, uint64_t virt, uint64_t pages, uint64_t flags)
{
	// virt address must be within the xmem region
	if (virt < MEM_XMAP_BASE || virt + pages * PGSIZE > MEM_XMAP_TOP) {
		return false;
	}

	PMap_MapPage(phys, virt, pages, PGSHIFT, systemAS.xmem_tbl,
	    PMapProtToPdattr(flags, true));

	return true;
}

/**
 * PMap_SystemUnmap --
 *
 * We do not currently use this!
 */
bool
PMap_SystemUnmap(UNUSED uint64_t virt, UNUSED uint64_t pages)
{
	NOT_IMPLEMENTED();
	return false;
}

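/**
 * PMap_Dump --
 *
 * Dump the memory mappings of an address space for debugging.
 *
 * @param [in] space Address space to dump.
 */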
void
PMap_Dump(UNUSED AS *space)
{
	NOT_IMPLEMENTED();
}

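/**
 * Debug_PMapDump --
 *
 * Debugger command that dumps the mappings of the current address space.
 */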
static void
Debug_PMapDump(UNUSED int argc, UNUSED const char *argv[])
{
	PMap_Dump(currentAS[CPU()]);
}

REGISTER_DBGCMD(pmapdump, "Dump memory mappings", Debug_PMapDump);