Use PROT/MAP flags rather than PTEs to improve portability.

commit 51b61e3316
parent d1a7bc9020
@@ -12,7 +12,7 @@
  * Page Tables
  */

-#define PGNUMMASK 0xFFFFFFFFFFFFF000ULL
+#define PGNUMMASK 0x7FFFFFFFFFFFF000ULL

 #define PGIDXSHIFT 9
 #define PGIDXMASK (512 - 1)
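Note: the narrowed mask drops bit 63, which on x86-64 page-table entries is the NX (execute-disable) bit, so masking an entry with PGNUMMASK now yields just the physical frame address. A minimal standalone sketch of the difference (the sample entry value is made up for illustration):

/* Bit 63 of an x86-64 PTE is NX; with the old mask it leaked into the
 * extracted physical address. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t entry   = 0x8000000000201003ULL; /* frame 0x201000, P and W set, NX set */
    uint64_t oldmask = 0xFFFFFFFFFFFFF000ULL;
    uint64_t newmask = 0x7FFFFFFFFFFFF000ULL;

    assert((entry & newmask) == 0x201000ULL); /* clean frame address */
    assert((entry & oldmask) != 0x201000ULL); /* NX bit survives the old mask */
    return 0;
}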
@@ -6,6 +6,7 @@
 #include <sys/kassert.h>
 #include <sys/kdebug.h>
 #include <sys/kmem.h>
+#include <sys/mman.h>

 #include <machine/cpu.h>
 #include <machine/cpuop.h>
@@ -15,6 +16,30 @@
 AS systemAS;
 AS *currentAS[MAX_CPUS];

+static uint64_t
+PMapProtToPTE(uint64_t flags)
+{
+    uint64_t pte = 0;
+
+    ASSERT(flags != 0);
+
+    if (flags & PROT_READ)
+        pte |= PTE_P;
+    if (flags & PROT_WRITE)
+        pte |= PTE_P|PTE_W;
+    if (flags & PROT_EXEC)
+        pte |= PTE_P;
+    else
+        pte |= PTE_NX;
+
+    ASSERT(pte != 0);
+
+    if (flags & MAP_NOCACHE)
+        pte |= PTE_PCD;
+
+    return pte;
+}
+
 void
 PMap_Init()
 {
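For reference, a rough sketch of what the new helper produces for common flag combinations, plus how the later hunks consume it (the exact bit values come from the PTE_* definitions in machine/pmap.h):

/*
 * PROT_READ             -> PTE_P | PTE_NX           (present, non-executable)
 * PROT_READ|PROT_WRITE  -> PTE_P | PTE_W | PTE_NX   (writable, non-executable)
 * PROT_ALL              -> PTE_P | PTE_W            (read/write/execute)
 * PROT_ALL|MAP_NOCACHE  -> PTE_P | PTE_W | PTE_PCD  (RWX, cache-disabled)
 */
uint64_t pteflags = PMapProtToPTE(PROT_READ|PROT_WRITE) | PTE_U; /* a user RW, no-exec page */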
@@ -52,11 +77,11 @@ PMap_Init()

     // Setup system mappings
     PMap_SystemLMap(0x0, MEM_DIRECTMAP_BASE + 0x0,
-                    3*512, 0); // 3GB RWX
+                    3*512, PROT_ALL); // 3GB RWX
     PMap_SystemLMap(0xC0000000, MEM_DIRECTMAP_BASE + 0xC0000000,
-                    512, PTE_NX|PTE_PCD); // 1GB RW + PCD
+                    512, PROT_ALL|MAP_NOCACHE); // 1GB RW + PCD
     PMap_SystemLMap(0x100000000, MEM_DIRECTMAP_BASE + 0x100000000,
-                    60*512, 0); // 60GB RWX
+                    60*512, PROT_ALL); // 60GB RWX

     PMap_LoadAS(&systemAS);
@@ -248,7 +273,7 @@ PMap_Translate(AS *space, uintptr_t va)
     if ((pte & PTE_PS) == PTE_PS) {
         // Handle 2MB pages
         entry = &table->entries[k];
-        return (*entry & ~(LARGE_PGMASK | PTE_NX)) + (va & LARGE_PGMASK);
+        return (*entry & PGNUMMASK) + (va & LARGE_PGMASK);
     }
     if (pte == 0) {
         ASSERT(pte);
@@ -259,7 +284,7 @@ PMap_Translate(AS *space, uintptr_t va)
     // Handle 4KB pages
     entry = &table->entries[l];

-    return (*entry & ~(PGMASK | PTE_NX)) + (va & PGMASK);
+    return (*entry & PGNUMMASK) + (va & PGMASK);
 }

 /**
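Note: for 4KB entries the new expression is equivalent to the old one, since ~(PGMASK | PTE_NX) equals PGNUMMASK when PGMASK is 0xFFF and PTE_NX is bit 63. For 2MB entries it additionally assumes bits 12-20 of the entry are clear (2MB-aligned frames, no PAT bit), which is how the PMap_SystemLMap mappings above are built. A sanity-check sketch one could drop into the 4KB path:

ASSERT(PGNUMMASK == ~((uint64_t)PGMASK | PTE_NX)); /* the two masking forms agree */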
@@ -356,6 +381,7 @@ PMap_Map(AS *as, uint64_t phys, uint64_t virt, uint64_t pages, uint64_t flags)
 {
     int i;
     PageEntry *entry;
+    uint64_t pteflags = PMapProtToPTE(flags);

     for (i = 0; i < pages; i++) {
         uint64_t va = virt + PGSIZE * i;
@@ -365,7 +391,7 @@ PMap_Map(AS *as, uint64_t phys, uint64_t virt, uint64_t pages, uint64_t flags)
             return false;
         }

-        *entry = (phys + PGSIZE * i) | PTE_P | PTE_W | PTE_U | flags;
+        *entry = (phys + PGSIZE * i) | PTE_U | pteflags;
     }

     return true;
@@ -425,6 +451,7 @@ PMap_AllocMap(AS *as, uint64_t virt, uint64_t len, uint64_t flags)
     int i;
     uint64_t pages = (len + PGSIZE - 1) / PGSIZE;
     PageEntry *entry;
+    uint64_t pteflags = PMapProtToPTE(flags);

     ASSERT((virt & PGMASK) == 0);
@@ -438,7 +465,7 @@ PMap_AllocMap(AS *as, uint64_t virt, uint64_t len, uint64_t flags)

         if ((*entry & PTE_P) != PTE_P) {
             void *pg = PAlloc_AllocPage();
-            *entry = (uint64_t)DMVA2PA(pg) | PTE_P | PTE_U | flags;
+            *entry = (uint64_t)DMVA2PA(pg) | PTE_U | pteflags;
         }
     }
@@ -481,6 +508,7 @@ PMap_SystemLMap(uint64_t phys, uint64_t virt, uint64_t lpages, uint64_t flags)
 {
     int i;
     PageEntry *entry;
+    uint64_t pteflags = PMapProtToPTE(flags);

     for (i = 0; i < lpages; i++) {
         uint64_t va = virt + LARGE_PGSIZE * i;
@@ -490,7 +518,7 @@ PMap_SystemLMap(uint64_t phys, uint64_t virt, uint64_t lpages, uint64_t flags)
             return false;
         }

-        *entry = (phys + LARGE_PGSIZE * i) | PTE_P | PTE_W | PTE_PS | flags;
+        *entry = (phys + LARGE_PGSIZE * i) | PTE_PS | pteflags;
     }

     return true;
@@ -515,6 +543,7 @@ PMap_SystemMap(uint64_t phys, uint64_t virt, uint64_t pages, uint64_t flags)
 {
     int i;
     PageEntry *entry;
+    uint64_t pteflags = PMapProtToPTE(flags);

     for (i = 0; i < pages; i++) {
         uint64_t va = virt + PGSIZE * i;
@@ -524,7 +553,7 @@ PMap_SystemMap(uint64_t phys, uint64_t virt, uint64_t pages, uint64_t flags)
             return false;
         }

-        *entry = (phys + PGSIZE * i) | PTE_P | PTE_W | flags;
+        *entry = (phys + PGSIZE * i) | pteflags;
     }

     return true;
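Taken together, the four mapping routines now share one pattern; a comment-style summary of the hunks above:

/*
 * pteflags = PMapProtToPTE(flags);        // translate the portable flags once
 * *entry   = pa | PTE_U  | pteflags;      // PMap_Map / PMap_AllocMap (user mappings)
 * *entry   = pa | PTE_PS | pteflags;      // PMap_SystemLMap (2MB kernel mappings)
 * *entry   = pa |          pteflags;      // PMap_SystemMap (4KB kernel mappings)
 */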
@@ -6,6 +6,7 @@
 #include <sys/kassert.h>
 #include <sys/kdebug.h>
 #include <sys/kmem.h>
+#include <sys/mman.h>

 #include <machine/cpu.h>
 #include <machine/cpuop.h>
@@ -106,7 +107,7 @@ XMem_Allocate(XMem *xmem, uintptr_t length)
         if (pg == NULL)
             return false;

-        PMap_SystemMap(DMVA2PA((uint64_t)pg), xmem->base + off, 1, 0);
+        PMap_SystemMap(DMVA2PA((uint64_t)pg), xmem->base + off, 1, PROT_ALL);

         xmem->length += PGSIZE;
     }
@@ -6,11 +6,16 @@
 #define PROT_READ 0x01
 #define PROT_WRITE 0x02
 #define PROT_EXEC 0x04
+#define PROT_ALL (PROT_READ|PROT_WRITE|PROT_EXEC)

 #define MAP_FILE 0x0010
 #define MAP_ANON 0x0020
 #define MAP_FIXED 0x0040

+#ifdef _KERNEL
+#define MAP_NOCACHE 0x1000
+#endif
+

 #ifdef _KERNEL
 #else /* _KERNEL */
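With the header split this way, user code only sees the POSIX-style PROT_*/MAP_* names while MAP_NOCACHE stays kernel-only. A small sketch of a kernel caller (device_phys, device_virt, and npages are hypothetical placeholders):

/* Map an uncached device region read/write; PMapProtToPTE turns this
 * into PTE_P | PTE_W | PTE_NX | PTE_PCD. */
PMap_SystemMap(device_phys, device_virt, npages, PROT_READ|PROT_WRITE|MAP_NOCACHE);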
@@ -13,8 +13,8 @@
 #include <sys/queue.h>
 #include <sys/disk.h>
 #include <sys/elf64.h>
+#include <sys/mman.h>

 #include <machine/cpu.h>
 #include <machine/trap.h>
-#include <machine/pmap.h>
 #include <sys/thread.h>
@@ -158,7 +158,7 @@ Loader_Load(Thread *thr, VNode *vn, void *buf, uint64_t len)
         memsz += phdr[i].p_vaddr - va;

         Log(loader, "AllocMap %016llx %08llx\n", va, memsz);
-        if (!PMap_AllocMap(as, va, memsz, PTE_W)) {
+        if (!PMap_AllocMap(as, va, memsz, PROT_ALL)) {
             // XXX: Cleanup!
             ASSERT(false);
             return false;
@@ -166,7 +166,7 @@ Loader_Load(Thread *thr, VNode *vn, void *buf, uint64_t len)
         }
     }

-    PMap_AllocMap(as, MEM_USERSPACE_STKBASE, MEM_USERSPACE_STKLEN, PTE_W);
+    PMap_AllocMap(as, MEM_USERSPACE_STKBASE, MEM_USERSPACE_STKLEN, PROT_READ|PROT_WRITE);

     for (i = 0; i < ehdr->e_phnum; i++)
     {
@@ -238,6 +238,7 @@ Loader_LoadInit()
      */
     PMap_LoadAS(thr->space); // Reload CR3

+#if defined(__x86_64__)
     /*
      * Pass in zero arguments with null pointers to init
      */
@@ -263,6 +264,9 @@ Loader_LoadInit()
     tf.rflags = RFLAGS_IF;
     tf.rdi = rsp;
     Trap_Pop(&tf);
+#elif defined(__aarch64__)
+    NOT_IMPLEMENTED();
+#endif

     /*
      * We should never reach this point!
@@ -228,7 +228,7 @@ Syscall_MMap(uint64_t addr, uint64_t len, uint64_t prot)
     Thread *cur = Sched_Current();
     bool status;

-    status = PMap_AllocMap(cur->space, addr, len, PTE_W);
+    status = PMap_AllocMap(cur->space, addr, len, prot);
     Thread_Release(cur);
     if (!status) {
         // XXX: Need to unmap PMap_Unmap(cur->space, addr, pgs);
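Note: this also tightens the mapping's effective permissions. Previously mmap'd regions were always created with PTE_W (and, lacking NX, were executable); now the caller's prot is honored by PMapProtToPTE, for example:

/* PROT_READ|PROT_WRITE           -> PTE_P | PTE_W | PTE_NX | PTE_U  (not executable)
 * PROT_READ|PROT_WRITE|PROT_EXEC -> PTE_P | PTE_W | PTE_U           (executable)     */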
@@ -15,6 +15,7 @@
 #include <sys/kdebug.h>
 #include <sys/kmem.h>
 #include <sys/ktime.h>
+#include <sys/mman.h>
 #include <sys/mp.h>
 #include <sys/spinlock.h>
 #include <sys/thread.h>
@@ -171,7 +172,7 @@ Thread_UThreadCreate(Thread *oldThr, uint64_t rip, uint64_t arg)
     proc->ustackNext += MEM_USERSPACE_STKLEN;
     Spinlock_Unlock(&proc->lock);

-    PMap_AllocMap(thr->space, thr->ustack, MEM_USERSPACE_STKLEN, PTE_W);
+    PMap_AllocMap(thr->space, thr->ustack, MEM_USERSPACE_STKLEN, PROT_READ|PROT_WRITE);
     // XXX: Check failure

     Thread_InitArch(thr);