Restrict setting PTE execute permissions on RISC-V.

Previously, RISC-V was enabling execute permissions in PTEs for any
readable page.  Now, execute permissions are only enabled if they were
explicitly specified (e.g. via PROT_EXEC to mmap).  The one exception
is that the initial kernel mapping in locore still maps all of the
kernel RWX.

While here, change the fault type passed to vm_fault and
pmap_fault_fixup to only include a single VM_PROT_* value representing
the faulting access to match other architectures rather than passing a
bitmask.

Reviewed by:	markj
Sponsored by:	DARPA
Differential Revision:	https://reviews.freebsd.org/D17783
This commit is contained in:
jhb 2018-11-01 22:23:15 +00:00
parent 1d7c7b58ca
commit cb644e0694
4 changed files with 17 additions and 10 deletions

View File

@@ -78,7 +78,7 @@ typedef uint64_t pn_t; /* page number */
#define PTE_V (1 << 0) /* Valid */
#define PTE_RWX (PTE_R | PTE_W | PTE_X)
#define PTE_RX (PTE_R | PTE_X)
#define PTE_KERN (PTE_V | PTE_RWX | PTE_A | PTE_D)
#define PTE_KERN (PTE_V | PTE_R | PTE_W | PTE_A | PTE_D)
#define PTE_PPN0_S 10
#define PTE_PPN1_S 19

View File

@@ -94,7 +94,7 @@ _start:
add t3, t4, t2
li t5, 0
2:
li t0, (PTE_KERN)
li t0, (PTE_KERN | PTE_X)
slli t2, t4, PTE_PPN1_S /* << PTE_PPN1_S */
or t5, t0, t2
sd t5, (s1) /* Store PTE entry to position */

View File

@@ -2010,7 +2010,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
}
int
pmap_fault_fixup(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
pmap_fault_fixup(pmap_t pmap, vm_offset_t va, vm_prot_t ftype)
{
pt_entry_t orig_l3;
pt_entry_t new_l3;
@@ -2027,12 +2027,13 @@ pmap_fault_fixup(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
orig_l3 = pmap_load(l3);
if ((orig_l3 & PTE_V) == 0 ||
((prot & VM_PROT_WRITE) != 0 && (orig_l3 & PTE_W) == 0) ||
((prot & VM_PROT_READ) != 0 && (orig_l3 & PTE_R) == 0))
(ftype == VM_PROT_WRITE && (orig_l3 & PTE_W) == 0) ||
(ftype == VM_PROT_EXECUTE && (orig_l3 & PTE_X) == 0) ||
(ftype == VM_PROT_READ && (orig_l3 & PTE_R) == 0))
goto done;
new_l3 = orig_l3 | PTE_A;
if ((prot & VM_PROT_WRITE) != 0)
if (ftype == VM_PROT_WRITE)
new_l3 |= PTE_D;
if (orig_l3 != new_l3) {
@@ -2088,7 +2089,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pa = VM_PAGE_TO_PHYS(m);
pn = (pa / PAGE_SIZE);
new_l3 = PTE_V | PTE_R | PTE_X | PTE_A;
new_l3 = PTE_V | PTE_R | PTE_A;
if (prot & VM_PROT_EXECUTE)
new_l3 |= PTE_X;
if (flags & VM_PROT_WRITE)
new_l3 |= PTE_D;
if (prot & VM_PROT_WRITE)
@@ -2464,7 +2467,9 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
pa = VM_PAGE_TO_PHYS(m);
pn = (pa / PAGE_SIZE);
entry = (PTE_V | PTE_R | PTE_X);
entry = PTE_V | PTE_R;
if (prot & VM_PROT_EXECUTE)
entry |= PTE_X;
entry |= (pn << PTE_PPN0_S);
/*

View File

@@ -207,9 +207,11 @@ data_abort(struct trapframe *frame, int lower)
if ((frame->tf_scause == EXCP_FAULT_STORE) ||
(frame->tf_scause == EXCP_STORE_PAGE_FAULT)) {
ftype = (VM_PROT_READ | VM_PROT_WRITE);
ftype = VM_PROT_WRITE;
} else if (frame->tf_scause == EXCP_INST_PAGE_FAULT) {
ftype = VM_PROT_EXECUTE;
} else {
ftype = (VM_PROT_READ);
ftype = VM_PROT_READ;
}
if (pmap_fault_fixup(map->pmap, va, ftype))