Support RISC-V implementations that do not manage the A and D bits

(e.g., RocketChip, lowRISC, and derivatives).

RISC-V page table entries provide A (accessed) and D (dirty) bits, but
the spec makes hardware management of these bits optional.
Implementations that do not manage them in hardware instead raise a
page fault on any access to a valid page whose A bit is clear, and on
any write to a writable page whose D bit is clear. Detect these faults
when handling a page fault and fix up the PTE directly, without calling
vm_fault(), if they occur.
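
For illustration, a minimal sketch of the fixup idea (the standalone
helper below is hypothetical, not the committed interface; the PTE_*
bit values match the pte.h hunk in this change):

	#include <stdbool.h>
	#include <stdint.h>

	/* Sv39 PTE bits, matching the pte.h hunk below. */
	#define PTE_V (1UL << 0)	/* Valid */
	#define PTE_W (1UL << 2)	/* Writable */
	#define PTE_A (1UL << 6)	/* Accessed */
	#define PTE_D (1UL << 7)	/* Dirty */

	/*
	 * Without hardware A/D management, a load from a valid page with
	 * PTE_A clear, or a store to a writable page with PTE_D clear,
	 * traps.  The handler can service such a fault by setting the
	 * bit itself instead of entering the full vm_fault() path.
	 */
	static int
	ad_fixup_sketch(uint64_t *pte, bool is_write)
	{
		uint64_t old = *pte;

		if ((old & PTE_V) == 0)
			return (0);	/* invalid mapping: real fault */
		if (is_write && (old & PTE_W) == 0)
			return (0);	/* permission fault: real fault */
		*pte = old | PTE_A | (is_write ? PTE_D : 0);
		/* The kernel would invalidate the TLB entry for the VA here. */
		return (1);		/* fault serviced; retry the access */
	}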

Reviewed by:	jhb, markj
Approved by:	re (gjb)
Sponsored by:	DARPA, AFRL
Differential Revision:	https://reviews.freebsd.org/D17424
Author:	Ruslan Bukin
Date:	2018-10-18 15:25:07 +00:00
Commit:	b977d81946 (parent 3c8efd61f5)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=339423

5 changed files with 54 additions and 12 deletions

sys/riscv/include/pmap.h

@@ -153,6 +153,8 @@ bool pmap_get_tables(pmap_t, vm_offset_t, pd_entry_t **, pd_entry_t **,
 #define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
 
+int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t);
+
 #endif /* _KERNEL */
 #endif /* !LOCORE */

sys/riscv/include/pte.h

@@ -1,6 +1,6 @@
 /*-
  * Copyright (c) 2014 Andrew Turner
- * Copyright (c) 2015-2016 Ruslan Bukin <br@bsdpad.com>
+ * Copyright (c) 2015-2018 Ruslan Bukin <br@bsdpad.com>
  * All rights reserved.
  *
  * Portions of this software were developed by SRI International and the
@@ -65,7 +65,7 @@ typedef uint64_t pn_t; /* page number */
 #define Ln_ENTRIES (1 << 9)
 #define Ln_ADDR_MASK (Ln_ENTRIES - 1)
 
-/* Bits 9:7 are reserved for software */
+/* Bits 9:8 are reserved for software */
 #define PTE_SW_MANAGED (1 << 9)
 #define PTE_SW_WIRED (1 << 8)
 #define PTE_D (1 << 7) /* Dirty */
@@ -78,6 +78,7 @@ typedef uint64_t pn_t; /* page number */
 #define PTE_V (1 << 0) /* Valid */
 #define PTE_RWX (PTE_R | PTE_W | PTE_X)
 #define PTE_RX (PTE_R | PTE_X)
+#define PTE_KERN (PTE_V | PTE_RWX | PTE_A | PTE_D)
 
 #define PTE_PPN0_S 10
 #define PTE_PPN1_S 19
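
A note on the new macro: PTE_KERN preinstalls the A and D bits alongside
V|RWX, so fixed kernel mappings (the DMAP, kenter/qenter mappings, and
locore's early page tables below) never take an A/D fixup fault. A
hedged, self-contained example of composing such an entry (PAGE_SIZE of
4 KiB assumed; the helper name is illustrative):

	#include <stdint.h>

	/* Bit values as in the pte.h hunk above. */
	#define PTE_V	(1UL << 0)
	#define PTE_R	(1UL << 1)
	#define PTE_W	(1UL << 2)
	#define PTE_X	(1UL << 3)
	#define PTE_A	(1UL << 6)
	#define PTE_D	(1UL << 7)
	#define PTE_RWX	(PTE_R | PTE_W | PTE_X)
	#define PTE_KERN	(PTE_V | PTE_RWX | PTE_A | PTE_D)
	#define PTE_PPN0_S	10
	#define PAGE_SIZE	4096UL

	/* Compose a leaf kernel PTE for physical address pa. */
	static uint64_t
	kern_pte(uint64_t pa)
	{
		uint64_t pn = pa / PAGE_SIZE;	/* physical page number */

		return (PTE_KERN | (pn << PTE_PPN0_S));
	}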

sys/riscv/riscv/locore.S

@@ -94,7 +94,7 @@ _start:
 	add	t3, t4, t2
 	li	t5, 0
 2:
-	li	t0, (PTE_V | PTE_RWX | PTE_D)
+	li	t0, (PTE_KERN)
 	slli	t2, t4, PTE_PPN1_S	/* << PTE_PPN1_S */
 	or	t5, t0, t2
 	sd	t5, (s1)		/* Store PTE entry to position */
@@ -126,7 +126,7 @@ _start:
 	mv	s2, s11
 	srli	s2, s2, PAGE_SHIFT
 
-	li	t0, (PTE_V | PTE_RWX | PTE_D)
+	li	t0, (PTE_KERN)
 	slli	t2, s2, PTE_PPN0_S	/* << PTE_PPN0_S */
 	or	t0, t0, t2

sys/riscv/riscv/pmap.c

@@ -15,7 +15,7 @@
  * All rights reserved.
  * Copyright (c) 2014 The FreeBSD Foundation
  * All rights reserved.
- * Copyright (c) 2015-2017 Ruslan Bukin <br@bsdpad.com>
+ * Copyright (c) 2015-2018 Ruslan Bukin <br@bsdpad.com>
  * All rights reserved.
  *
  * This code is derived from software contributed to Berkeley by
@@ -487,7 +487,7 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa, vm_paddr_t max_pa)
 		/* superpages */
 		pn = (pa / PAGE_SIZE);
-		entry = (PTE_V | PTE_RWX);
+		entry = PTE_KERN;
 		entry |= (pn << PTE_PPN0_S);
 		pmap_load_store(&l1[l1_slot], entry);
 	}
@@ -965,7 +965,7 @@ pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
 		KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va));
 		pn = (pa / PAGE_SIZE);
-		entry = (PTE_V | PTE_RWX);
+		entry = PTE_KERN;
 		entry |= (pn << PTE_PPN0_S);
 		pmap_load_store(l3, entry);
@@ -1063,7 +1063,7 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 		pn = (pa / PAGE_SIZE);
 		l3 = pmap_l3(kernel_pmap, va);
-		entry = (PTE_V | PTE_RWX);
+		entry = PTE_KERN;
 		entry |= (pn << PTE_PPN0_S);
 		pmap_load_store(l3, entry);
@@ -1465,7 +1465,8 @@ pmap_growkernel(vm_offset_t addr)
 			continue; /* try again */
 		}
 		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
-		if ((pmap_load(l2) & PTE_A) != 0) {
+		if ((pmap_load(l2) & PTE_V) != 0 &&
+		    (pmap_load(l2) & PTE_RWX) == 0) {
 			kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
 			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
 				kernel_vm_end = vm_map_max(kernel_map);
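
The changed test above deserves a note: in Sv39 a PTE that is valid
(PTE_V set) with R, W, and X all clear is a pointer to a next-level
page table, so this is the proper "L2 entry present and not a leaf"
check; keying on PTE_A was only correct while every valid PTE had A
preset. As a sketch (the helper name is illustrative, not from the
commit):

	#include <stdbool.h>
	#include <stdint.h>

	#define PTE_V	(1UL << 0)
	#define PTE_RWX	((1UL << 1) | (1UL << 2) | (1UL << 3))	/* R|W|X */

	/* True when the PTE points at a next-level table rather than a page. */
	static bool
	pte_is_table(uint64_t pte)
	{
		return ((pte & PTE_V) != 0 && (pte & PTE_RWX) == 0);
	}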
@@ -2008,6 +2009,41 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 	PMAP_UNLOCK(pmap);
 }
 
+int
+pmap_fault_fixup(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+{
+	pt_entry_t orig_l3;
+	pt_entry_t new_l3;
+	pt_entry_t *l3;
+
+	l3 = pmap_l3(pmap, va);
+	if (l3 == NULL)
+		return (0);
+
+	orig_l3 = pmap_load(l3);
+	if ((orig_l3 & PTE_V) == 0 ||
+	    ((prot & VM_PROT_WRITE) != 0 && (orig_l3 & PTE_W) == 0) ||
+	    ((prot & VM_PROT_READ) != 0 && (orig_l3 & PTE_R) == 0))
+		return (0);
+
+	new_l3 = orig_l3 | PTE_A;
+	if ((prot & VM_PROT_WRITE) != 0)
+		new_l3 |= PTE_D;
+
+	if (orig_l3 != new_l3) {
+		pmap_load_store(l3, new_l3);
+		pmap_invalidate_page(pmap, va);
+		return (1);
+	}
+
+	/*
+	 * XXX: This case should never happen since it means
+	 * the PTE shouldn't have resulted in a fault.
+	 */
+
+	return (0);
+}
+
 /*
  * Insert the given physical page (p) at
  * the specified virtual address (v) in the
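
Usage contract, as a hedged reading: pmap_fault_fixup() returns 1 when
the fault was purely a missing-A/D fault and has been serviced, and 0
when the normal vm_fault() path must run. The trap.c hunk below calls
it exactly this way:

	/* Hypothetical call-site shape, mirroring data_abort() below. */
	if (pmap_fault_fixup(map->pmap, va, ftype))
		goto done;	/* PTE fixed up; the access will be retried */
	/* ...otherwise fall through to the vm_fault() path... */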
@@ -2415,8 +2451,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	pa = VM_PAGE_TO_PHYS(m);
 	pn = (pa / PAGE_SIZE);
 
 	/* RISCVTODO: check permissions */
-	entry = (PTE_V | PTE_RWX);
+	entry = (PTE_V | PTE_R | PTE_X);
 	entry |= (pn << PTE_PPN0_S);
 
 	/*

sys/riscv/riscv/trap.c

@@ -1,5 +1,5 @@
 /*-
- * Copyright (c) 2015-2017 Ruslan Bukin <br@bsdpad.com>
+ * Copyright (c) 2015-2018 Ruslan Bukin <br@bsdpad.com>
  * All rights reserved.
  *
  * Portions of this software were developed by SRI International and the
@@ -212,6 +212,9 @@ data_abort(struct trapframe *frame, int lower)
 		ftype = (VM_PROT_READ);
 	}
 
+	if (pmap_fault_fixup(map->pmap, va, ftype))
+		goto done;
+
 	if (map != kernel_map) {
 		/*
 		 * Keep swapout from messing with us during this
@@ -256,6 +259,7 @@ data_abort(struct trapframe *frame, int lower)
 		}
 	}
 
+done:
 	if (lower)
 		userret(td, frame);
 }