Use inline tests for individual PTE bits in the RISC-V pmap.

Inline tests for PTE_* bits are easy to read and don't require a
predicate function, and predicates that operate on a pt_entry_t are
inconvenient when working with L1 and L2 page table entries.

Reviewed by:	jhb
MFC after:	1 week
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D18461
markj 2018-12-11 02:15:56 +00:00
parent 6122d43af0
commit 32498fda11


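To make the trade-off concrete, here is a minimal, self-contained C sketch contrasting the predicate style being removed with the inline-test style being adopted. The PTE_* values below follow the RISC-V Sv39 PTE bit layout but are illustrative stand-ins, not the kernel's authoritative definitions from sys/riscv/include/pte.h:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pt_entry_t;

/* Illustrative bit positions, mirroring the RISC-V Sv39 PTE format. */
#define	PTE_V	(1ul << 0)	/* valid */
#define	PTE_W	(1ul << 2)	/* writable */
#define	PTE_A	(1ul << 6)	/* accessed */
#define	PTE_D	(1ul << 7)	/* dirty */

/* Old style: one predicate per bit, named for leaf (L3) entries only. */
static inline int
pmap_page_dirty(pt_entry_t pte)
{

	return (pte & PTE_D);
}

int
main(void)
{
	pt_entry_t l3 = PTE_V | PTE_W | PTE_D;

	/* Old: the helper's name hides which bit is actually tested. */
	if (pmap_page_dirty(l3))
		printf("dirty (predicate)\n");

	/*
	 * New: the inline test names the bit and reads the same at any
	 * page-table level, so no L1/L2-specific variants are needed.
	 */
	if ((l3 & PTE_D) != 0)
		printf("dirty (inline)\n");

	/* Bits also compose into a single masked compare. */
	if ((l3 & (PTE_D | PTE_W)) == (PTE_D | PTE_W))
		printf("dirty and writable\n");

	return (0);
}

The masked-compare form in the last test is why the pmap_enter() change below can fold the separate PTE_D and PTE_SW_MANAGED tests into one comparison.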
@@ -356,36 +356,6 @@ pmap_l3(pmap_t pmap, vm_offset_t va)
return (pmap_l2_to_l3(l2, va));
}
-static __inline int
-pmap_is_write(pt_entry_t entry)
-{
-return (entry & PTE_W);
-}
-static __inline int
-pmap_l3_valid(pt_entry_t l3)
-{
-return (l3 & PTE_V);
-}
-static inline int
-pmap_page_accessed(pt_entry_t pte)
-{
-return (pte & PTE_A);
-}
-/* Checks if the page is dirty. */
-static inline int
-pmap_page_dirty(pt_entry_t pte)
-{
-return (pte & PTE_D);
-}
static __inline void
pmap_resident_count_inc(pmap_t pmap, int count)
{
@@ -898,7 +868,7 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
retry:
l3p = pmap_l3(pmap, va);
if (l3p != NULL && (l3 = pmap_load(l3p)) != 0) {
-if ((pmap_is_write(l3)) || ((prot & VM_PROT_WRITE) == 0)) {
+if ((l3 & PTE_W) != 0 || (prot & VM_PROT_WRITE) == 0) {
phys = PTE_TO_PHYS(l3);
if (vm_page_pa_tryrelock(pmap, phys, &pa))
goto retry;
@@ -1777,7 +1747,7 @@ pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
if (old_l3 & PTE_SW_MANAGED) {
phys = PTE_TO_PHYS(old_l3);
m = PHYS_TO_VM_PAGE(phys);
-if (pmap_page_dirty(old_l3))
+if ((old_l3 & PTE_D) != 0)
vm_page_dirty(m);
if (old_l3 & PTE_A)
vm_page_aflag_set(m, PGA_REFERENCED);
@@ -1935,7 +1905,7 @@ pmap_remove_all(vm_page_t m)
/*
* Update the vm_page_t clean and reference bits.
*/
-if (pmap_page_dirty(tl3))
+if ((tl3 & PTE_D) != 0)
vm_page_dirty(m);
pmap_unuse_l3(pmap, pv->pv_va, pmap_load(l2), &free);
TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
@@ -1997,9 +1967,9 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
sva += L3_SIZE) {
l3 = pmap_load(l3p);
-if (pmap_l3_valid(l3)) {
+if ((l3 & PTE_V) != 0) {
entry = pmap_load(l3p);
-entry &= ~(PTE_W);
+entry &= ~PTE_W;
pmap_load_store(l3p, entry);
/* XXX: Use pmap_invalidate_range */
pmap_invalidate_page(pmap, sva);
@@ -2186,7 +2156,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/*
* Is the specified virtual address already mapped?
*/
-if (pmap_l3_valid(orig_l3)) {
+if ((orig_l3 & PTE_V) != 0) {
/*
* Wiring change, just update stats. We don't worry about
* wiring PT pages as they remain resident as long as there
@@ -2217,10 +2187,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
/*
* No, might be a protection or wiring change.
*/
-if ((orig_l3 & PTE_SW_MANAGED) != 0) {
-if (pmap_is_write(new_l3))
-vm_page_aflag_set(m, PGA_WRITEABLE);
-}
+if ((orig_l3 & PTE_SW_MANAGED) != 0 &&
+(new_l3 & PTE_W) != 0)
+vm_page_aflag_set(m, PGA_WRITEABLE);
goto validate;
}
@@ -2245,7 +2214,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
* concurrent calls to pmap_page_test_mappings() and
* pmap_ts_referenced().
*/
-if (pmap_page_dirty(orig_l3))
+if ((orig_l3 & PTE_D) != 0)
vm_page_dirty(om);
if ((orig_l3 & PTE_A) != 0)
vm_page_aflag_set(om, PGA_REFERENCED);
@@ -2278,7 +2247,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
m->md.pv_gen++;
-if (pmap_is_write(new_l3))
+if ((new_l3 & PTE_W) != 0)
vm_page_aflag_set(m, PGA_WRITEABLE);
}
@@ -2298,8 +2267,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pmap_invalidate_page(pmap, va);
KASSERT(PTE_TO_PHYS(orig_l3) == pa,
("pmap_enter: invalid update"));
-if (pmap_page_dirty(orig_l3) &&
-(orig_l3 & PTE_SW_MANAGED) != 0)
+if ((orig_l3 & (PTE_D | PTE_SW_MANAGED)) ==
+(PTE_D | PTE_SW_MANAGED))
vm_page_dirty(m);
} else {
pmap_load_store(l3, new_l3);
@@ -2840,7 +2809,7 @@ pmap_remove_pages(pmap_t pmap)
/*
* Update the vm_page_t clean/reference bits.
*/
-if (pmap_page_dirty(tl3))
+if ((tl3 & PTE_D) != 0)
vm_page_dirty(m);
CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
@@ -3044,11 +3013,11 @@ pmap_remove_write(vm_page_t m)
retry:
oldl3 = pmap_load(l3);
-if (pmap_is_write(oldl3)) {
-newl3 = oldl3 & ~(PTE_W);
+if ((oldl3 & PTE_W) != 0) {
+newl3 = oldl3 & ~PTE_W;
if (!atomic_cmpset_long(l3, oldl3, newl3))
goto retry;
-/* TODO: use pmap_page_dirty(oldl3) ? */
+/* TODO: check for PTE_D? */
if ((oldl3 & PTE_A) != 0)
vm_page_dirty(m);
pmap_invalidate_page(pmap, pv->pv_va);
@@ -3129,7 +3098,7 @@ pmap_ts_referenced(vm_page_t m)
l3 = pmap_l2_to_l3(l2, pv->pv_va);
old_l3 = pmap_load(l3);
-if (pmap_page_dirty(old_l3))
+if ((old_l3 & PTE_D) != 0)
vm_page_dirty(m);
if ((old_l3 & PTE_A) != 0) {
if (safe_to_clear_referenced(pmap, old_l3)) {
@@ -3271,9 +3240,9 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
val = MINCORE_INCORE;
}
-if (pmap_page_dirty(tpte))
+if ((tpte & PTE_D) != 0)
val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
-if (pmap_page_accessed(tpte))
+if ((tpte & PTE_A) != 0)
val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
managed = (tpte & PTE_SW_MANAGED) == PTE_SW_MANAGED;
}