Add a VA_IS_CLEANMAP() macro.

This macro returns true if a provided virtual address is contained
in the kernel's clean submap.

In CHERI kernels, the buffer cache and transient I/O map are allocated
as separate regions.  Abstracting this check reduces the diff relative
to FreeBSD.  It is perhaps slightly more readable as well.

Reviewed by:	kib
Obtained from:	CheriBSD
Sponsored by:	DARPA
Differential Revision:	https://reviews.freebsd.org/D28710
commit 67932460c7 (parent fa3bd463ce)
Author:	John Baldwin
Date:	2021-02-17 16:32:11 -08:00

8 changed files with 16 additions and 17 deletions
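
For orientation before the per-architecture hunks, here is a minimal sketch of the check being abstracted. It is illustrative only and not part of this commit: the helper names va_in_clean_submap_old() and va_in_clean_submap_new() are hypothetical, and the sketch assumes the kernel header that declares struct kva_md_info, kmi, and (after this change) VA_IS_CLEANMAP() has been included.

/*
 * Illustrative sketch only; not part of this commit.  Hypothetical
 * helpers showing the open-coded check and its macro equivalent.
 */
static inline bool
va_in_clean_submap_old(vm_offset_t va)
{
	/* Open-coded clean submap bounds check, as removed below. */
	return (va >= kmi.clean_sva && va < kmi.clean_eva);
}

static inline bool
va_in_clean_submap_new(vm_offset_t va)
{
	/* The same test, expressed with the new macro. */
	return (VA_IS_CLEANMAP(va));
}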


@@ -6747,8 +6747,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
 	    va));
-	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
-	    va >= kmi.clean_eva,
+	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
 	    ("pmap_enter: managed mapping within the clean submap"));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -7262,7 +7261,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 {
 	pt_entry_t newpte, *pte, PG_V;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	PG_V = pmap_valid_bit(pmap);


@@ -3873,8 +3873,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	KASSERT(va < UPT2V_MIN_ADDRESS || va >= UPT2V_MAX_ADDRESS,
 	    ("%s: invalid to pmap_enter page table pages (va: 0x%x)", __func__,
 	    va));
-	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
-	    va >= kmi.clean_eva,
+	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
 	    ("%s: managed mapping within the clean submap", __func__));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -4535,7 +4534,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	struct spglist free;
 	uint32_t l2prot;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("%s: managed mapping within the clean submap", __func__));
 	rw_assert(&pvh_global_lock, RA_WLOCKED);


@@ -4438,7 +4438,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	vm_paddr_t pa;
 	int lvl;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);


@@ -3654,7 +3654,7 @@ __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	    ("pmap_enter: invalid to pmap_enter into trampoline (va: 0x%x)",
 	    va));
 	KASSERT(pmap != kernel_pmap || (m->oflags & VPO_UNMANAGED) != 0 ||
-	    va < kmi.clean_sva || va >= kmi.clean_eva,
+	    !VA_IS_CLEANMAP(va),
 	    ("pmap_enter: managed mapping within the clean submap"));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -4108,8 +4108,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 {
 	pt_entry_t newpte, *pte;
 
-	KASSERT(pmap != kernel_pmap || va < kmi.clean_sva ||
-	    va >= kmi.clean_eva || (m->oflags & VPO_UNMANAGED) != 0,
+	KASSERT(pmap != kernel_pmap || !VA_IS_CLEANMAP(va) ||
+	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	rw_assert(&pvh_global_lock, RA_WLOCKED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);


@@ -2125,8 +2125,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 
 	va &= ~PAGE_MASK;
 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
-	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
-	    va >= kmi.clean_eva,
+	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
 	    ("pmap_enter: managed mapping within the clean submap"));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -2328,7 +2327,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	pt_entry_t *pte, npte;
 	vm_paddr_t pa;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	rw_assert(&pvh_global_lock, RA_WLOCKED);


@@ -2813,8 +2813,7 @@ mmu_radix_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	CTR6(KTR_PMAP, "pmap_enter(%p, %#lx, %p, %#x, %#x, %d)", pmap, va,
 	    m, prot, flags, psind);
 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
-	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
-	    va >= kmi.clean_eva,
+	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || !VA_IS_CLEANMAP(va),
 	    ("pmap_enter: managed mapping within the clean submap"));
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -3298,7 +3297,7 @@ mmu_radix_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	pt_entry_t *pte;
 	vm_paddr_t pa;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("mmu_radix_enter_quick_locked: managed mapping within the clean submap"));
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);


@@ -3103,7 +3103,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	pd_entry_t *l2;
 	pt_entry_t *l3, newl3;
 
-	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+	KASSERT(!VA_IS_CLEANMAP(va) ||
 	    (m->oflags & VPO_UNMANAGED) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	rw_assert(&pvh_global_lock, RA_LOCKED);


@@ -145,6 +145,9 @@ struct kva_md_info {
 	vm_offset_t	clean_eva;
 };
 
+#define	VA_IS_CLEANMAP(va)					\
+	((va) >= kmi.clean_sva && (va) < kmi.clean_eva)
+
 extern struct kva_md_info kmi;
 extern void vm_ksubmap_init(struct kva_md_info *);
 