From c8f59059d84ab4771bc58372636e1075a0cd6fa2 Mon Sep 17 00:00:00 2001
From: Alan Cox <alc@FreeBSD.org>
Date: Sun, 16 Jun 2019 22:13:27 +0000
Subject: [PATCH] Three changes to arm64's pmap_unwire():

Implement wiring changes on superpage mappings. Previously, a superpage
mapping was unconditionally demoted by pmap_unwire(), even if the wiring
change applied to the entire superpage mapping.

Rewrite a comment to use the arm64 names for bits in a page table entry.
Previously, the bits were referred to by their x86 names.

Use atomic_"op"_64() instead of atomic_"op"_long() to update a page table
entry in order to match the prevailing style in this file.

MFC after:	10 days
---
 sys/arm64/arm64/pmap.c | 24 ++++++++++++++++++------
 1 file changed, 18 insertions(+), 6 deletions(-)

diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 56dff57b0219..18e4d1d31a39 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -3767,9 +3767,21 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 			continue;
 
 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
-			l3 = pmap_demote_l2(pmap, l2, sva);
-			if (l3 == NULL)
+			if ((pmap_load(l2) & ATTR_SW_WIRED) == 0)
+				panic("pmap_unwire: l2 %#jx is missing "
+				    "ATTR_SW_WIRED", (uintmax_t)pmap_load(l2));
+
+			/*
+			 * Are we unwiring the entire large page?  If not,
+			 * demote the mapping and fall through.
+			 */
+			if (sva + L2_SIZE == va_next && eva >= va_next) {
+				atomic_clear_64(l2, ATTR_SW_WIRED);
+				pmap->pm_stats.wired_count -= L2_SIZE /
+				    PAGE_SIZE;
 				continue;
+			} else if (pmap_demote_l2(pmap, l2, sva) == NULL)
+				panic("pmap_unwire: demotion failed");
 		}
 		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
 		    ("pmap_unwire: Invalid l2 entry after demotion"));
@@ -3785,11 +3797,11 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 			    "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3));
 
 			/*
-			 * PG_W must be cleared atomically.  Although the pmap
-			 * lock synchronizes access to PG_W, another processor
-			 * could be setting PG_M and/or PG_A concurrently.
+			 * ATTR_SW_WIRED must be cleared atomically.  Although
+			 * the pmap lock synchronizes access to ATTR_SW_WIRED,
+			 * the System MMU may write to the entry concurrently.
 			 */
-			atomic_clear_long(l3, ATTR_SW_WIRED);
+			atomic_clear_64(l3, ATTR_SW_WIRED);
 			pmap->pm_stats.wired_count--;
 		}
 	}
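
Note: the whole-block test above is compact, so here is a minimal
stand-alone sketch of the same predicate as a reading aid. The helper
name, the constants, and the 4 KiB-granule L2 geometry (one L2 block
maps 2 MiB) are assumptions for illustration, not code from the patch.
In pmap_unwire(), va_next is the first address after the L2 region
containing sva, so the test succeeds only when sva sits at the start of
the block and [sva, eva) runs through its end; any partial overlap
falls through to demotion so individual 4 KiB pages can be unwired.

#include <stdbool.h>
#include <stdint.h>

/* Assumed arm64 4 KiB-granule geometry: one L2 block maps 2 MiB. */
#define	L2_SHIFT	21
#define	L2_SIZE		((uint64_t)1 << L2_SHIFT)
#define	L2_OFFSET	(L2_SIZE - 1)

/*
 * Hypothetical stand-alone version of the coverage test in the patch.
 * va_next is computed the way pmap_unwire() computes it: the start of
 * the L2 region that follows sva.
 */
static bool
unwire_covers_l2_block(uint64_t sva, uint64_t eva)
{
	uint64_t va_next;

	va_next = (sva + L2_SIZE) & ~L2_OFFSET;

	/*
	 * sva + L2_SIZE == va_next holds only when sva is L2-aligned,
	 * i.e., sva is the start of the block; eva >= va_next means the
	 * range does not end inside the block.
	 */
	return (sva + L2_SIZE == va_next && eva >= va_next);
}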
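
Similarly, the rewritten comment's point, that the wired bit must be
cleared with an atomic op because hardware may update other bits of the
same 64-bit entry concurrently, can be sketched with C11 atomics. The
typedef, the helper, and the exact bit position below are assumptions
for illustration (ARMv8 reserves descriptor bits 55-58 for software
use); in the kernel itself the patch simply calls atomic_clear_64().

#include <stdatomic.h>
#include <stdint.h>

typedef uint64_t pt_entry_t;

/* Assumed bit position: one of the software-reserved bits (55-58). */
#define	ATTR_SW_WIRED	((pt_entry_t)1 << 56)

/*
 * Clear the software wired bit without losing a concurrent hardware
 * update (e.g., to the access flag) in the same entry.  A non-atomic
 * read-modify-write could overwrite such an update; the atomic AND
 * mirrors what atomic_clear_64() does.
 */
static void
pte_clear_wired(_Atomic pt_entry_t *ptep)
{
	atomic_fetch_and_explicit(ptep, ~ATTR_SW_WIRED,
	    memory_order_relaxed);
}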