From 22c7bcb842258be4d12355c3e2e97c0deb88d735 Mon Sep 17 00:00:00 2001
From: Alan Cox
Date: Sun, 23 Jun 2019 21:06:56 +0000
Subject: [PATCH] pmap_enter_quick_locked() never replaces a valid mapping, so
 it need not perform a TLB invalidation. A barrier suffices. (See r343876.)

Add a comment to pmap_enter_quick_locked() in order to highlight the fact
that it does not replace valid mappings.

Correct a typo in one of pmap_enter()'s comments.

MFC after:	1 week
---
 sys/arm64/arm64/pmap.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 8afee14fbdd1..ff0c0611743e 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -3408,7 +3408,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 			    __func__, pmap, va, new_l3);
 		}
 	} else {
-		/* New mappig */
+		/* New mapping */
 		pmap_load_store(l3, new_l3);
 		dsb(ishst);
 	}
@@ -3706,6 +3706,9 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 		l3 = pmap_l2_to_l3(pde, va);
 	}
 
+	/*
+	 * Abort if a mapping already exists.
+	 */
 	if (pmap_load(l3) != 0) {
 		if (mpte != NULL) {
 			mpte->wire_count--;
@@ -3755,7 +3758,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 		cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
 
 	pmap_load_store(l3, l3_val);
-	pmap_invalidate_page(pmap, va);
+	dsb(ishst);
 
 	return (mpte);
 }
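
Background for the change, sketched below: a PTE that goes from invalid to
valid cannot have a stale translation cached in any TLB, so ordering the PTE
store with a dsb(ishst) barrier is enough before the mapping may be used;
only when a valid PTE is replaced must the old translation be flushed with
pmap_invalidate_page(). The following C sketch is illustrative only and is
not part of pmap.c: it assumes the types and helpers named in the patch
(pmap_load(), pmap_load_store(), pmap_invalidate_page(), dsb(ishst)), and
the helper install_l3() is hypothetical.

/*
 * Illustrative sketch, not from pmap.c: install_l3() is a hypothetical
 * helper showing the rule the patch applies.
 */
static void
install_l3(pmap_t pmap, vm_offset_t va, pt_entry_t *l3, pt_entry_t new_l3)
{

	if (pmap_load(l3) != 0) {
		/*
		 * Replacing a valid mapping: the old translation may be
		 * cached in a TLB and must be invalidated.
		 */
		pmap_load_store(l3, new_l3);
		pmap_invalidate_page(pmap, va);
	} else {
		/*
		 * New mapping: nothing stale can be cached for this VA,
		 * so a store barrier ordering the PTE write suffices.
		 */
		pmap_load_store(l3, new_l3);
		dsb(ishst);
	}
}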