From acf4dc71d612cd16fe55545b5fcbed19965a20a3 Mon Sep 17 00:00:00 2001
From: Svatopluk Kraus
Date: Tue, 15 Dec 2015 13:17:40 +0000
Subject: [PATCH] Flush intermediate TLB cache when L2 page table is unlinked.

This fixes an issue observed on Cortex A7 (RPi2) and on Cortex A15
(Jetson TK1) causing various memory corruptions. It turned out that even
an L2 page table with no valid mapping might be subject to such caching.
Note that not all platforms have intermediate TLB caching implemented.
An open question is whether this fix is sufficient for all platforms
with this feature.

Approved by:	kib (mentor)
---
 sys/arm/arm/pmap-v6-new.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/sys/arm/arm/pmap-v6-new.c b/sys/arm/arm/pmap-v6-new.c
index 2196f916b672..6865888127ce 100644
--- a/sys/arm/arm/pmap-v6-new.c
+++ b/sys/arm/arm/pmap-v6-new.c
@@ -2508,8 +2508,13 @@ pmap_unwire_pt2pg(pmap_t pmap, vm_offset_t va, vm_page_t m)
 		KASSERT(m->md.pt2_wirecount[i] == 0,
 		    ("%s: pmap %p PT2 %u (PG %p) wired", __func__, pmap, i, m));
 		opte1 = pte1_load(pte1p);
-		if (pte1_is_link(opte1))
+		if (pte1_is_link(opte1)) {
 			pte1_clear(pte1p);
+			/*
+			 * Flush intermediate TLB cache.
+			 */
+			pmap_tlb_flush(pmap, (m->pindex + i) << PTE1_SHIFT);
+		}
 #ifdef INVARIANTS
 		else
 			KASSERT((opte1 == 0) || pte1_is_section(opte1),
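
Not part of the patch, for context only: a minimal standalone sketch of how
the virtual address passed to pmap_tlb_flush() is derived. It assumes the
ARMv6/v7 short-descriptor layout used by pmap-v6, where each L1 (pte1) slot
maps a 1 MiB region (so PTE1_SHIFT is 20) and the PT2 page's pindex is the
first L1 slot it covers; the pindex and loop index values are hypothetical.

/*
 * Standalone sketch (not from the FreeBSD tree): computes the base VA of
 * the 1 MiB region whose intermediate (table-walk) TLB entry must be
 * invalidated once its L2 page table link is cleared.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define	PTE1_SHIFT	20		/* assumed: 1 MiB per L1 (pte1) slot */

int
main(void)
{
	uint32_t pindex = 0x300;	/* hypothetical PT2 page pindex */
	uint32_t i = 2;			/* hypothetical L2 table index within the page */
	uint32_t va = (pindex + i) << PTE1_SHIFT;

	/* Base VA of the region covered by the unlinked L2 page table. */
	printf("flush VA: 0x%08" PRIx32 "\n", va);
	return (0);
}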