Flush intermediate TLB cache when L2 page table is unlinked.
This fixes an issue observed on Cortex A7 (RPi2) and on Cortex A15 (Jetson TK1) that caused various memory corruptions. It turned out that even an L2 page table with no valid mappings may be subject to such caching.

Note that not all platforms implement intermediate TLB caching. An open question is whether this fix is sufficient for all platforms that have this feature.

Approved by:	kib (mentor)
commit	acf4dc71d6
parent	290bb03c0c
@@ -2508,8 +2508,13 @@ pmap_unwire_pt2pg(pmap_t pmap, vm_offset_t va, vm_page_t m)
 	KASSERT(m->md.pt2_wirecount[i] == 0,
 	    ("%s: pmap %p PT2 %u (PG %p) wired", __func__, pmap, i, m));
 	opte1 = pte1_load(pte1p);
-	if (pte1_is_link(opte1))
+	if (pte1_is_link(opte1)) {
 		pte1_clear(pte1p);
+		/*
+		 * Flush intermediate TLB cache.
+		 */
+		pmap_tlb_flush(pmap, (m->pindex + i) << PTE1_SHIFT);
+	}
 #ifdef INVARIANTS
 	else
 		KASSERT((opte1 == 0) || pte1_is_section(opte1),
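The change hinges on the ordering between clearing the L1 link and invalidating the TLB. The fragment below is a minimal, self-contained sketch of that ordering, not the pmap-v6 code: l1_entry_t, l1_clear(), tlb_flush_region() and L1_REGION_SIZE are hypothetical stand-ins for the real pte1_* and pmap_tlb_flush() primitives, and the 1 MiB region size assumes the ARMv7 short-descriptor format used here.

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the pmap primitives touched by this commit. */
	typedef uint32_t l1_entry_t;

	#define L1_REGION_SIZE	(1UL << 20)	/* one L1 entry covers 1 MiB */

	static void
	l1_clear(volatile l1_entry_t *l1e)
	{
		*l1e = 0;	/* unlink the L2 page table from the L1 entry */
	}

	static void
	tlb_flush_region(uintptr_t va, size_t size)
	{
		/* Stub: the real primitive invalidates the TLB, including
		 * any cached intermediate (walk) entries, for [va, va + size). */
		printf("flush TLB (incl. walk cache) for 0x%08lx-0x%08lx\n",
		    (unsigned long)va, (unsigned long)(va + size - 1));
	}

	/*
	 * Unlink one L2 page table.  The flush must follow the clear even
	 * when the L2 table holds no valid mappings: the hardware walker may
	 * have cached the intermediate L1 -> L2 translation, and reusing the
	 * freed L2 page while such a stale entry survives corrupts memory,
	 * as observed on Cortex A7 and Cortex A15.
	 */
	static void
	l2_table_unlink(volatile l1_entry_t *l1e, uintptr_t va)
	{
		l1_clear(l1e);
		tlb_flush_region(va & ~(L1_REGION_SIZE - 1), L1_REGION_SIZE);
	}

	int
	main(void)
	{
		l1_entry_t l1e = 0x1234;	/* pretend this links to an L2 table */

		l2_table_unlink(&l1e, 0x00300000);
		return (0);
	}

In the diff itself, the flushed address is (m->pindex + i) << PTE1_SHIFT, which appears to reconstruct the base virtual address of the region whose L1 slot was just cleared; the sketch's explicit va parameter plays the same role.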