From 5a08df100b58911396e0cc1403f0504bc68461bd Mon Sep 17 00:00:00 2001
From: Brandon Bergren
Date: Mon, 29 Mar 2021 17:59:19 -0500
Subject: [PATCH] [PowerPC] Fix 32-bit Book-E panic due to pve leak

On an INVARIANTS kernel on 32-bit Book-E, we were panicking when running
the libproc tests. This was caused by extra pv entries being generated
accidentally by the pmap icache invalidation code.

Use the same VA (i.e. 0) when freeing the temporary mapping, instead of
some arbitrary address within the zero page. Failure to do this was
causing kernel-side icache syncing to leak pv entries when invalidating
the icache for a non-page-aligned address, which would later result in
pages erroneously showing up as mapped to vm_page.

This bug was introduced in r347354 in 2019.

Reviewed by:	jhibbits (in irc)
Sponsored by:	Tag1 Consulting, Inc.
---
 sys/powerpc/booke/pmap_32.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)

diff --git a/sys/powerpc/booke/pmap_32.c b/sys/powerpc/booke/pmap_32.c
index a9f8af0565f0..924eb223a2b6 100644
--- a/sys/powerpc/booke/pmap_32.c
+++ b/sys/powerpc/booke/pmap_32.c
@@ -748,14 +748,23 @@ mmu_booke_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
 		sync_sz = min(sync_sz, sz);
 		if (valid) {
 			if (!active) {
-				/* Create a mapping in the active pmap. */
+				/*
+				 * Create a mapping in the active pmap.
+				 *
+				 * XXX: We use the zero page here, because
+				 * it isn't likely to be in use.
+				 * If we ever decide to support
+				 * security.bsd.map_at_zero on Book-E, change
+				 * this to some other address that isn't
+				 * normally mappable.
+				 */
 				addr = 0;
 				m = PHYS_TO_VM_PAGE(pa);
 				PMAP_LOCK(pmap);
 				pte_enter(pmap, m, addr,
 				    PTE_SR | PTE_VALID, FALSE);
-				addr += (va & PAGE_MASK);
-				__syncicache((void *)addr, sync_sz);
+				__syncicache((void *)(addr + (va & PAGE_MASK)),
+				    sync_sz);
 				pte_remove(pmap, addr, PTBL_UNHOLD);
 				PMAP_UNLOCK(pmap);
 			} else
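
For reference, a minimal standalone sketch (not FreeBSD code) of the pattern the
fix applies. map_page(), unmap_page(), and sync_icache_range() are hypothetical
stand-ins for pte_enter(), pte_remove(), and __syncicache(). The point is that
the page offset is added only to the address handed to the cache-sync primitive,
while the temporary mapping is entered and removed with the same page-aligned VA,
so the teardown frees exactly the entry the setup created.

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SIZE 4096
	#define PAGE_MASK (PAGE_SIZE - 1)

	/* Hypothetical stand-ins; real code would manipulate page tables. */
	static void map_page(uintptr_t va)   { printf("map   va=%#lx\n", (unsigned long)va); }
	static void unmap_page(uintptr_t va) { printf("unmap va=%#lx\n", (unsigned long)va); }
	static void sync_icache_range(uintptr_t va, size_t sz)
	{
		printf("sync  va=%#lx len=%zu\n", (unsigned long)va, sz);
	}

	/*
	 * Sync the icache for a possibly unaligned target address through a
	 * temporary mapping at VA 0.  The offset within the page is applied
	 * only to the address passed to the cache-sync primitive; the mapping
	 * is created and torn down with the same page-aligned VA.
	 */
	static void sync_via_temp_mapping(uintptr_t target_va, size_t sz)
	{
		uintptr_t addr = 0;		/* temporary mapping VA */

		map_page(addr);
		sync_icache_range(addr + (target_va & PAGE_MASK), sz);
		unmap_page(addr);		/* same VA as map_page(), never offset */
	}

	int main(void)
	{
		sync_via_temp_mapping(0x1000c, 64);	/* non-page-aligned address */
		return 0;
	}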