diff --git a/sys/mips/include/pmap.h b/sys/mips/include/pmap.h
index 15b07d345fad..8392d8a1ca48 100644
--- a/sys/mips/include/pmap.h
+++ b/sys/mips/include/pmap.h
@@ -74,6 +74,7 @@ struct md_page {
 };
 
 #define	PV_TABLE_REF		0x02	/* referenced */
+#define	PV_MEMATTR_UNCACHEABLE	0x04
 
 #define	ASID_BITS		8
 #define	ASIDGEN_BITS		(32 - ASID_BITS)
@@ -165,7 +166,6 @@ extern vm_paddr_t dump_avail[PHYS_AVAIL_ENTRIES + 2];
 #define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
 #define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
 #define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
-#define	pmap_page_set_memattr(m, ma)	(void)0
 
 void pmap_bootstrap(void);
 void *pmap_mapdev(vm_paddr_t, vm_size_t);
@@ -179,6 +179,7 @@ void pmap_kenter_temporary_free(vm_paddr_t pa);
 void pmap_flush_pvcache(vm_page_t m);
 int pmap_emulate_modified(pmap_t pmap, vm_offset_t va);
 void pmap_grow_direct_page_cache(void);
+void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
 
 #endif /* _KERNEL */
 
diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
index 292ce8030770..7bf3ee08bc98 100644
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -314,6 +314,15 @@ pmap_lmem_unmap(void)
 }
 #endif /* !__mips_n64 */
 
+static __inline int
+is_cacheable_page(vm_paddr_t pa, vm_page_t m)
+{
+
+	return ((m->md.pv_flags & PV_MEMATTR_UNCACHEABLE) == 0 &&
+	    is_cacheable_mem(pa));
+
+}
+
 /*
  * Page table entry lookup routines.
  */
@@ -2009,7 +2018,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		newpte |= PTE_W;
 	if (is_kernel_pmap(pmap))
 		newpte |= PTE_G;
-	if (is_cacheable_mem(pa))
+	if (is_cacheable_page(pa, m))
 		newpte |= PTE_C_CACHE;
 	else
 		newpte |= PTE_C_UNCACHED;
@@ -2280,7 +2289,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		*pte |= PTE_MANAGED;
 
-	if (is_cacheable_mem(pa))
+	if (is_cacheable_page(pa, m))
 		*pte |= PTE_C_CACHE;
 	else
 		*pte |= PTE_C_UNCACHED;
@@ -2650,9 +2659,12 @@ pmap_quick_enter_page(vm_page_t m)
 
 	pa = VM_PAGE_TO_PHYS(m);
 
-	if (MIPS_DIRECT_MAPPABLE(pa))
-		return (MIPS_PHYS_TO_DIRECT(pa));
-
+	if (MIPS_DIRECT_MAPPABLE(pa)) {
+		if (m->md.pv_flags & PV_MEMATTR_UNCACHEABLE)
+			return (MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
+		else
+			return (MIPS_PHYS_TO_DIRECT(pa));
+	}
 	critical_enter();
 	sysm = &sysmap_lmem[PCPU_GET(cpuid)];
 
@@ -2660,7 +2672,7 @@ pmap_quick_enter_page(vm_page_t m)
 
 	pte = pmap_pte(kernel_pmap, sysm->base);
 	*pte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G |
-	    (is_cacheable_mem(pa) ? PTE_C_CACHE : PTE_C_UNCACHED);
+	    (is_cacheable_page(pa, m) ? PTE_C_CACHE : PTE_C_UNCACHED);
 	sysm->valid1 = 1;
 
 	return (sysm->base);
@@ -3520,3 +3532,27 @@ pmap_flush_pvcache(vm_page_t m)
 		}
 	}
 }
+
+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+
+	/*
+	 * It appears that this function can only be called before any mappings
+	 * for the page are established.  If this ever changes, this code will
+	 * need to walk the pv_list and make each of the existing mappings
+	 * uncacheable, being careful to sync caches and PTEs (and maybe
+	 * invalidate TLB?) for any current mapping it modifies.
+	 */
+	if (TAILQ_FIRST(&m->md.pv_list) != NULL)
+		panic("Can't change memattr on page with existing mappings");
+
+	/*
+	 * The only memattr we support is UNCACHEABLE; translate the (semi-)MI
+	 * representation of that into our internal flag in the page MD struct.
+	 */
+	if (ma == VM_MEMATTR_UNCACHEABLE)
+		m->md.pv_flags |= PV_MEMATTR_UNCACHEABLE;
+	else
+		m->md.pv_flags &= ~PV_MEMATTR_UNCACHEABLE;
+}
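
Usage note (not part of the patch): with this change, the memory attribute must
be set before the page's first mapping is created, or the new
pmap_page_set_memattr() panics on the non-empty pv_list. A minimal,
hypothetical caller sketch follows; map_page_uncached() is an illustrative name
and not an existing kernel function, and it assumes a page with no mappings yet.

	/*
	 * Hypothetical sketch: obtain an uncached kernel mapping of a page.
	 */
	static vm_offset_t
	map_page_uncached(vm_page_t m)
	{

		/* Record the attribute while the pv_list is still empty. */
		pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);

		/*
		 * Later mappings honor the flag: a direct-mappable page
		 * comes back via MIPS_PHYS_TO_DIRECT_UNCACHED(), and
		 * TLB-backed mappings get PTE_C_UNCACHED instead of
		 * PTE_C_CACHE.
		 */
		return (pmap_quick_enter_page(m));
	}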