Implement pmap_change_attr and related APIs on MIPS

On platforms that have an uncached-accelerated cache attribute, map it
to VM_MEMATTR_WRITE_COMBINING. Otherwise, leave write combining
undefined.

Reviewed by:	adrian, jhb (glance)
Differential Revision:	https://reviews.freebsd.org/D8894
Author:	Alexander Kabaev
Date:	2016-12-28 02:55:26 +00:00
Parent:	e67ac203a6
Commit:	a0e41d3784
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=310650
5 changed files with 165 additions and 60 deletions
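
As context for the per-file diffs below, a minimal sketch (not part of the commit) of how a driver might consume the new attribute. The function name and its arguments are hypothetical; only pmap_mapdev_attr(), pmap_mapdev() and VM_MEMATTR_WRITE_COMBINING come from this change.

/*
 * Hypothetical consumer: map a device aperture write-combining where the
 * platform provides an uncached-accelerated CCA, else plain uncached.
 */
static void *
map_aperture(vm_paddr_t pa, vm_size_t sz)
{
#ifdef VM_MEMATTR_WRITE_COMBINING
	return (pmap_mapdev_attr(pa, sz, VM_MEMATTR_WRITE_COMBINING));
#else
	return (pmap_mapdev(pa, sz));
#endif
}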

View File

@@ -171,6 +171,10 @@
#define MIPS_CCA_CACHED MIPS_CCA_CCS
#endif
#if defined(CPU_XBURST)
#define MIPS_CCA_UA 0x01
#endif
#ifndef MIPS_CCA_UNCACHED
#define MIPS_CCA_UNCACHED MIPS_CCA_UC
#endif
@@ -188,6 +192,16 @@
#endif
#endif
/*
* Use uncached-accelerated mode for write-combining maps, if one is defined;
* otherwise fall back to uncached.
*/
#ifndef MIPS_CCA_WC
#ifdef MIPS_CCA_UA
#define MIPS_CCA_WC MIPS_CCA_UA
#endif
#endif
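
The cascade above only yields MIPS_CCA_WC when the platform supplies an uncached-accelerated mode. As an illustration (not in the commit), a write-combining XKPHYS direct-map helper could be built the same way as the existing cached/uncached ones; the macro name here is made up.

#if defined(__mips_n64) && defined(MIPS_CCA_WC)
#define	MIPS_PHYS_TO_XKPHYS_WC(x)	MIPS_PHYS_TO_XKPHYS(MIPS_CCA_WC, (x))
#endif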
#define MIPS_PHYS_TO_XKPHYS(cca,x) \
((0x2ULL << 62) | ((unsigned long long)(cca) << 59) | (x))
#define MIPS_PHYS_TO_XKPHYS_CACHED(x) \

View File

@@ -74,7 +74,8 @@ struct md_page {
};
#define PV_TABLE_REF 0x02 /* referenced */
#define PV_MEMATTR_UNCACHEABLE 0x04
#define PV_MEMATTR_MASK 0xf0 /* store vm_memattr_t here */
#define PV_MEMATTR_SHIFT 0x04
#define ASID_BITS 8
#define ASIDGEN_BITS (32 - ASID_BITS)
@@ -163,22 +164,24 @@ extern vm_offset_t virtual_end;
extern vm_paddr_t dump_avail[PHYS_AVAIL_ENTRIES + 2];
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_get_memattr(m) (((m)->md.pv_flags & PV_MEMATTR_MASK) >> PV_MEMATTR_SHIFT)
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0)
void pmap_bootstrap(void);
void *pmap_mapdev(vm_paddr_t, vm_size_t);
void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
vm_offset_t pmap_steal_memory(vm_size_t size);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr);
void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t attr);
void pmap_kremove(vm_offset_t va);
void *pmap_kenter_temporary(vm_paddr_t pa, int i);
void pmap_kenter_temporary_free(vm_paddr_t pa);
void pmap_flush_pvcache(vm_page_t m);
int pmap_emulate_modified(pmap_t pmap, vm_offset_t va);
void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
int pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
#endif /* _KERNEL */
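
The memattr of a page now lives in bits 4..7 of md.pv_flags, packed and unpacked via PV_MEMATTR_SHIFT/PV_MEMATTR_MASK and pmap_page_get_memattr(). A small round-trip sketch (illustration only, not in the diff; the helper name is made up):

static __inline void
pv_memattr_roundtrip_check(vm_page_t m)
{
	/* Store an attribute without disturbing the low flag bits. */
	m->md.pv_flags = (m->md.pv_flags & ~PV_MEMATTR_MASK) |
	    ((VM_MEMATTR_UNCACHEABLE << PV_MEMATTR_SHIFT) & PV_MEMATTR_MASK);
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_UNCACHEABLE,
	    ("memattr did not round-trip"));
}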

View File

@@ -132,8 +132,10 @@ typedef pt_entry_t *pd_entry_t;
* it is matched.
*/
#define PTE_C(attr) ((attr & 0x07) << 3)
#define PTE_C_MASK (PTE_C(0x07))
#define PTE_C_UNCACHED (PTE_C(MIPS_CCA_UNCACHED))
#define PTE_C_CACHE (PTE_C(MIPS_CCA_CACHED))
#define PTE_C_WC (PTE_C(MIPS_CCA_WC))
#define PTE_D 0x04
#define PTE_V 0x02
#define PTE_G 0x01
@@ -158,6 +160,7 @@ typedef pt_entry_t *pd_entry_t;
#define pte_clear(pte, bit) (*(pte) &= ~(bit))
#define pte_set(pte, bit) (*(pte) |= (bit))
#define pte_test(pte, bit) ((*(pte) & (bit)) == (bit))
#define pte_cache_bits(pte) ((*(pte) >> 3) & 0x07)
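
pte_cache_bits() simply undoes PTE_C(): the CCA sits in bits 3..5 of the PTE, above PTE_D/PTE_V/PTE_G. A quick sanity sketch (not part of the diff; the helper name is made up):

static __inline void
pte_cca_roundtrip_check(void)
{
	pt_entry_t pte;

	pte = PTE_C(MIPS_CCA_UNCACHED) | PTE_D | PTE_V | PTE_G;
	KASSERT(pte_cache_bits(&pte) == MIPS_CCA_UNCACHED,
	    ("PTE_C()/pte_cache_bits() disagree"));
}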
/* Assembly support for PTE access */
#ifdef LOCORE

View File

@@ -32,7 +32,11 @@
#include <machine/pte.h>
/* Memory attributes. */
#define VM_MEMATTR_UNCACHEABLE ((vm_memattr_t)PTE_C_UNCACHED)
#define VM_MEMATTR_DEFAULT ((vm_memattr_t)PTE_C_CACHE)
#define VM_MEMATTR_UNCACHEABLE ((vm_memattr_t)MIPS_CCA_UNCACHED)
#define VM_MEMATTR_WRITE_BACK ((vm_memattr_t)MIPS_CCA_CACHED)
#define VM_MEMATTR_DEFAULT VM_MEMATTR_WRITE_BACK
#ifdef MIPS_CCA_WC
#define VM_MEMATTR_WRITE_COMBINING ((vm_memattr_t)MIPS_CCA_WC)
#endif
#endif /* !_MACHINE_VM_H_ */
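
With this change the vm_memattr_t values are raw CCA numbers rather than pre-shifted PTE bits, which is what lets pmap.c feed them straight into PTE_C(). A compile-time check along these lines would hold (illustration only; it assumes CTASSERT and pte.h are visible at the point of use):

CTASSERT(PTE_C(VM_MEMATTR_DEFAULT) == PTE_C_CACHE);
CTASSERT(PTE_C(VM_MEMATTR_UNCACHEABLE) == PTE_C_UNCACHED);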

View File

@@ -189,10 +189,10 @@ static void pmap_update_page_action(void *arg);
* The highmem area does not have a KSEG0 mapping, and we need a mechanism to
* do temporary per-CPU mappings for pmap_zero_page, pmap_copy_page etc.
*
* At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To
* At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To
* access a highmem physical address on a CPU, we map the physical address to
* the reserved virtual address for the CPU in the kernel pagetable. This is
* done with interrupts disabled(although a spinlock and sched_pin would be
* the reserved virtual address for the CPU in the kernel pagetable. This is
* done with interrupts disabled(although a spinlock and sched_pin would be
* sufficient).
*/
struct local_sysmaps {
@@ -303,7 +303,7 @@ pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
return (0);
}
static __inline vm_offset_t
static __inline vm_offset_t
pmap_lmem_unmap(void)
{
@@ -312,12 +312,18 @@ pmap_lmem_unmap(void)
#endif /* !__mips_n64 */
static __inline int
is_cacheable_page(vm_paddr_t pa, vm_page_t m)
pmap_pte_cache_bits(vm_paddr_t pa, vm_page_t m)
{
vm_memattr_t ma;
return ((m->md.pv_flags & PV_MEMATTR_UNCACHEABLE) == 0 &&
is_cacheable_mem(pa));
ma = pmap_page_get_memattr(m);
if (ma == VM_MEMATTR_WRITE_BACK && !is_cacheable_mem(pa))
ma = VM_MEMATTR_UNCACHEABLE;
return PTE_C(ma);
}
#define PMAP_PTE_SET_CACHE_BITS(pte, pa, m) { \
pte &= ~PTE_C_MASK; \
pte |= pmap_pte_cache_bits(pa, m); \
}
/*
@@ -359,7 +365,7 @@ pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
return (pdpe);
}
static __inline
static __inline
pd_entry_t *pmap_pde(pmap_t pmap, vm_offset_t va)
{
@@ -423,7 +429,7 @@ pmap_steal_memory(vm_size_t size)
* Bootstrap the system enough to run with virtual memory. This
* assumes that the phys_avail array has been initialized.
*/
static void
static void
pmap_create_kernel_pagetable(void)
{
int i, j;
@@ -486,7 +492,7 @@ void
pmap_bootstrap(void)
{
int i;
int need_local_mappings = 0;
int need_local_mappings = 0;
/* Sort. */
again:
@@ -600,7 +606,7 @@ pmap_page_init(vm_page_t m)
{
TAILQ_INIT(&m->md.pv_list);
m->md.pv_flags = 0;
m->md.pv_flags = VM_MEMATTR_DEFAULT << PV_MEMATTR_SHIFT;
}
/*
@@ -635,8 +641,8 @@ pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
pmap->pm_asid[cpu].gen = 0;
}
cpuid = PCPU_GET(cpuid);
/*
* XXX: barrier/locking for active?
/*
* XXX: barrier/locking for active?
*
* Take a snapshot of active here, any further changes are ignored.
* tlb update/invalidate should be harmless on inactive CPUs
@@ -819,7 +825,7 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
* add a wired page to the kva
*/
void
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr)
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
{
pt_entry_t *pte;
pt_entry_t opte, npte;
@@ -830,7 +836,7 @@ pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr)
pte = pmap_pte(kernel_pmap, va);
opte = *pte;
npte = TLBLO_PA_TO_PFN(pa) | attr | PTE_D | PTE_V | PTE_G;
npte = TLBLO_PA_TO_PFN(pa) | PTE_C(ma) | PTE_D | PTE_V | PTE_G;
*pte = npte;
if (pte_test(&opte, PTE_V) && opte != npte)
pmap_update_page(kernel_pmap, va, npte);
@@ -843,7 +849,7 @@ pmap_kenter(vm_offset_t va, vm_paddr_t pa)
KASSERT(is_cacheable_mem(pa),
("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa));
pmap_kenter_attr(va, pa, PTE_C_CACHE);
pmap_kenter_attr(va, pa, VM_MEMATTR_DEFAULT);
}
/*
@@ -1144,11 +1150,11 @@ _pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags)
int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT);
int pdeindex = ptepindex & (NPDEPG - 1);
vm_page_t pg;
pdep = &pmap->pm_segtab[segindex];
if (*pdep == NULL) {
if (*pdep == NULL) {
/* recurse for allocating page dir */
if (_pmap_allocpte(pmap, NUPDE + segindex,
if (_pmap_allocpte(pmap, NUPDE + segindex,
flags) == NULL) {
/* alloc failed, release current */
--m->wire_count;
@@ -1680,7 +1686,7 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va,
* pmap_remove_pte: do the things to unmap a page in a process
*/
static int
pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
pd_entry_t pde)
{
pt_entry_t oldpte;
@@ -1864,7 +1870,7 @@ pmap_remove_all(vm_page_t m)
PMAP_LOCK(pmap);
/*
* If it's last mapping writeback all caches from
* If it's last mapping writeback all caches from
* the page being destroyed
*/
if (TAILQ_NEXT(pv, pv_list) == NULL)
@@ -2030,10 +2036,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
newpte |= PTE_W;
if (is_kernel_pmap(pmap))
newpte |= PTE_G;
if (is_cacheable_page(pa, m))
newpte |= PTE_C_CACHE;
else
newpte |= PTE_C_UNCACHED;
PMAP_PTE_SET_CACHE_BITS(newpte, pa, m);
mpte = NULL;
@@ -2218,7 +2221,7 @@ static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, vm_page_t mpte)
{
pt_entry_t *pte;
pt_entry_t *pte, npte;
vm_paddr_t pa;
KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
@@ -2297,18 +2300,16 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
/*
* Now validate mapping with RO protection
*/
*pte = PTE_RO | TLBLO_PA_TO_PFN(pa) | PTE_V;
npte = PTE_RO | TLBLO_PA_TO_PFN(pa) | PTE_V;
if ((m->oflags & VPO_UNMANAGED) == 0)
*pte |= PTE_MANAGED;
npte |= PTE_MANAGED;
if (is_cacheable_page(pa, m))
*pte |= PTE_C_CACHE;
else
*pte |= PTE_C_UNCACHED;
PMAP_PTE_SET_CACHE_BITS(npte, pa, m);
if (is_kernel_pmap(pmap))
*pte |= PTE_G;
*pte = npte | PTE_G;
else {
*pte = npte;
/*
* Sync I & D caches. Do this only if the target pmap
* belongs to the current process. Otherwise, an
@@ -2649,12 +2650,12 @@ pmap_quick_enter_page(vm_page_t m)
#else
vm_paddr_t pa;
struct local_sysmaps *sysm;
pt_entry_t *pte;
pt_entry_t *pte, npte;
pa = VM_PAGE_TO_PHYS(m);
if (MIPS_DIRECT_MAPPABLE(pa)) {
if (m->md.pv_flags & PV_MEMATTR_UNCACHEABLE)
if (pmap_page_get_memattr(m) != VM_MEMATTR_WRITE_BACK)
return (MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
else
return (MIPS_PHYS_TO_DIRECT(pa));
@@ -2665,8 +2666,9 @@ pmap_quick_enter_page(vm_page_t m)
KASSERT(sysm->valid1 == 0, ("pmap_quick_enter_page: PTE busy"));
pte = pmap_pte(kernel_pmap, sysm->base);
*pte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G |
(is_cacheable_page(pa, m) ? PTE_C_CACHE : PTE_C_UNCACHED);
npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G;
PMAP_PTE_SET_CACHE_BITS(npte, pa, m);
*pte = npte;
sysm->valid1 = 1;
return (sysm->base);
@@ -3146,26 +3148,26 @@ pmap_is_referenced(vm_page_t m)
* Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
*/
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
vm_offset_t va, tmpva, offset;
/*
* KSEG1 maps only first 512M of phys address space. For
/*
* KSEG1 maps only first 512M of phys address space. For
* pa > 0x20000000 we should make a proper mapping using pmap_kenter.
*/
if (MIPS_DIRECT_MAPPABLE(pa + size - 1))
if (MIPS_DIRECT_MAPPABLE(pa + size - 1) && ma == VM_MEMATTR_UNCACHEABLE)
return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
else {
offset = pa & PAGE_MASK;
size = roundup(size + offset, PAGE_SIZE);
va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
pa = trunc_page(pa);
for (tmpva = va; size > 0;) {
pmap_kenter_attr(tmpva, pa, PTE_C_UNCACHED);
pmap_kenter_attr(tmpva, pa, ma);
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;
pa += PAGE_SIZE;
@@ -3175,6 +3177,12 @@ pmap_mapdev(vm_paddr_t pa, vm_size_t size)
return ((void *)(va + offset));
}
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{
return pmap_mapdev_attr(pa, size, VM_MEMATTR_UNCACHEABLE);
}
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
@@ -3220,7 +3228,7 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
* This may falsely report the given address as
* MINCORE_REFERENCED. Unfortunately, due to the lack of
* per-PTE reference information, it is impossible to
* determine if the address is MINCORE_REFERENCED.
* determine if the address is MINCORE_REFERENCED.
*/
m = PHYS_TO_VM_PAGE(pa);
if ((m->aflags & PGA_REFERENCED) != 0)
@@ -3500,7 +3508,7 @@ pmap_kextract(vm_offset_t va)
mapped = (va >= MIPS_KSEG2_START || va < MIPS_KSEG2_END);
#if defined(__mips_n64)
mapped = mapped || (va >= MIPS_XKSEG_START || va < MIPS_XKSEG_END);
#endif
#endif
/*
* Kernel virtual.
*/
@@ -3524,7 +3532,7 @@ pmap_kextract(vm_offset_t va)
}
void
void
pmap_flush_pvcache(vm_page_t m)
{
pv_entry_t pv;
@@ -3551,12 +3559,85 @@ pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
if (TAILQ_FIRST(&m->md.pv_list) != NULL)
panic("Can't change memattr on page with existing mappings");
/*
* The only memattr we support is UNCACHEABLE, translate the (semi-)MI
* representation of that into our internal flag in the page MD struct.
*/
if (ma == VM_MEMATTR_UNCACHEABLE)
m->md.pv_flags |= PV_MEMATTR_UNCACHEABLE;
else
m->md.pv_flags &= ~PV_MEMATTR_UNCACHEABLE;
/* Clean memattr portion of pv_flags */
m->md.pv_flags &= ~PV_MEMATTR_MASK;
m->md.pv_flags |= (ma << PV_MEMATTR_SHIFT) & PV_MEMATTR_MASK;
}
static __inline void
pmap_pte_attr(pt_entry_t *pte, vm_memattr_t ma)
{
u_int npte;
npte = *(u_int *)pte;
npte &= ~PTE_C_MASK;
npte |= PTE_C(ma);
*pte = npte;
}
int
pmap_change_attr(vm_offset_t sva, vm_size_t size, vm_memattr_t ma)
{
pd_entry_t *pde, *pdpe;
pt_entry_t *pte;
vm_offset_t ova, eva, va, va_next;
pmap_t pmap;
ova = sva;
eva = sva + size;
if (eva < sva)
return (EINVAL);
pmap = kernel_pmap;
PMAP_LOCK(pmap);
for (; sva < eva; sva = va_next) {
pdpe = pmap_segmap(pmap, sva);
#ifdef __mips_n64
if (*pdpe == 0) {
va_next = (sva + NBSEG) & ~SEGMASK;
if (va_next < sva)
va_next = eva;
continue;
}
#endif
va_next = (sva + NBPDR) & ~PDRMASK;
if (va_next < sva)
va_next = eva;
pde = pmap_pdpe_to_pde(pdpe, sva);
if (*pde == NULL)
continue;
/*
* Limit our scan to either the end of the va represented
* by the current page table page, or to the end of the
* range being removed.
*/
if (va_next > eva)
va_next = eva;
va = va_next;
for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
sva += PAGE_SIZE) {
if (!pte_test(pte, PTE_V) || pte_cache_bits(pte) == ma) {
if (va != va_next) {
pmap_invalidate_range(pmap, va, sva);
va = va_next;
}
continue;
}
if (va == va_next)
va = sva;
pmap_pte_attr(pte, ma);
}
if (va != va_next)
pmap_invalidate_range(pmap, va, sva);
}
PMAP_UNLOCK(pmap);
/* Flush caches to be on the safe side */
mips_dcache_wbinv_range(ova, size);
return 0;
}
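
Finally, a hypothetical caller of the new pmap_change_attr() (not part of the commit): retype an existing, page-aligned kernel mapping as write-combining where the platform defines it. The wrapper name and its arguments are made up for illustration.

static int
make_range_write_combining(vm_offset_t va, vm_size_t sz)
{
#ifdef VM_MEMATTR_WRITE_COMBINING
	return (pmap_change_attr(va, sz, VM_MEMATTR_WRITE_COMBINING));
#else
	return (EOPNOTSUPP);
#endif
}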