powerpc/booke: Fix pmap_mapdev_attr() for multi-TLB1 entry mappings

Also, fix pmap_change_attr() to ignore non-kernel mappings.

* Fix a masking bug in mmu_booke_mapdev_attr() that caused it to align
  mappings to the smallest mapping alignment instead of the largest.  This
  could leave mappings pessimally aligned, using more TLB entries than
  necessary (see the sketch after this list).
* Return existing mappings from mmu_booke_mapdev_attr() that span more than
  one TLB1 entry.  The drm-current-kmod drivers map discontiguous segments
  of the GPU, resulting in more than one TLB entry being used to satisfy the
  mapping.
* Ignore non-kernel mappings in mmu_booke_change_attr().  A bug in the
  linuxkpi layer causes it to try to change attributes on physical
  addresses rather than virtual ones.  amd64 doesn't encounter this
  because it, too, ignores non-kernel mappings.
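
To make the masking fix concrete, here is a minimal userland sketch.  It is
not part of the commit: the pa/size values are invented, the kernel's
ffsl()/flsl() are approximated with compiler builtins, and it assumes LP64
(64-bit long), as on powerpc64.  It evaluates the old and new alignment
seeds and the resulting sz:

/*
 * Hypothetical sketch comparing the old and new sz computations in
 * mmu_booke_mapdev_attr().  All values and helper names are made up
 * for illustration.
 */
#include <stdio.h>

#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

static int ffsl_(long v) { return (v == 0 ? 0 : __builtin_ffsl(v)); }
static int flsl_(long v) { return (v == 0 ? 0 : 64 - __builtin_clzl(v)); }

/* The "sz ? min(...) : ..." step that follows the masking in the kernel. */
static int
seed_to_sz(int seed, long size)
{
	return (seed ? MIN(ROUNDUP(seed + 3, 4), flsl_(size) - 1) :
	    flsl_(size) - 1);
}

int
main(void)
{
	long pa = 0xE0151000L;		/* made-up device base, 4KB-aligned */
	long size = 0x00400000L;	/* 4MB mapping */
	long mask = (1L << flsl_(size - 1)) - 1;	/* bits 0-21 */

	/* Old: seed from pa's set bits *inside* the window (lowest: bit 12). */
	int old_sz = seed_to_sz(ffsl_(mask & pa), size);
	/* New: mask those off; seed from pa's bits above the window. */
	int new_sz = seed_to_sz(ffsl_(~mask & pa), size);

	printf("old sz = %d, new sz = %d\n", old_sz, new_sz);
	/* Prints "old sz = 16, new sz = 22". */
	return (0);
}

With the old mask the chosen VA only mirrors pa modulo 1 << 16, so the
entry walk that follows can rarely use anything larger than 64KB.  The new
mask lets the min() against flsl(size) - 1 clamp sz to the full 22-bit
(4MB) window, so the walk can grow entries to the largest size the
alignment allows.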

With this it's possible to use drm-current-kmod on Book-E.
Justin Hibbits 2019-11-06 04:40:12 +00:00
parent 7ef518c05a
commit b5d5429449
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=354369


@@ -3441,31 +3441,57 @@ mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
 	return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
 }
 
+static int
+tlb1_find_pa(vm_paddr_t pa, tlb_entry_t *e)
+{
+	int i;
+
+	for (i = 0; i < TLB1_ENTRIES; i++) {
+		tlb1_read_entry(e, i);
+		if ((e->mas1 & MAS1_VALID) == 0)
+			continue;
+		if (e->phys == pa)
+			return (i);
+	}
+	return (-1);
+}
+
 static void *
 mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
 {
 	tlb_entry_t e;
+	vm_paddr_t tmppa;
 	void *res;
 	uintptr_t va, tmpva;
 	vm_size_t sz;
 	int i;
+	int wimge;
 
 	/*
-	 * Check if this is premapped in TLB1. Note: this should probably also
-	 * check whether a sequence of TLB1 entries exist that match the
-	 * requirement, but now only checks the easy case.
+	 * Check if this is premapped in TLB1.
 	 */
+	sz = size;
+	tmppa = pa;
+	va = ~0;
+	wimge = tlb_calc_wimg(pa, ma);
 	for (i = 0; i < TLB1_ENTRIES; i++) {
 		tlb1_read_entry(&e, i);
 		if (!(e.mas1 & MAS1_VALID))
 			continue;
-		if (pa >= e.phys &&
-		    (pa + size) <= (e.phys + e.size) &&
-		    (ma == VM_MEMATTR_DEFAULT ||
-		     tlb_calc_wimg(pa, ma) ==
-		     (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED))))
-			return (void *)(e.virt +
-			    (vm_offset_t)(pa - e.phys));
+		if (wimge != (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED)))
+			continue;
+		if (tmppa >= e.phys && tmppa < e.phys + e.size) {
+			va = e.virt + (pa - e.phys);
+			tmppa = e.phys + e.size;
+			sz -= MIN(sz, e.size);
+			while (sz > 0 && (i = tlb1_find_pa(tmppa, &e)) != -1) {
+				if (wimge != (e.mas2 & (MAS2_WIMGE_MASK & ~_TLB_ENTRY_SHARED)))
+					break;
+				sz -= MIN(sz, e.size);
+				tmppa = e.phys + e.size;
+			}
+			if (sz != 0)
+				break;
+			return ((void *)va);
+		}
 	}
 
 	size = roundup(size, PAGE_SIZE);
@@ -3489,7 +3515,7 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
 	 */
 	do {
 		tmpva = tlb1_map_base;
-		sz = ffsl(((1 << flsl(size-1)) - 1) & pa);
+		sz = ffsl((~((1 << flsl(size-1)) - 1)) & pa);
 		sz = sz ? min(roundup(sz + 3, 4), flsl(size) - 1) : flsl(size) - 1;
 		va = roundup(tlb1_map_base, 1 << sz) | (((1 << sz) - 1) & pa);
 #ifdef __powerpc64__
@@ -3569,6 +3595,23 @@ mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
 	int i, j;
 	tlb_entry_t e;
 
+	addr = trunc_page(addr);
+
+	/* Only allow changes to mapped kernel addresses.  This includes:
+	 * - KVA
+	 * - DMAP (powerpc64)
+	 * - Device mappings
+	 */
+	if (addr <= VM_MAXUSER_ADDRESS ||
+#ifdef __powerpc64__
+	    (addr >= tlb1_map_base && addr < DMAP_BASE_ADDRESS) ||
+	    (addr > DMAP_MAX_ADDRESS && addr < VM_MIN_KERNEL_ADDRESS) ||
+#else
+	    (addr >= tlb1_map_base && addr < VM_MIN_KERNEL_ADDRESS) ||
+#endif
+	    (addr > VM_MAX_KERNEL_ADDRESS))
+		return (EINVAL);
+
 	/* Check TLB1 mappings */
 	for (i = 0; i < TLB1_ENTRIES; i++) {
 		tlb1_read_entry(&e, i);