From 1bde6707a74242f38dbef00a4205cf7b171924da Mon Sep 17 00:00:00 2001
From: nwhitehorn
Date: Mon, 29 Jan 2018 04:33:41 +0000
Subject: [PATCH] Remove hard-coded trap-handling logic involving the segmented
 memory model used with hashed page tables on AIM and place it into a new,
 modular pmap function called pmap_decode_kernel_ptr(). This function is the
 inverse of pmap_map_user_ptr(). With POWER9 radix tables, which mapping to
 use becomes more complex than just AIM/BOOKE and it is best to have it in
 the same place as pmap_map_user_ptr().

Reviewed by:	jhibbits
---
 sys/powerpc/aim/mmu_oea.c           | 28 ++++++++++++++++++++++++++++
 sys/powerpc/aim/mmu_oea64.c         | 28 ++++++++++++++++++++++++++++
 sys/powerpc/booke/pmap.c            | 22 ++++++++++++++++++++++
 sys/powerpc/include/pmap.h          |  2 ++
 sys/powerpc/powerpc/mmu_if.m        | 16 ++++++++++++++++
 sys/powerpc/powerpc/pmap_dispatch.c |  8 ++++++++
 sys/powerpc/powerpc/trap.c          | 27 +++++++++------------------
 7 files changed, 113 insertions(+), 18 deletions(-)

diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c
index bed6224739d6..2f990479ae2e 100644
--- a/sys/powerpc/aim/mmu_oea.c
+++ b/sys/powerpc/aim/mmu_oea.c
@@ -322,6 +322,8 @@ vm_offset_t moea_quick_enter_page(mmu_t mmu, vm_page_t m);
 void moea_quick_remove_page(mmu_t mmu, vm_offset_t addr);
 static int moea_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
     void **kaddr, size_t ulen, size_t *klen);
+static int moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+    int *is_user, vm_offset_t *decoded_addr);
 
 
 static mmu_method_t moea_methods[] = {
@@ -374,6 +376,7 @@ static mmu_method_t moea_methods[] = {
 	MMUMETHOD(mmu_scan_init,	moea_scan_init),
 	MMUMETHOD(mmu_dumpsys_map,	moea_dumpsys_map),
 	MMUMETHOD(mmu_map_user_ptr,	moea_map_user_ptr),
+	MMUMETHOD(mmu_decode_kernel_ptr, moea_decode_kernel_ptr),
 
 	{ 0, 0 }
 };
@@ -1587,6 +1590,31 @@ moea_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
 	return (0);
 }
 
+/*
+ * Figure out where a given kernel pointer (usually in a fault) points
+ * to from the VM's perspective, potentially remapping into userland's
+ * address space.
+ */
+static int
+moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+    vm_offset_t *decoded_addr)
+{
+	vm_offset_t user_sr;
+
+	if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
+		user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm;
+		addr &= ADDR_PIDX | ADDR_POFF;
+		addr |= user_sr << ADDR_SR_SHFT;
+		*decoded_addr = addr;
+		*is_user = 1;
+	} else {
+		*decoded_addr = addr;
+		*is_user = 0;
+	}
+
+	return (0);
+}
+
 /*
  * Map a range of physical addresses into kernel virtual address space.
  *
diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c
index b42d0a62732a..8141c7e74a8e 100644
--- a/sys/powerpc/aim/mmu_oea64.c
+++ b/sys/powerpc/aim/mmu_oea64.c
@@ -288,6 +288,8 @@ vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page_t m);
 void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr);
 static int moea64_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
     void **kaddr, size_t ulen, size_t *klen);
+static int moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+    int *is_user, vm_offset_t *decoded_addr);
 
 
 static mmu_method_t moea64_methods[] = {
@@ -339,6 +341,7 @@ static mmu_method_t moea64_methods[] = {
 	MMUMETHOD(mmu_scan_init,	moea64_scan_init),
 	MMUMETHOD(mmu_dumpsys_map,	moea64_dumpsys_map),
 	MMUMETHOD(mmu_map_user_ptr,	moea64_map_user_ptr),
+	MMUMETHOD(mmu_decode_kernel_ptr, moea64_decode_kernel_ptr),
 
 	{ 0, 0 }
 };
@@ -1909,6 +1912,31 @@ moea64_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
 	return (0);
 }
 
+/*
+ * Figure out where a given kernel pointer (usually in a fault) points
+ * to from the VM's perspective, potentially remapping into userland's
+ * address space.
+ */
+static int
+moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+    vm_offset_t *decoded_addr)
+{
+	vm_offset_t user_sr;
+
+	if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
+		user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm;
+		addr &= ADDR_PIDX | ADDR_POFF;
+		addr |= user_sr << ADDR_SR_SHFT;
+		*decoded_addr = addr;
+		*is_user = 1;
+	} else {
+		*decoded_addr = addr;
+		*is_user = 0;
+	}
+
+	return (0);
+}
+
 /*
  * Map a range of physical addresses into kernel virtual address space.
  *
diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 69b24ad26139..e6c207ef4552 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -382,6 +382,8 @@ static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
 	vm_size_t sz, vm_memattr_t mode);
 static int mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm,
     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
+static int mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+    int *is_user, vm_offset_t *decoded_addr);
 
 
 static mmu_method_t mmu_booke_methods[] = {
@@ -436,6 +438,7 @@ static mmu_method_t mmu_booke_methods[] = {
 	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),
 	MMUMETHOD(mmu_change_attr,	mmu_booke_change_attr),
 	MMUMETHOD(mmu_map_user_ptr,	mmu_booke_map_user_ptr),
+	MMUMETHOD(mmu_decode_kernel_ptr, mmu_booke_decode_kernel_ptr),
 
 	/* dumpsys() support */
 	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
@@ -2291,6 +2294,25 @@ mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
 	return (0);
 }
 
+/*
+ * Figure out where a given kernel pointer (usually in a fault) points
+ * to from the VM's perspective, potentially remapping into userland's
+ * address space.
+ */
+static int
+mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+    vm_offset_t *decoded_addr)
+{
+
+	if (addr < VM_MAXUSER_ADDRESS)
+		*is_user = 1;
+	else
+		*is_user = 0;
+
+	*decoded_addr = addr;
+	return (0);
+}
+
 /*
  * Initialize pmap associated with process 0.
  */
diff --git a/sys/powerpc/include/pmap.h b/sys/powerpc/include/pmap.h
index a5c94346f7ea..d96de742e500 100644
--- a/sys/powerpc/include/pmap.h
+++ b/sys/powerpc/include/pmap.h
@@ -262,6 +262,8 @@ void		pmap_page_set_memattr(vm_page_t, vm_memattr_t);
 int		pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
 int		pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr,
 		    void **kaddr, size_t ulen, size_t *klen);
+int		pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user,
+		    vm_offset_t *decoded_addr);
 void		pmap_deactivate(struct thread *);
 vm_paddr_t	pmap_kextract(vm_offset_t);
 int		pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
diff --git a/sys/powerpc/powerpc/mmu_if.m b/sys/powerpc/powerpc/mmu_if.m
index 3f9ca8be125d..4320fc7299ed 100644
--- a/sys/powerpc/powerpc/mmu_if.m
+++ b/sys/powerpc/powerpc/mmu_if.m
@@ -839,6 +839,21 @@ METHOD int map_user_ptr {
 	size_t	*_klen;
 };
 
+/**
+ * @brief Decode a kernel pointer, as visible to the current thread,
+ * by setting whether it corresponds to a user or kernel address and
+ * the address in the respective memory maps to which the address as
+ * seen in the kernel corresponds. This is essentially the inverse of
+ * MMU_MAP_USER_PTR() above and is used in kernel-space fault handling.
+ * Returns 0 on success or EFAULT if the address could not be mapped.
+ */
+METHOD int decode_kernel_ptr {
+	mmu_t		_mmu;
+	vm_offset_t	addr;
+	int		*is_user;
+	vm_offset_t	*decoded_addr;
+};
+
 /**
  * @brief Reverse-map a kernel virtual address
  *
@@ -998,3 +1013,4 @@ METHOD int change_attr {
 	vm_size_t	_sz;
 	vm_memattr_t	_mode;
 } DEFAULT mmu_null_change_attr;
+
diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c
index 09ea014b4995..709ba3a3e37d 100644
--- a/sys/powerpc/powerpc/pmap_dispatch.c
+++ b/sys/powerpc/powerpc/pmap_dispatch.c
@@ -520,6 +520,14 @@ pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr, void **kaddr,
 	return (MMU_MAP_USER_PTR(mmu_obj, pm, uaddr, kaddr, ulen, klen));
 }
 
+int
+pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user, vm_offset_t *decoded)
+{
+
+	CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
+	return (MMU_DECODE_KERNEL_PTR(mmu_obj, addr, is_user, decoded));
+}
+
 boolean_t
 pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
 {
diff --git a/sys/powerpc/powerpc/trap.c b/sys/powerpc/powerpc/trap.c
index 9ad7595d4ad7..8bccc9f3858e 100644
--- a/sys/powerpc/powerpc/trap.c
+++ b/sys/powerpc/powerpc/trap.c
@@ -393,7 +393,8 @@ trap(struct trapframe *frame)
 		break;
 #if defined(__powerpc64__) && defined(AIM)
 	case EXC_DSE:
-		if ((frame->dar & SEGMENT_MASK) == USER_ADDR) {
+		if (td->td_pcb->pcb_cpu.aim.usr_vsid != 0 &&
+		    (frame->dar & SEGMENT_MASK) == USER_ADDR) {
 			__asm __volatile ("slbmte %0, %1" ::
 				"r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
 				"r"(USER_SLB_SLBE));
@@ -731,10 +732,7 @@ trap_pfault(struct trapframe *frame, int user)
 	struct proc *p;
 	vm_map_t map;
 	vm_prot_t ftype;
-	int rv;
-#ifdef AIM
-	register_t user_sr;
-#endif
+	int rv, is_user;
 
 	td = curthread;
 	p = td->td_proc;
@@ -759,21 +757,14 @@ trap_pfault(struct trapframe *frame, int user)
 		KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace NULL"));
 		map = &p->p_vmspace->vm_map;
 	} else {
-#ifdef BOOKE
-		if (eva < VM_MAXUSER_ADDRESS) {
-#else
-		if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
-#endif
-			map = &p->p_vmspace->vm_map;
+		rv = pmap_decode_kernel_ptr(eva, &is_user, &eva);
+		if (rv != 0)
+			return (SIGSEGV);
 
-#ifdef AIM
-			user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
-			eva &= ADDR_PIDX | ADDR_POFF;
-			eva |= user_sr << ADDR_SR_SHFT;
-#endif
-		} else {
+		if (is_user)
+			map = &p->p_vmspace->vm_map;
+		else
 			map = kernel_map;
-		}
 	}
 
 	va = trunc_page(eva);
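
Editorial note, not part of the patch: the sketch below shows how a kernel-side
fault path can consume the new pmap_decode_kernel_ptr() interface, mirroring
the trap_pfault() change above. The helper name resolve_fault_map(), its
parameters, and the exact header list are hypothetical; only the
pmap_decode_kernel_ptr() prototype and the user-map/kernel-map selection come
from the patch.

	#include <sys/param.h>
	#include <sys/proc.h>
	#include <sys/signal.h>

	#include <vm/vm.h>
	#include <vm/pmap.h>
	#include <vm/vm_map.h>
	#include <vm/vm_kern.h>

	/*
	 * Hypothetical helper: decide which vm_map a kernel-mode fault on
	 * '*eva' should be resolved against.  pmap_decode_kernel_ptr() may
	 * rewrite '*eva' into the faulting process' user address space
	 * (e.g. AIM segment remapping during copyin/copyout) and reports
	 * whether the decoded address is a user address.
	 */
	static int
	resolve_fault_map(struct proc *p, vm_offset_t *eva, vm_map_t *map)
	{
		int is_user;

		if (pmap_decode_kernel_ptr(*eva, &is_user, eva) != 0)
			return (SIGSEGV);	/* address could not be decoded */
		*map = is_user ? &p->p_vmspace->vm_map : kernel_map;
		return (0);
	}

Keeping the decode inside the pmap layer is what lets trap_pfault() drop its
AIM/BOOKE #ifdefs: a future pmap (such as one using POWER9 radix tables) can
supply its own decoding without touching the trap code.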