Remove hard-coded trap-handling logic involving the segmented memory model
used with hashed page tables on AIM and place it in a new, modular pmap
function called pmap_decode_kernel_ptr(). This function is the inverse of
pmap_map_user_ptr(): it takes a kernel-visible address (usually from a fault)
and determines which address space it belongs to and where it points in that
space. With POWER9 radix tables, deciding which mapping to use becomes more
complex than a simple AIM/BOOKE split, so it is best to keep this logic in
the same place as pmap_map_user_ptr().

Reviewed by:	jhibbits
commit 1bde6707a7
parent a8714f3fe3
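For orientation: pmap_decode_kernel_ptr() is the inverse of pmap_map_user_ptr(), so a user pointer that has been mapped into the kernel's access window should classify as a user address and decode back to its original value. A minimal kernel-context sketch of that round trip, assuming pm, uaddr, and ulen describe a live user mapping (the helper name check_round_trip is hypothetical, not part of this commit):

static int
check_round_trip(pmap_t pm, volatile const void *uaddr, size_t ulen)
{
	void *kaddr;
	size_t klen;
	vm_offset_t decoded;
	int error, is_user;

	/* Map the user pointer into the kernel-visible window. */
	error = pmap_map_user_ptr(pm, uaddr, &kaddr, ulen, &klen);
	if (error != 0)
		return (error);
	/* Decode the kernel-visible address back into VM terms. */
	error = pmap_decode_kernel_ptr((vm_offset_t)kaddr, &is_user, &decoded);
	if (error != 0)
		return (error);
	/* Expect is_user == 1 and decoded == (vm_offset_t)uaddr. */
	return (0);
}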
@@ -322,6 +322,8 @@ vm_offset_t moea_quick_enter_page(mmu_t mmu, vm_page_t m);
 void moea_quick_remove_page(mmu_t mmu, vm_offset_t addr);
 static int moea_map_user_ptr(mmu_t mmu, pmap_t pm,
     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
+static int moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+    int *is_user, vm_offset_t *decoded_addr);
 
 
 static mmu_method_t moea_methods[] = {
@@ -374,6 +376,7 @@ static mmu_method_t moea_methods[] = {
 	MMUMETHOD(mmu_scan_init,	moea_scan_init),
 	MMUMETHOD(mmu_dumpsys_map,	moea_dumpsys_map),
 	MMUMETHOD(mmu_map_user_ptr,	moea_map_user_ptr),
+	MMUMETHOD(mmu_decode_kernel_ptr, moea_decode_kernel_ptr),
 
 	{ 0, 0 }
 };
@@ -1587,6 +1590,31 @@ moea_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
 	return (0);
 }
 
+/*
+ * Figure out where a given kernel pointer (usually in a fault) points
+ * to from the VM's perspective, potentially remapping into userland's
+ * address space.
+ */
+static int
+moea_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+    vm_offset_t *decoded_addr)
+{
+	vm_offset_t user_sr;
+
+	if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
+		user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm;
+		addr &= ADDR_PIDX | ADDR_POFF;
+		addr |= user_sr << ADDR_SR_SHFT;
+		*decoded_addr = addr;
+		*is_user = 1;
+	} else {
+		*decoded_addr = addr;
+		*is_user = 0;
+	}
+
+	return (0);
+}
+
 /*
  * Map a range of physical addresses into kernel virtual address space.
  *
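The segment swap in moea_decode_kernel_ptr() above is plain bit arithmetic: drop the segment number the kernel saw the fault under, keep the page index and byte offset, and splice in the segment the current thread has reserved for user accesses. A standalone sketch of the same arithmetic, where ADDR_SR_SHFT, ADDR_PIDX, ADDR_POFF, and USER_ADDR are illustrative stand-ins for the real powerpc header values (assuming a 32-bit layout with sixteen 256 MB segments), not quotes from vmparam.h:

#include <stdint.h>
#include <stdio.h>

#define ADDR_SR_SHFT	28		/* segment number sits in the top 4 bits */
#define ADDR_PIDX	0x0ffff000u	/* page-index bits (illustrative) */
#define ADDR_POFF	0x00000fffu	/* byte offset within a page */
#define USER_ADDR	0xb0000000u	/* hypothetical user-copy segment base */

int
main(void)
{
	uint32_t addr = 0xb0012345u;	/* fault address in the USER_ADDR segment */
	uint32_t user_sr = 0x4;		/* stand-in for pcb_cpu.aim.usr_segm */

	if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
		/* Strip the kernel-side segment, keeping page index and offset... */
		addr &= ADDR_PIDX | ADDR_POFF;
		/* ...and splice in the segment the thread actually has mapped. */
		addr |= user_sr << ADDR_SR_SHFT;
		printf("user address 0x%08x\n", addr);	/* prints 0x40012345 */
	} else
		printf("kernel address 0x%08x\n", addr);
	return (0);
}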
@@ -288,6 +288,8 @@ vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page_t m);
 void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr);
 static int moea64_map_user_ptr(mmu_t mmu, pmap_t pm,
     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
+static int moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+    int *is_user, vm_offset_t *decoded_addr);
 
 
 static mmu_method_t moea64_methods[] = {
@@ -339,6 +341,7 @@ static mmu_method_t moea64_methods[] = {
 	MMUMETHOD(mmu_scan_init,	moea64_scan_init),
 	MMUMETHOD(mmu_dumpsys_map,	moea64_dumpsys_map),
 	MMUMETHOD(mmu_map_user_ptr,	moea64_map_user_ptr),
+	MMUMETHOD(mmu_decode_kernel_ptr, moea64_decode_kernel_ptr),
 
 	{ 0, 0 }
 };
@@ -1909,6 +1912,31 @@ moea64_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
 	return (0);
 }
 
+/*
+ * Figure out where a given kernel pointer (usually in a fault) points
+ * to from the VM's perspective, potentially remapping into userland's
+ * address space.
+ */
+static int
+moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+    vm_offset_t *decoded_addr)
+{
+	vm_offset_t user_sr;
+
+	if ((addr >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
+		user_sr = curthread->td_pcb->pcb_cpu.aim.usr_segm;
+		addr &= ADDR_PIDX | ADDR_POFF;
+		addr |= user_sr << ADDR_SR_SHFT;
+		*decoded_addr = addr;
+		*is_user = 1;
+	} else {
+		*decoded_addr = addr;
+		*is_user = 0;
+	}
+
+	return (0);
+}
+
 /*
  * Map a range of physical addresses into kernel virtual address space.
  *
@@ -382,6 +382,8 @@ static int mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
     vm_size_t sz, vm_memattr_t mode);
 static int mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm,
     volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
+static int mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
+    int *is_user, vm_offset_t *decoded_addr);
 
 
 static mmu_method_t mmu_booke_methods[] = {
@@ -436,6 +438,7 @@ static mmu_method_t mmu_booke_methods[] = {
 	MMUMETHOD(mmu_unmapdev,	mmu_booke_unmapdev),
 	MMUMETHOD(mmu_change_attr,	mmu_booke_change_attr),
 	MMUMETHOD(mmu_map_user_ptr,	mmu_booke_map_user_ptr),
+	MMUMETHOD(mmu_decode_kernel_ptr, mmu_booke_decode_kernel_ptr),
 
 	/* dumpsys() support */
 	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
@@ -2291,6 +2294,25 @@ mmu_booke_map_user_ptr(mmu_t mmu, pmap_t pm, volatile const void *uaddr,
 	return (0);
 }
 
+/*
+ * Figure out where a given kernel pointer (usually in a fault) points
+ * to from the VM's perspective, potentially remapping into userland's
+ * address space.
+ */
+static int
+mmu_booke_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr, int *is_user,
+    vm_offset_t *decoded_addr)
+{
+
+	if (addr < VM_MAXUSER_ADDRESS)
+		*is_user = 1;
+	else
+		*is_user = 0;
+
+	*decoded_addr = addr;
+	return (0);
+}
+
 /*
  * Initialize pmap associated with process 0.
  */
@@ -262,6 +262,8 @@ void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
 int	pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
 int	pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr,
	    void **kaddr, size_t ulen, size_t *klen);
+int	pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user,
+	    vm_offset_t *decoded_addr);
 void	pmap_deactivate(struct thread *);
 vm_paddr_t	pmap_kextract(vm_offset_t);
 int	pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
@@ -839,6 +839,21 @@ METHOD int map_user_ptr {
 	size_t *_klen;
 };
 
+/**
+ * @brief Decode a kernel pointer, as visible to the current thread,
+ * by setting whether it corresponds to a user or kernel address and
+ * the address in the respective memory maps to which the address as
+ * seen in the kernel corresponds. This is essentially the inverse of
+ * MMU_MAP_USER_PTR() above and is used in kernel-space fault handling.
+ * Returns 0 on success or EFAULT if the address could not be mapped.
+ */
+METHOD int decode_kernel_ptr {
+	mmu_t		_mmu;
+	vm_offset_t	addr;
+	int		*is_user;
+	vm_offset_t	*decoded_addr;
+};
+
 /**
  * @brief Reverse-map a kernel virtual address
  *
@@ -998,3 +1013,4 @@ METHOD int change_attr {
 	vm_size_t _sz;
 	vm_memattr_t _mode;
 } DEFAULT mmu_null_change_attr;
+
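mmu_if.m is processed by FreeBSD's kobj(9) interface compiler, which turns each METHOD declaration into a typed dispatch stub in a generated mmu_if.h. A rough sketch of what the decode_kernel_ptr declaration above expands to, reconstructed from the usual shape of generated kobj stubs rather than from the actual generated file:

static __inline int MMU_DECODE_KERNEL_PTR(mmu_t _mmu, vm_offset_t addr,
    int *is_user, vm_offset_t *decoded_addr)
{
	kobjop_t _m;

	/*
	 * Look up the implementation registered in the active MMU's
	 * method table (moea, moea64, or mmu_booke above).
	 */
	KOBJOPLOOKUP(((kobj_t)_mmu)->ops, mmu_decode_kernel_ptr);
	return ((mmu_decode_kernel_ptr_t *)_m)(_mmu, addr, is_user,
	    decoded_addr);
}

pmap_dispatch.c below calls this stub with the global mmu_obj, which is how the machine-independent pmap_decode_kernel_ptr() reaches whichever per-MMU implementation was selected at boot.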
@@ -520,6 +520,14 @@ pmap_map_user_ptr(pmap_t pm, volatile const void *uaddr, void **kaddr,
 	return (MMU_MAP_USER_PTR(mmu_obj, pm, uaddr, kaddr, ulen, klen));
 }
 
+int
+pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user, vm_offset_t *decoded)
+{
+
+	CTR2(KTR_PMAP, "%s(%#jx)", __func__, (uintmax_t)addr);
+	return (MMU_DECODE_KERNEL_PTR(mmu_obj, addr, is_user, decoded));
+}
+
 boolean_t
 pmap_dev_direct_mapped(vm_paddr_t pa, vm_size_t size)
 {
@@ -393,7 +393,8 @@ trap(struct trapframe *frame)
 			break;
 #if defined(__powerpc64__) && defined(AIM)
 		case EXC_DSE:
-			if ((frame->dar & SEGMENT_MASK) == USER_ADDR) {
+			if (td->td_pcb->pcb_cpu.aim.usr_vsid != 0 &&
+			    (frame->dar & SEGMENT_MASK) == USER_ADDR) {
 				__asm __volatile ("slbmte %0, %1" ::
 				    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
 				    "r"(USER_SLB_SLBE));
@@ -731,10 +732,7 @@ trap_pfault(struct trapframe *frame, int user)
 	struct proc *p;
 	vm_map_t map;
 	vm_prot_t ftype;
-	int rv;
-#ifdef AIM
-	register_t user_sr;
-#endif
+	int rv, is_user;
 
 	td = curthread;
 	p = td->td_proc;
@@ -759,21 +757,14 @@ trap_pfault(struct trapframe *frame, int user)
 		KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace NULL"));
 		map = &p->p_vmspace->vm_map;
 	} else {
-#ifdef BOOKE
-		if (eva < VM_MAXUSER_ADDRESS) {
-#else
-		if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
-#endif
-			map = &p->p_vmspace->vm_map;
-
-#ifdef AIM
-			user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
-			eva &= ADDR_PIDX | ADDR_POFF;
-			eva |= user_sr << ADDR_SR_SHFT;
-#endif
-		} else {
+		rv = pmap_decode_kernel_ptr(eva, &is_user, &eva);
+		if (rv != 0)
+			return (SIGSEGV);
+
+		if (is_user)
+			map = &p->p_vmspace->vm_map;
+		else
 			map = kernel_map;
-		}
 	}
 	va = trunc_page(eva);
 