Start to clean up arm64 address space selection
On arm64 we should use bit 55 of the address to decide if an address is a user or kernel address. Add a new macro with this check and a second to ensure the address is in the canonical form, i.e. the top bits are all zero or all one. This will help with supporting future CPU features, including Top Byte Ignore, Pointer Authentication, and Memory Tagging.

Reviewed by:	kib
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D31179
commit b7a78d573a
parent a40cf4175c
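As background for the diff below, here is a minimal standalone C sketch of the two checks the commit message describes. It mirrors the new ADDR_IS_KERNEL and ADDR_IS_CANONICAL macros added in sys/arm64/include/vmparam.h; the helper names and sample addresses are invented for illustration and are not part of the commit.

/*
 * Standalone illustration (not part of the commit): bit 55 selects the
 * address space, while bits 63:48 must be all zero or all one for an
 * address to be canonical (i.e. carrying no TBI/PAC/MTE tag bits).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	ADDR_TOP_MASK	UINT64_C(0xffff000000000000)
#define	ADDR_BIT_55	(UINT64_C(1) << 55)

static bool
addr_is_kernel(uint64_t addr)
{
	return ((addr & ADDR_BIT_55) != 0);
}

static bool
addr_is_canonical(uint64_t addr)
{
	uint64_t top = addr & ADDR_TOP_MASK;

	return (top == 0 || top == ADDR_TOP_MASK);
}

int
main(void)
{
	uint64_t vas[] = {
		UINT64_C(0x0000000000401000),	/* canonical user address */
		UINT64_C(0xffff000000100000),	/* canonical kernel address */
		UINT64_C(0x2a000000deadbeef),	/* top-byte tag: non-canonical */
	};

	for (size_t i = 0; i < sizeof(vas) / sizeof(vas[0]); i++)
		printf("%#018jx kernel=%d canonical=%d\n",
		    (uintmax_t)vas[i], addr_is_kernel(vas[i]),
		    addr_is_canonical(vas[i]));
	return (0);
}

Note that a tagged pointer such as the third sample is neither a kernel address by the bit-55 test nor canonical, which is exactly the case the new checks are meant to catch.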
sys/arm64/arm64/machdep.c
@@ -377,7 +377,10 @@ set_dbregs(struct thread *td, struct dbreg *regs)
 		addr = regs->db_breakregs[i].dbr_addr;
 		ctrl = regs->db_breakregs[i].dbr_ctrl;
 
-		/* Don't let the user set a breakpoint on a kernel address. */
+		/*
+		 * Don't let the user set a breakpoint on a kernel or
+		 * non-canonical user address.
+		 */
 		if (addr >= VM_MAXUSER_ADDRESS)
 			return (EINVAL);
 
@@ -412,7 +415,10 @@ set_dbregs(struct thread *td, struct dbreg *regs)
 		addr = regs->db_watchregs[i].dbw_addr;
 		ctrl = regs->db_watchregs[i].dbw_ctrl;
 
-		/* Don't let the user set a watchpoint on a kernel address. */
+		/*
+		 * Don't let the user set a watchpoint on a kernel or
+		 * non-canonical user address.
+		 */
 		if (addr >= VM_MAXUSER_ADDRESS)
 			return (EINVAL);
 
sys/arm64/arm64/pmap.c
@@ -466,11 +466,13 @@ pmap_l1_to_l2(pd_entry_t *l1p, vm_offset_t va)
 
 	l1 = pmap_load(l1p);
 
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
 	/*
 	 * The valid bit may be clear if pmap_update_entry() is concurrently
 	 * modifying the entry, so for KVA only the entry type may be checked.
 	 */
-	KASSERT(va >= VM_MAX_USER_ADDRESS || (l1 & ATTR_DESCR_VALID) != 0,
+	KASSERT(ADDR_IS_KERNEL(va) || (l1 & ATTR_DESCR_VALID) != 0,
 	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
 	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
 	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
@@ -498,11 +500,13 @@ pmap_l2_to_l3(pd_entry_t *l2p, vm_offset_t va)
 
 	l2 = pmap_load(l2p);
 
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
 	/*
 	 * The valid bit may be clear if pmap_update_entry() is concurrently
 	 * modifying the entry, so for KVA only the entry type may be checked.
 	 */
-	KASSERT(va >= VM_MAX_USER_ADDRESS || (l2 & ATTR_DESCR_VALID) != 0,
+	KASSERT(ADDR_IS_KERNEL(va) || (l2 & ATTR_DESCR_VALID) != 0,
 	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
 	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
 	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
@@ -1580,7 +1584,9 @@ pmap_qremove(vm_offset_t sva, int count)
 	vm_offset_t va;
 	int lvl;
 
-	KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva));
+	KASSERT(ADDR_IS_CANONICAL(sva),
+	    ("%s: Address not in canonical form: %lx", __func__, sva));
+	KASSERT(ADDR_IS_KERNEL(sva), ("usermode va %lx", sva));
 
 	va = sva;
 	while (count-- > 0) {
@@ -1700,7 +1706,9 @@ pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
 {
 	vm_page_t mpte;
 
-	if (va >= VM_MAXUSER_ADDRESS)
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
+	if (ADDR_IS_KERNEL(va))
 		return (0);
 	KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
 	mpte = PHYS_TO_VM_PAGE(ptepde & ~ATTR_MASK);
@@ -1960,17 +1968,20 @@ pmap_alloc_l2(pmap_t pmap, vm_offset_t va, vm_page_t *l2pgp,
 	vm_page_t l2pg;
 	vm_pindex_t l2pindex;
 
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
+
 retry:
 	l1 = pmap_l1(pmap, va);
 	if (l1 != NULL && (pmap_load(l1) & ATTR_DESCR_MASK) == L1_TABLE) {
 		l2 = pmap_l1_to_l2(l1, va);
-		if (va < VM_MAXUSER_ADDRESS) {
+		if (!ADDR_IS_KERNEL(va)) {
 			/* Add a reference to the L2 page. */
 			l2pg = PHYS_TO_VM_PAGE(pmap_load(l1) & ~ATTR_MASK);
 			l2pg->ref_count++;
 		} else
 			l2pg = NULL;
-	} else if (va < VM_MAXUSER_ADDRESS) {
+	} else if (!ADDR_IS_KERNEL(va)) {
 		/* Allocate a L2 page. */
 		l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
 		l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp);
@@ -2936,11 +2947,15 @@ pmap_remove_l3_range(pmap_t pmap, pd_entry_t l2e, vm_offset_t sva,
 	vm_offset_t va;
 	vm_page_t l3pg, m;
 
+	KASSERT(ADDR_IS_CANONICAL(sva),
+	    ("%s: Start address not in canonical form: %lx", __func__, sva));
+	KASSERT(ADDR_IS_CANONICAL(eva) || eva == VM_MAX_USER_ADDRESS,
+	    ("%s: End address not in canonical form: %lx", __func__, eva));
+
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE),
 	    ("pmap_remove_l3_range: range crosses an L3 page table boundary"));
-	l3pg = sva < VM_MAXUSER_ADDRESS ? PHYS_TO_VM_PAGE(l2e & ~ATTR_MASK) :
-	    NULL;
+	l3pg = !ADDR_IS_KERNEL(sva) ? PHYS_TO_VM_PAGE(l2e & ~ATTR_MASK) : NULL;
 	va = eva;
 	for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
 		if (!pmap_l3_valid(pmap_load(l3))) {
@@ -3701,6 +3716,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	boolean_t nosleep;
 	int lvl, rv;
 
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
+
 	va = trunc_page(va);
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -3712,7 +3730,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	if ((flags & PMAP_ENTER_WIRED) != 0)
 		new_l3 |= ATTR_SW_WIRED;
 	if (pmap->pm_stage == PM_STAGE1) {
-		if (va < VM_MAXUSER_ADDRESS)
+		if (!ADDR_IS_KERNEL(va))
 			new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
 		else
 			new_l3 |= ATTR_S1_UXN;
@@ -3779,7 +3797,7 @@ retry:
 	pde = pmap_pde(pmap, va, &lvl);
 	if (pde != NULL && lvl == 2) {
 		l3 = pmap_l2_to_l3(pde, va);
-		if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
+		if (!ADDR_IS_KERNEL(va) && mpte == NULL) {
 			mpte = PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
 			mpte->ref_count++;
 		}
@@ -3789,7 +3807,7 @@ retry:
 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
 		    (l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
 			l3 = &l3[pmap_l3_index(va)];
-			if (va < VM_MAXUSER_ADDRESS) {
+			if (!ADDR_IS_KERNEL(va)) {
 				mpte = PHYS_TO_VM_PAGE(
 				    pmap_load(l2) & ~ATTR_MASK);
 				mpte->ref_count++;
@@ -3798,7 +3816,7 @@ retry:
 		}
 		/* We need to allocate an L3 table. */
 	}
-	if (va < VM_MAXUSER_ADDRESS) {
+	if (!ADDR_IS_KERNEL(va)) {
 		nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
 
 		/*
@@ -4023,6 +4041,8 @@ pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	PMAP_ASSERT_STAGE1(pmap);
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
 
 	new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
@@ -4034,7 +4054,7 @@ pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	if ((prot & VM_PROT_EXECUTE) == 0 ||
 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
 		new_l2 |= ATTR_S1_XN;
-	if (va < VM_MAXUSER_ADDRESS)
+	if (!ADDR_IS_KERNEL(va))
 		new_l2 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
 	else
 		new_l2 |= ATTR_S1_UXN;
@@ -4081,6 +4101,8 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
 	vm_page_t l2pg, mt;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
 
 	if ((l2 = pmap_alloc_l2(pmap, va, &l2pg, (flags &
 	    PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
@@ -4095,9 +4117,10 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
 	if ((old_l2 = pmap_load(l2)) != 0) {
 		KASSERT(l2pg == NULL || l2pg->ref_count > 1,
 		    ("pmap_enter_l2: l2pg's ref count is too low"));
-		if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (va <
-		    VM_MAXUSER_ADDRESS || (old_l2 & ATTR_DESCR_MASK) ==
-		    L2_BLOCK || !pmap_every_pte_zero(old_l2 & ~ATTR_MASK))) {
+		if ((flags & PMAP_ENTER_NOREPLACE) != 0 &&
+		    (!ADDR_IS_KERNEL(va) ||
+		    (old_l2 & ATTR_DESCR_MASK) == L2_BLOCK ||
+		    !pmap_every_pte_zero(old_l2 & ~ATTR_MASK))) {
 			if (l2pg != NULL)
 				l2pg->ref_count--;
 			CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx"
@@ -4111,7 +4134,7 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
 		else
 			pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE,
 			    &free, lockp);
-		if (va < VM_MAXUSER_ADDRESS) {
+		if (!ADDR_IS_KERNEL(va)) {
 			vm_page_free_pages_toq(&free, true);
 			KASSERT(pmap_load(l2) == 0,
 			    ("pmap_enter_l2: non-zero L2 entry %p", l2));
@@ -4260,13 +4283,15 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	PMAP_ASSERT_STAGE1(pmap);
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
 
 	CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
 	/*
 	 * In the case that a page table page is not
 	 * resident, we are creating it here.
 	 */
-	if (va < VM_MAXUSER_ADDRESS) {
+	if (!ADDR_IS_KERNEL(va)) {
 		vm_pindex_t l2pindex;
 
 		/*
@@ -4350,7 +4375,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	if ((prot & VM_PROT_EXECUTE) == 0 ||
 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
 		l3_val |= ATTR_S1_XN;
-	if (va < VM_MAXUSER_ADDRESS)
+	if (!ADDR_IS_KERNEL(va))
 		l3_val |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
 	else
 		l3_val |= ATTR_S1_UXN;
@@ -6110,6 +6135,9 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	PMAP_ASSERT_STAGE1(pmap);
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
+
 	l3 = NULL;
 	oldl2 = pmap_load(l2);
 	KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
@@ -6149,7 +6177,7 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
 	 * so the direct map region is the only part of the
 	 * kernel address space that must be handled here.
 	 */
-	KASSERT(va < VM_MAXUSER_ADDRESS || VIRT_IN_DMAP(va),
+	KASSERT(!ADDR_IS_KERNEL(va) || VIRT_IN_DMAP(va),
 	    ("pmap_demote_l2: No saved mpte for va %#lx", va));
 
 	/*
@@ -6174,7 +6202,7 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
 		goto fail;
 	}
 
-	if (va < VM_MAXUSER_ADDRESS) {
+	if (!ADDR_IS_KERNEL(va)) {
 		ml3->ref_count = NL3PG;
 		pmap_resident_count_inc(pmap, 1);
 	}
@@ -6537,7 +6565,10 @@ pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
 {
 
 	PMAP_ASSERT_STAGE1(pmap);
-	if (va >= VM_MIN_KERNEL_ADDRESS) {
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
+
+	if (ADDR_IS_KERNEL(va)) {
 		cpu_icache_sync_range(va, sz);
 	} else {
 		u_int len, offset;
sys/arm64/arm64/trap.c
@@ -262,8 +262,14 @@ data_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
 	else {
 		intr_enable();
 
+		/* We received a TBI/PAC/etc. fault from the kernel */
+		if (!ADDR_IS_CANONICAL(far)) {
+			error = KERN_INVALID_ADDRESS;
+			goto bad_far;
+		}
+
 		/* The top bit tells us which range to use */
-		if (far >= VM_MAXUSER_ADDRESS) {
+		if (ADDR_IS_KERNEL(far)) {
 			map = kernel_map;
 		} else {
 			map = &p->p_vmspace->vm_map;
@@ -307,6 +313,7 @@ data_abort(struct thread *td, struct trapframe *frame, uint64_t esr,
 	/* Fault in the page. */
 	error = vm_fault_trap(map, far, ftype, VM_FAULT_NORMAL, &sig, &ucode);
 	if (error != KERN_SUCCESS) {
+bad_far:
 		if (lower) {
 			call_trapsignal(td, sig, ucode, (void *)far,
 			    ESR_ELx_EXCEPTION(esr));
sys/arm64/include/vmparam.h
@@ -156,6 +156,13 @@
 #define	VM_MIN_KERNEL_ADDRESS	(0xffff000000000000UL)
 #define	VM_MAX_KERNEL_ADDRESS	(0xffff008000000000UL)
 
+/* If true addr is in the kernel address space */
+#define	ADDR_IS_KERNEL(addr)	(((addr) & (1ul << 55)) == (1ul << 55))
+/* If true addr is in its canonical form (i.e. no TBI, PAC, etc.) */
+#define	ADDR_IS_CANONICAL(addr)	\
+    (((addr) & 0xffff000000000000UL) == 0 ||	\
+     ((addr) & 0xffff000000000000UL) == 0xffff000000000000UL)
+
 /* 95 TiB maximum for the direct map region */
 #define	DMAP_MIN_ADDRESS	(0xffffa00000000000UL)
 #define	DMAP_MAX_ADDRESS	(0xffffff0000000000UL)
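Taken together, the hunks above converge on a single idiom: assert once, at the function boundary, that the address is canonical, then branch on bit 55 instead of comparing against VM_MAXUSER_ADDRESS. A condensed sketch of that pattern, lifted from the diff rather than a new API:

	KASSERT(ADDR_IS_CANONICAL(va),
	    ("%s: Address not in canonical form: %lx", __func__, va));
	if (ADDR_IS_KERNEL(va)) {
		/* Kernel address space path. */
	} else {
		/* User address space path. */
	}

The payoff is that once Top Byte Ignore, Pointer Authentication, or Memory Tagging place metadata in the upper bits, magnitude comparisons such as va < VM_MAXUSER_ADDRESS stop being meaningful, while bit 55 remains the architectural discriminator between the user and kernel translation ranges.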