Split out the stage 1 pte bits and add the stage 2 bits
In preparation for adding bhyve support to arm64 we need to split the
stage 1 and stage 2 pte fields to allow future changes to create stage 2
page tables.

MFC after:	1 month
Sponsored by:	Innovate UK
Differential Revision:	https://reviews.freebsd.org/D23669
This commit is contained in:
parent 990a56e866
commit d153d023f5
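The heart of the change is mechanical: every pte attribute that is specific to stage 1 translation gains an ATTR_S1_ prefix, and new ATTR_S2_* macros describe what the same bit positions mean to the stage 2 (hypervisor) lookup that bhyve will need. The sketch below illustrates the distinction; it is not code from this commit, the two helper functions are hypothetical, and the macro values are copied from the pte.h hunk at the end of the diff.

/*
 * Illustrative sketch only: composing a stage 1 leaf pte versus a
 * stage 2 leaf pte with the macros this change introduces. The
 * helper functions and their names are hypothetical.
 */
#include <stdint.h>

typedef uint64_t pt_entry_t;

#define	ATTR_AF			(1 << 10)
#define	ATTR_SH(x)		((x) << 8)
#define	ATTR_SH_IS		3
#define	ATTR_DEFAULT		(ATTR_AF | ATTR_SH(ATTR_SH_IS))
#define	ATTR_S1_AP(x)		((x) << 6)	/* stage 1: AP[2:1] */
#define	ATTR_S1_AP_RW		(0 << 1)
#define	ATTR_S1_IDX(x)		((x) << 2)	/* stage 1: MAIR index */
#define	ATTR_S2_S2AP(x)		((x) << 6)	/* stage 2: S2AP, same bits */
#define	ATTR_S2_MEMATTR(x)	((x) << 2)	/* stage 2: inline memory type */
#define	L3_PAGE			3

/* Stage 1: bits 7:6 are AP permissions, bits 4:2 index into MAIR_EL1. */
static pt_entry_t
stage1_rw_page(uint64_t pa, uint64_t mair_idx)
{
	return (pa | ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) |
	    ATTR_S1_IDX(mair_idx) | L3_PAGE);
}

/* Stage 2: the same bits hold S2AP (3 = read/write) and a MemAttr value. */
static pt_entry_t
stage2_rw_page(uint64_t pa, uint64_t memattr)
{
	return (pa | ATTR_DEFAULT | ATTR_S2_S2AP(3) |
	    ATTR_S2_MEMATTR(memattr) | L3_PAGE);
}

Bits 7:6 and 5:2 carry permissions and the memory type in both formats, but stage 2 encodes S2AP and an inline MemAttr there rather than AP[2:1] and a MAIR index, so keeping a single unprefixed spelling would invite misuse once stage 2 tables exist.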
--- a/sys/arm64/arm64/efirt_machdep.c
+++ b/sys/arm64/arm64/efirt_machdep.c
@@ -215,10 +215,10 @@ efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
 			printf("MAP %lx mode %x pages %lu\n", p->md_phys, mode, p->md_pages);
 
-		l3_attr = ATTR_DEFAULT | ATTR_IDX(mode) | ATTR_AP(ATTR_AP_RW) |
-		    ATTR_nG | L3_PAGE;
+		l3_attr = ATTR_DEFAULT | ATTR_S1_IDX(mode) |
+		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_nG | L3_PAGE;
 		if (mode == VM_MEMATTR_DEVICE || p->md_attr & EFI_MD_ATTR_XP)
-			l3_attr |= ATTR_UXN | ATTR_PXN;
+			l3_attr |= ATTR_S1_XN;
 
 		VM_OBJECT_WLOCK(obj_1t1_pt);
 		for (va = p->md_phys, idx = 0; idx < p->md_pages; idx++,
--- a/sys/arm64/arm64/locore.S
+++ b/sys/arm64/arm64/locore.S
@@ -461,7 +461,7 @@ common:
 	bl	link_l0_pagetable
 
 	/*
-	 * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_nG.
+	 * Build the TTBR0 maps. As TTBR0 maps, they must specify ATTR_S1_nG.
 	 * They are only needed early on, so the VA = PA map is uncached.
 	 */
 	add	x27, x24, #PAGE_SIZE
@@ -469,7 +469,7 @@ common:
 	mov	x6, x27		/* The initial page table */
 #if defined(SOCDEV_PA) && defined(SOCDEV_VA)
 	/* Create a table for the UART */
-	mov	x7, #(ATTR_nG | ATTR_IDX(VM_MEMATTR_DEVICE))
+	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_DEVICE))
 	mov	x8, #(SOCDEV_VA)	/* VA start */
 	mov	x9, #(SOCDEV_PA)	/* PA start */
 	mov	x10, #1
@@ -481,7 +481,7 @@ common:
 	cbz	x19, 1f
 
 	/* Create the identity mapping for FDT data (2 MiB max) */
-	mov	x7, #(ATTR_nG | ATTR_IDX(VM_MEMATTR_UNCACHEABLE))
+	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_UNCACHEABLE))
 	mov	x9, x0
 	mov	x8, x0	/* VA start (== PA start) */
 	mov	x10, #1
@@ -491,7 +491,7 @@ common:
 #endif
 
 	/* Create the VA = PA map */
-	mov	x7, #(ATTR_nG | ATTR_IDX(VM_MEMATTR_UNCACHEABLE))
+	mov	x7, #(ATTR_S1_nG | ATTR_S1_IDX(VM_MEMATTR_UNCACHEABLE))
 	mov	x9, x27
 	mov	x8, x9	/* VA start (== PA start) */
 	mov	x10, #1
@@ -639,7 +639,7 @@ build_l2_block_pagetable:
 	lsl	x12, x7, #2
 	orr	x12, x12, #L2_BLOCK
 	orr	x12, x12, #(ATTR_AF)
-	orr	x12, x12, #(ATTR_UXN)
+	orr	x12, x12, #(ATTR_S1_UXN)
 #ifdef SMP
 	orr	x12, x12, ATTR_SH(ATTR_SH_IS)
 #endif
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -213,8 +213,8 @@ __FBSDID("$FreeBSD$");
 
 /*
  * The presence of this flag indicates that the mapping is writeable.
- * If the ATTR_AP_RO bit is also set, then the mapping is clean, otherwise it is
- * dirty. This flag may only be set on managed mappings.
+ * If the ATTR_S1_AP_RO bit is also set, then the mapping is clean, otherwise
+ * it is dirty. This flag may only be set on managed mappings.
  *
  * The DBM bit is reserved on ARMv8.0 but it seems we can safely treat it
  * as a software managed bit.
@@ -590,11 +590,11 @@ pmap_pte_dirty(pt_entry_t pte)
 {
 
 	KASSERT((pte & ATTR_SW_MANAGED) != 0, ("pte %#lx is unmanaged", pte));
-	KASSERT((pte & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) != 0,
+	KASSERT((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) != 0,
 	    ("pte %#lx is writeable and missing ATTR_SW_DBM", pte));
 
-	return ((pte & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) ==
-	    (ATTR_AP(ATTR_AP_RW) | ATTR_SW_DBM));
+	return ((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
+	    (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM));
 }
 
 static __inline void
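The comment and pmap_pte_dirty() above are the heart of the software dirty-bit scheme the rename must preserve: a writeable managed mapping always carries ATTR_SW_DBM and is considered dirty exactly when its AP field has been flipped to read/write. A self-contained sketch of that predicate, with values copied from pte.h (the main() harness is illustrative only, not commit code):

/*
 * Standalone sketch of the dirty predicate; ATTR_SW_DBM aliases the
 * hardware DBM bit, per the comment in the previous hunk.
 */
#include <assert.h>
#include <stdint.h>

#define	ATTR_SW_DBM		(UINT64_C(1) << 51)
#define	ATTR_S1_AP(x)		((uint64_t)(x) << 6)
#define	ATTR_S1_AP_RW		(0 << 1)
#define	ATTR_S1_AP_RO		(1 << 1)
#define	ATTR_S1_AP_RW_BIT	(UINT64_C(1) << 7)

static int
pte_dirty(uint64_t pte)
{
	/* Dirty means: marked writeable (DBM set) and currently mapped RW. */
	return ((pte & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
	    (ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM));
}

int
main(void)
{
	/* Writeable but still mapped read-only: clean, awaiting first write. */
	assert(!pte_dirty(ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM));
	/* A write fault (or hardware DBM) flipped it to RW: dirty. */
	assert(pte_dirty(ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_SW_DBM));
	return (0);
}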
@@ -699,8 +699,10 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
 			l2_slot = pmap_l2_index(va);
 			KASSERT(l2_slot != 0, ("..."));
 			pmap_store(&l2[l2_slot],
-			    (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
-			    ATTR_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
+			    (pa & ~L2_OFFSET) | ATTR_DEFAULT |
+			    ATTR_S1_XN |
+			    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
+			    L2_BLOCK);
 		}
 		KASSERT(va == (pa - dmap_phys_base + DMAP_MIN_ADDRESS),
 		    ("..."));
@@ -711,8 +713,8 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
 	    pa += L1_SIZE, va += L1_SIZE) {
 		l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
 		pmap_store(&pagetable_dmap[l1_slot],
-		    (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_XN |
-		    ATTR_IDX(VM_MEMATTR_WRITE_BACK) | L1_BLOCK);
+		    (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_S1_XN |
+		    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L1_BLOCK);
 	}
 
 	/* Create L2 mappings at the end of the region */
@@ -736,8 +738,10 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
 		    pa += L2_SIZE, va += L2_SIZE) {
 			l2_slot = pmap_l2_index(va);
 			pmap_store(&l2[l2_slot],
-			    (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
-			    ATTR_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
+			    (pa & ~L2_OFFSET) | ATTR_DEFAULT |
+			    ATTR_S1_XN |
+			    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) |
+			    L2_BLOCK);
 		}
 	}
@@ -801,7 +805,7 @@ pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
 
 		pa = pmap_early_vtophys(l1pt, l3pt);
 		pmap_store(&l2[l2_slot],
-		    (pa & ~Ln_TABLE_MASK) | ATTR_UXN | L2_TABLE);
+		    (pa & ~Ln_TABLE_MASK) | ATTR_S1_UXN | L2_TABLE);
 		l3pt += PAGE_SIZE;
 	}
@@ -1162,7 +1166,7 @@ pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
 		    (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK),
 		    ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl,
 		    tpte & ATTR_DESCR_MASK));
-		if (((tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) ||
+		if (((tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW)) ||
 		    ((prot & VM_PROT_WRITE) == 0)) {
 			switch(lvl) {
 			case 1:
@@ -1238,8 +1242,8 @@ pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
 	KASSERT((size & PAGE_MASK) == 0,
 	    ("pmap_kenter: Mapping is not page-sized"));
 
-	attr = ATTR_DEFAULT | ATTR_AP(ATTR_AP_RW) | ATTR_XN | ATTR_IDX(mode) |
-	    L3_PAGE;
+	attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
+	    ATTR_S1_IDX(mode) | L3_PAGE;
 	va = sva;
 	while (size != 0) {
 		pde = pmap_pde(kernel_pmap, va, &lvl);
@@ -1353,8 +1357,9 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
 		    ("pmap_qenter: Invalid level %d", lvl));
 
 		m = ma[i];
-		pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_AP(ATTR_AP_RW) |
-		    ATTR_XN | ATTR_IDX(m->md.pv_memattr) | L3_PAGE;
+		pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
+		    ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
+		    ATTR_S1_IDX(m->md.pv_memattr) | L3_PAGE;
 		pte = pmap_l2_to_l3(pde, va);
 		pmap_load_store(pte, pa);
@@ -2952,7 +2957,8 @@ pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask,
 	 * pages.
 	 */
 	if ((old_l2 & ATTR_SW_MANAGED) != 0 &&
-	    (nbits & ATTR_AP(ATTR_AP_RO)) != 0 && pmap_pte_dirty(old_l2)) {
+	    (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
+	    pmap_pte_dirty(old_l2)) {
 		m = PHYS_TO_VM_PAGE(old_l2 & ~ATTR_MASK);
 		for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
 			vm_page_dirty(mt);
@@ -2987,12 +2993,12 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 
 	mask = nbits = 0;
 	if ((prot & VM_PROT_WRITE) == 0) {
-		mask |= ATTR_AP_RW_BIT | ATTR_SW_DBM;
-		nbits |= ATTR_AP(ATTR_AP_RO);
+		mask |= ATTR_S1_AP_RW_BIT | ATTR_SW_DBM;
+		nbits |= ATTR_S1_AP(ATTR_S1_AP_RO);
 	}
 	if ((prot & VM_PROT_EXECUTE) == 0) {
-		mask |= ATTR_XN;
-		nbits |= ATTR_XN;
+		mask |= ATTR_S1_XN;
+		nbits |= ATTR_S1_XN;
 	}
 	if (mask == 0)
 		return;
@@ -3063,7 +3069,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 		 * update the page's dirty field.
 		 */
 		if ((l3 & ATTR_SW_MANAGED) != 0 &&
-		    (nbits & ATTR_AP(ATTR_AP_RO)) != 0 &&
+		    (nbits & ATTR_S1_AP(ATTR_S1_AP_RO)) != 0 &&
 		    pmap_pte_dirty(l3))
 			vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK));
@@ -3215,8 +3221,8 @@ pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
 		return;
 	}
 
-	if ((newl2 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) ==
-	    (ATTR_AP(ATTR_AP_RO) | ATTR_SW_DBM)) {
+	if ((newl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
+	    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
 		if (!atomic_fcmpset_64(l2, &newl2, newl2 & ~ATTR_SW_DBM))
 			goto setl2;
 		newl2 &= ~ATTR_SW_DBM;
@@ -3226,8 +3232,8 @@ pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
 	for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
 		oldl3 = pmap_load(l3);
 setl3:
-		if ((oldl3 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) ==
-		    (ATTR_AP(ATTR_AP_RO) | ATTR_SW_DBM)) {
+		if ((oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) ==
+		    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM)) {
 			if (!atomic_fcmpset_64(l3, &oldl3, oldl3 &
 			    ~ATTR_SW_DBM))
 				goto setl3;
@@ -3305,27 +3311,27 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
 	pa = VM_PAGE_TO_PHYS(m);
-	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
+	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
 	    L3_PAGE);
 	if ((prot & VM_PROT_WRITE) == 0)
-		new_l3 |= ATTR_AP(ATTR_AP_RO);
+		new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
 	if ((prot & VM_PROT_EXECUTE) == 0 ||
 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
-		new_l3 |= ATTR_XN;
+		new_l3 |= ATTR_S1_XN;
 	if ((flags & PMAP_ENTER_WIRED) != 0)
 		new_l3 |= ATTR_SW_WIRED;
 	if (va < VM_MAXUSER_ADDRESS)
-		new_l3 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
+		new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
 	else
-		new_l3 |= ATTR_UXN;
+		new_l3 |= ATTR_S1_UXN;
 	if (pmap != kernel_pmap)
-		new_l3 |= ATTR_nG;
+		new_l3 |= ATTR_S1_nG;
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		new_l3 |= ATTR_SW_MANAGED;
 		if ((prot & VM_PROT_WRITE) != 0) {
 			new_l3 |= ATTR_SW_DBM;
 			if ((flags & VM_PROT_WRITE) == 0)
-				new_l3 |= ATTR_AP(ATTR_AP_RO);
+				new_l3 |= ATTR_S1_AP(ATTR_S1_AP_RO);
 		}
 	}
@@ -3507,7 +3513,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	 */
 	if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
 	    m->md.pv_memattr == VM_MEMATTR_WRITE_BACK &&
-	    (opa != pa || (orig_l3 & ATTR_XN)))
+	    (opa != pa || (orig_l3 & ATTR_S1_XN)))
 		cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
 
 	/*
@@ -3577,20 +3583,21 @@ pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 
 	new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
-	    ATTR_IDX(m->md.pv_memattr) | ATTR_AP(ATTR_AP_RO) | L2_BLOCK);
+	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
+	    L2_BLOCK);
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		new_l2 |= ATTR_SW_MANAGED;
 		new_l2 &= ~ATTR_AF;
 	}
 	if ((prot & VM_PROT_EXECUTE) == 0 ||
 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
-		new_l2 |= ATTR_XN;
+		new_l2 |= ATTR_S1_XN;
 	if (va < VM_MAXUSER_ADDRESS)
-		new_l2 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
+		new_l2 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
 	else
-		new_l2 |= ATTR_UXN;
+		new_l2 |= ATTR_S1_UXN;
 	if (pmap != kernel_pmap)
-		new_l2 |= ATTR_nG;
+		new_l2 |= ATTR_S1_nG;
 	return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
 	    PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
 	    KERN_SUCCESS);
@@ -3887,17 +3894,17 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	pmap_resident_count_inc(pmap, 1);
 
 	pa = VM_PAGE_TO_PHYS(m);
-	l3_val = pa | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
-	    ATTR_AP(ATTR_AP_RO) | L3_PAGE;
+	l3_val = pa | ATTR_DEFAULT | ATTR_S1_IDX(m->md.pv_memattr) |
+	    ATTR_S1_AP(ATTR_S1_AP_RO) | L3_PAGE;
 	if ((prot & VM_PROT_EXECUTE) == 0 ||
 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
-		l3_val |= ATTR_XN;
+		l3_val |= ATTR_S1_XN;
 	if (va < VM_MAXUSER_ADDRESS)
-		l3_val |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
+		l3_val |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
 	else
-		l3_val |= ATTR_UXN;
+		l3_val |= ATTR_S1_UXN;
 	if (pmap != kernel_pmap)
-		l3_val |= ATTR_nG;
+		l3_val |= ATTR_S1_nG;
 
 	/*
 	 * Now validate mapping with RO protection
@@ -4084,7 +4091,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 			mask = ATTR_AF | ATTR_SW_WIRED;
 			nbits = 0;
 			if ((srcptepaddr & ATTR_SW_DBM) != 0)
-				nbits |= ATTR_AP_RW_BIT;
+				nbits |= ATTR_S1_AP_RW_BIT;
 			pmap_store(l2, (srcptepaddr & ~mask) | nbits);
 			pmap_resident_count_inc(dst_pmap, L2_SIZE /
 			    PAGE_SIZE);
@@ -4133,7 +4140,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 				mask = ATTR_AF | ATTR_SW_WIRED;
 				nbits = 0;
 				if ((ptetemp & ATTR_SW_DBM) != 0)
-					nbits |= ATTR_AP_RW_BIT;
+					nbits |= ATTR_S1_AP_RW_BIT;
 				pmap_store(dst_pte, (ptetemp & ~mask) | nbits);
 				pmap_resident_count_inc(dst_pmap, 1);
 			} else {
@@ -4607,8 +4614,8 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
 			mask = 0;
 			value = 0;
 			if (modified) {
-				mask |= ATTR_AP_RW_BIT;
-				value |= ATTR_AP(ATTR_AP_RW);
+				mask |= ATTR_S1_AP_RW_BIT;
+				value |= ATTR_S1_AP(ATTR_S1_AP_RW);
 			}
 			if (accessed) {
 				mask |= ATTR_AF | ATTR_DESCR_MASK;
@@ -4641,8 +4648,8 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
 			mask = 0;
 			value = 0;
 			if (modified) {
-				mask |= ATTR_AP_RW_BIT;
-				value |= ATTR_AP(ATTR_AP_RW);
+				mask |= ATTR_S1_AP_RW_BIT;
+				value |= ATTR_S1_AP(ATTR_S1_AP_RW);
 			}
 			if (accessed) {
 				mask |= ATTR_AF | ATTR_DESCR_MASK;
@@ -4785,10 +4792,10 @@ pmap_remove_write(vm_page_t m)
 retry:
 		if ((oldpte & ATTR_SW_DBM) != 0) {
 			if (!atomic_fcmpset_long(pte, &oldpte,
-			    (oldpte | ATTR_AP_RW_BIT) & ~ATTR_SW_DBM))
+			    (oldpte | ATTR_S1_AP_RW_BIT) & ~ATTR_SW_DBM))
 				goto retry;
-			if ((oldpte & ATTR_AP_RW_BIT) ==
-			    ATTR_AP(ATTR_AP_RW))
+			if ((oldpte & ATTR_S1_AP_RW_BIT) ==
+			    ATTR_S1_AP(ATTR_S1_AP_RW))
 				vm_page_dirty(m);
 			pmap_invalidate_page(pmap, pv->pv_va);
 		}
@@ -5066,7 +5073,8 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
 					vm_page_dirty(m);
 				}
 				while (!atomic_fcmpset_long(l3, &oldl3,
-				    (oldl3 & ~ATTR_AF) | ATTR_AP(ATTR_AP_RO)))
+				    (oldl3 & ~ATTR_AF) |
+				    ATTR_S1_AP(ATTR_S1_AP_RO)))
 					cpu_spinwait();
 			} else if ((oldl3 & ATTR_AF) != 0)
 				pmap_clear_bits(l3, ATTR_AF);
@@ -5140,7 +5148,7 @@ pmap_clear_modify(vm_page_t m)
 			l3 = pmap_l2_to_l3(l2, va);
 			oldl3 = pmap_load(l3);
 			while (!atomic_fcmpset_long(l3, &oldl3,
-			    (oldl3 & ~ATTR_SW_DBM) | ATTR_AP(ATTR_AP_RO)))
+			    (oldl3 & ~ATTR_SW_DBM) | ATTR_S1_AP(ATTR_S1_AP_RO)))
 				cpu_spinwait();
 			vm_page_dirty(m);
 			pmap_invalidate_page(pmap, va);
@@ -5164,8 +5172,8 @@ pmap_clear_modify(vm_page_t m)
 		l3 = pmap_l2_to_l3(l2, pv->pv_va);
 		oldl3 = pmap_load(l3);
 		if (pmap_l3_valid(oldl3) &&
-		    (oldl3 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM) {
-			pmap_set_bits(l3, ATTR_AP(ATTR_AP_RO));
+		    (oldl3 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) == ATTR_SW_DBM){
+			pmap_set_bits(l3, ATTR_S1_AP(ATTR_S1_AP_RO));
 			pmap_invalidate_page(pmap, pv->pv_va);
 		}
 		PMAP_UNLOCK(pmap);
@@ -5249,8 +5257,8 @@ pmap_mapbios(vm_paddr_t pa, vm_size_t size)
 		/* Insert L2_BLOCK */
 		l2 = pmap_l1_to_l2(pde, va);
 		pmap_load_store(l2,
-		    pa | ATTR_DEFAULT | ATTR_XN |
-		    ATTR_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
+		    pa | ATTR_DEFAULT | ATTR_S1_XN |
+		    ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK) | L2_BLOCK);
 
 		va += L2_SIZE;
 		pa += L2_SIZE;
@@ -5417,7 +5425,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
 		if (pte == NULL)
 			return (EINVAL);
 
-		if ((pmap_load(pte) & ATTR_IDX_MASK) == ATTR_IDX(mode)) {
+		if ((pmap_load(pte) & ATTR_S1_IDX_MASK) == ATTR_S1_IDX(mode)) {
 			/*
 			 * We already have the correct attribute,
 			 * ignore this entry.
@@ -5458,10 +5466,10 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
 		case 3:
 			/* Update the entry */
 			l3 = pmap_load(pte);
-			l3 &= ~ATTR_IDX_MASK;
-			l3 |= ATTR_IDX(mode);
+			l3 &= ~ATTR_S1_IDX_MASK;
+			l3 |= ATTR_S1_IDX(mode);
 			if (mode == VM_MEMATTR_DEVICE)
-				l3 |= ATTR_XN;
+				l3 |= ATTR_S1_XN;
 
 			pmap_update_entry(kernel_pmap, pte, l3, tmpva,
 			    PAGE_SIZE);
@@ -5659,8 +5667,8 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
 	l3phys = VM_PAGE_TO_PHYS(ml3);
 	l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);
 	newl3 = (oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE;
-	KASSERT((oldl2 & (ATTR_AP_RW_BIT | ATTR_SW_DBM)) !=
-	    (ATTR_AP(ATTR_AP_RO) | ATTR_SW_DBM),
+	KASSERT((oldl2 & (ATTR_S1_AP_RW_BIT | ATTR_SW_DBM)) !=
+	    (ATTR_S1_AP(ATTR_S1_AP_RO) | ATTR_SW_DBM),
 	    ("pmap_demote_l2: L2 entry is writeable but not dirty"));
 
 	/*
@@ -5771,7 +5779,7 @@ pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap)
 		if (lvl != 3)
 			val |= MINCORE_SUPER;
 		if ((managed && pmap_pte_dirty(tpte)) || (!managed &&
-		    (tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)))
+		    (tpte & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP(ATTR_S1_AP_RW)))
 			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
 		if ((tpte & ATTR_AF) == ATTR_AF)
 			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
@@ -6035,8 +6043,9 @@ pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
 		ptep = pmap_pte(pmap, far, &lvl);
 		if (ptep != NULL &&
 		    ((pte = pmap_load(ptep)) & ATTR_SW_DBM) != 0) {
-			if ((pte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RO)) {
-				pmap_clear_bits(ptep, ATTR_AP_RW_BIT);
+			if ((pte & ATTR_S1_AP_RW_BIT) ==
+			    ATTR_S1_AP(ATTR_S1_AP_RO)) {
+				pmap_clear_bits(ptep, ATTR_S1_AP_RW_BIT);
 				pmap_invalidate_page(pmap, far);
 			}
 			rv = KERN_SUCCESS;
@@ -6210,18 +6219,18 @@ sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
 	if (eva <= range->sva)
 		return;
 
-	index = range->attrs & ATTR_IDX_MASK;
+	index = range->attrs & ATTR_S1_IDX_MASK;
 	switch (index) {
-	case ATTR_IDX(VM_MEMATTR_DEVICE):
+	case ATTR_S1_IDX(VM_MEMATTR_DEVICE):
 		mode = "DEV";
 		break;
-	case ATTR_IDX(VM_MEMATTR_UNCACHEABLE):
+	case ATTR_S1_IDX(VM_MEMATTR_UNCACHEABLE):
 		mode = "UC";
 		break;
-	case ATTR_IDX(VM_MEMATTR_WRITE_BACK):
+	case ATTR_S1_IDX(VM_MEMATTR_WRITE_BACK):
 		mode = "WB";
 		break;
-	case ATTR_IDX(VM_MEMATTR_WRITE_THROUGH):
+	case ATTR_S1_IDX(VM_MEMATTR_WRITE_THROUGH):
 		mode = "WT";
 		break;
 	default:
@@ -6234,9 +6243,9 @@ sysctl_kmaps_dump(struct sbuf *sb, struct pmap_kernel_map_range *range,
 
 	sbuf_printf(sb, "0x%016lx-0x%016lx r%c%c%c %3s %d %d %d %d\n",
 	    range->sva, eva,
-	    (range->attrs & ATTR_AP_RW_BIT) == ATTR_AP_RW ? 'w' : '-',
-	    (range->attrs & ATTR_PXN) != 0 ? '-' : 'x',
-	    (range->attrs & ATTR_AP_USER) != 0 ? 'u' : 's',
+	    (range->attrs & ATTR_S1_AP_RW_BIT) == ATTR_S1_AP_RW ? 'w' : '-',
+	    (range->attrs & ATTR_S1_PXN) != 0 ? '-' : 'x',
+	    (range->attrs & ATTR_S1_AP_USER) != 0 ? 'u' : 's',
 	    mode, range->l1blocks, range->l2blocks, range->l3contig,
 	    range->l3pages);
@@ -6277,14 +6286,14 @@ sysctl_kmaps_check(struct sbuf *sb, struct pmap_kernel_map_range *range,
 {
 	pt_entry_t attrs;
 
-	attrs = l0e & (ATTR_AP_MASK | ATTR_XN);
-	attrs |= l1e & (ATTR_AP_MASK | ATTR_XN);
+	attrs = l0e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
+	attrs |= l1e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
 	if ((l1e & ATTR_DESCR_MASK) == L1_BLOCK)
-		attrs |= l1e & ATTR_IDX_MASK;
-	attrs |= l2e & (ATTR_AP_MASK | ATTR_XN);
+		attrs |= l1e & ATTR_S1_IDX_MASK;
+	attrs |= l2e & (ATTR_S1_AP_MASK | ATTR_S1_XN);
 	if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK)
-		attrs |= l2e & ATTR_IDX_MASK;
-	attrs |= l3e & (ATTR_AP_MASK | ATTR_XN | ATTR_IDX_MASK);
+		attrs |= l2e & ATTR_S1_IDX_MASK;
+	attrs |= l3e & (ATTR_S1_AP_MASK | ATTR_S1_XN | ATTR_S1_IDX_MASK);
 
 	if (range->sva > va || !sysctl_kmaps_match(range, attrs)) {
 		sysctl_kmaps_dump(sb, range, va);
--- a/sys/arm64/include/pte.h
+++ b/sys/arm64/include/pte.h
@@ -39,35 +39,50 @@ typedef uint64_t pt_entry_t;		/* page table entry */
 #endif
 
 /* Block and Page attributes */
-#define	ATTR_MASK_H	UINT64_C(0xfffc000000000000)
-#define	ATTR_MASK_L	UINT64_C(0x0000000000000fff)
-#define	ATTR_MASK	(ATTR_MASK_H | ATTR_MASK_L)
+#define	ATTR_MASK_H		UINT64_C(0xfffc000000000000)
+#define	ATTR_MASK_L		UINT64_C(0x0000000000000fff)
+#define	ATTR_MASK		(ATTR_MASK_H | ATTR_MASK_L)
 /* Bits 58:55 are reserved for software */
-#define	ATTR_SW_UNUSED2	(1UL << 58)
-#define	ATTR_SW_UNUSED1	(1UL << 57)
-#define	ATTR_SW_MANAGED	(1UL << 56)
-#define	ATTR_SW_WIRED	(1UL << 55)
-#define	ATTR_UXN	(1UL << 54)
-#define	ATTR_PXN	(1UL << 53)
-#define	ATTR_XN		(ATTR_PXN | ATTR_UXN)
-#define	ATTR_CONTIGUOUS	(1UL << 52)
-#define	ATTR_DBM	(1UL << 51)
-#define	ATTR_nG		(1 << 11)
-#define	ATTR_AF		(1 << 10)
-#define	ATTR_SH(x)	((x) << 8)
-#define	ATTR_SH_MASK	ATTR_SH(3)
-#define	ATTR_SH_NS	0	/* Non-shareable */
-#define	ATTR_SH_OS	2	/* Outer-shareable */
-#define	ATTR_SH_IS	3	/* Inner-shareable */
-#define	ATTR_AP_RW_BIT	(1 << 7)
-#define	ATTR_AP(x)	((x) << 6)
-#define	ATTR_AP_MASK	ATTR_AP(3)
-#define	ATTR_AP_RW	(0 << 1)
-#define	ATTR_AP_RO	(1 << 1)
-#define	ATTR_AP_USER	(1 << 0)
-#define	ATTR_NS		(1 << 5)
-#define	ATTR_IDX(x)	((x) << 2)
-#define	ATTR_IDX_MASK	(7 << 2)
+#define	ATTR_SW_UNUSED2		(1UL << 58)
+#define	ATTR_SW_UNUSED1		(1UL << 57)
+#define	ATTR_SW_MANAGED		(1UL << 56)
+#define	ATTR_SW_WIRED		(1UL << 55)
+
+#define	ATTR_S1_UXN		(1UL << 54)
+#define	ATTR_S1_PXN		(1UL << 53)
+#define	ATTR_S1_XN		(ATTR_S1_PXN | ATTR_S1_UXN)
+
+#define	ATTR_S2_XN(x)		((x) << 53)
+#define	ATTR_S2_XN_MASK		ATTR_S2_XN(3)
+#define	ATTR_S2_XN_NONE		0	/* Allow execution at EL0 & EL1 */
+#define	ATTR_S2_XN_EL1		1	/* Allow execution at EL0 */
+#define	ATTR_S2_XN_ALL		2	/* No execution */
+#define	ATTR_S2_XN_EL0		3	/* Allow execution at EL1 */
+
+#define	ATTR_CONTIGUOUS		(1UL << 52)
+#define	ATTR_DBM		(1UL << 51)
+#define	ATTR_S1_nG		(1 << 11)
+#define	ATTR_AF			(1 << 10)
+#define	ATTR_SH(x)		((x) << 8)
+#define	ATTR_SH_MASK		ATTR_SH(3)
+#define	ATTR_SH_NS		0	/* Non-shareable */
+#define	ATTR_SH_OS		2	/* Outer-shareable */
+#define	ATTR_SH_IS		3	/* Inner-shareable */
+
+#define	ATTR_S1_AP_RW_BIT	(1 << 7)
+#define	ATTR_S1_AP(x)		((x) << 6)
+#define	ATTR_S1_AP_MASK		ATTR_S1_AP(3)
+#define	ATTR_S1_AP_RW		(0 << 1)
+#define	ATTR_S1_AP_RO		(1 << 1)
+#define	ATTR_S1_AP_USER		(1 << 0)
+#define	ATTR_S1_NS		(1 << 5)
+#define	ATTR_S1_IDX(x)		((x) << 2)
+#define	ATTR_S1_IDX_MASK	(7 << 2)
+
+#define	ATTR_S2_S2AP(x)		((x) << 6)
+#define	ATTR_S1_S2AP_MASK	ATTR_S2_S2AP(3)
+#define	ATTR_S2_MEMATTR(x)	((x) << 2)
+#define	ATTR_S2_MEMATTR_MASK	ATTR_S2_MEMATTR(0xf)
 
 #define	ATTR_DEFAULT	(ATTR_AF | ATTR_SH(ATTR_SH_IS))
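One detail of the new layout worth calling out: stage 1 execute-never is two independent bits (ATTR_S1_UXN, ATTR_S1_PXN), while stage 2 packs EL0/EL1 execute permission into a single two-bit field that must be masked and compared. A hedged sketch, with macro values from the hunk above and two hypothetical helper predicates (not code from this commit):

/* Sketch only: testing execute permission under the two layouts. */
#include <stdbool.h>
#include <stdint.h>

#define	ATTR_S1_UXN		(UINT64_C(1) << 54)
#define	ATTR_S2_XN(x)		((uint64_t)(x) << 53)
#define	ATTR_S2_XN_MASK		ATTR_S2_XN(3)
#define	ATTR_S2_XN_NONE		0	/* Allow execution at EL0 & EL1 */
#define	ATTR_S2_XN_EL1		1	/* Allow execution at EL0 */

/* Stage 1: user execute is a single independent bit. */
static bool
stage1_el0_executable(uint64_t pte)
{
	return ((pte & ATTR_S1_UXN) == 0);
}

/* Stage 2: EL0 may execute only in the NONE and EL1 encodings. */
static bool
stage2_el0_executable(uint64_t pte)
{
	uint64_t xn = pte & ATTR_S2_XN_MASK;

	return (xn == ATTR_S2_XN(ATTR_S2_XN_NONE) ||
	    xn == ATTR_S2_XN(ATTR_S2_XN_EL1));
}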