Introduce pmap_store(), and use it to replace pmap_load_store() in places
where the page table entry was previously invalid.  (Note that I did not
replace pmap_load_store() when it was followed by a TLB invalidation, even
if we are not using the return value from pmap_load_store().)
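
For reference, the two accessors differ only in whether the previous entry
is returned (see the new macro alongside the existing ones in the diff
below).  A minimal sketch of the distinction, with illustrative names:

    old_pte = pmap_load_store(pte, new_pte); /* atomic_swap_64: returns the prior entry */
    pmap_store(pte, new_pte);                /* atomic_store_64: plain store, no return value */

When the prior entry is known to be invalid (zero), the value returned by
the swap carries no useful information, so the plain store suffices.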

Correct an error in pmap_enter().  The test that determines when to set
PGA_WRITEABLE was always true, even when the mapping was read-only.
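
A sketch of why the old test could not fail, assuming ATTR_AP_RW expands to
zero in the arm64 pte.h of this era (so ATTR_AP(ATTR_AP_RW) is also zero):

    /* Old: reduces to (new_l3 & 0) == 0, which is always true. */
    if ((new_l3 & ATTR_AP(ATTR_AP_RW)) == ATTR_AP(ATTR_AP_RW))
            vm_page_aflag_set(m, PGA_WRITEABLE);

    /* New: key off the software DBM bit, which is only set when the
     * mapping is created writable. */
    if ((new_l3 & ATTR_SW_DBM) != 0)
            vm_page_aflag_set(m, PGA_WRITEABLE);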

In pmap_enter_l2(), when replacing an empty kernel page table page by a
superpage mapping, clear the old l2 entry and issue a TLB invalidation.  My
reading of the ARM architecture manual leads me to believe that the TLB
could hold an intermediate entry referencing the empty kernel page table
page even though it contains no valid mappings.
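
For clarity, the resulting sequence in pmap_enter_l2() for this case
(kernel addresses only), annotated; the code is taken from the diff below:

    mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
    if (pmap_insert_pt_page(pmap, mt, false))
            panic("pmap_enter_l2: trie insert failed");
    pmap_clear(l2);                 /* clear the L2 entry referencing the empty page table page */
    pmap_invalidate_page(pmap, va); /* flush any cached intermediate (table walk) entry */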

Replace a couple of direct uses of atomic_clear_64() with the new
pmap_clear_bits().
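
pmap_clear_bits() is not defined in this diff; presumably it sits with the
other pmap_*() accessors in pmap.c as something like:

    #define pmap_clear_bits(table, bits) atomic_clear_64(table, bits)

so these call sites keep the same atomic behavior under the naming used
elsewhere in the file.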

In a couple of comments, replace the term "paging-structure caches", an
Intel-specific name for the caches that hold intermediate entries in the
page table, with wording that is more consistent with the ARM architecture
manual.

Reviewed by:	markj
X-MFC after:	r350004
Differential Revision:	https://reviews.freebsd.org/D20998
Alan Cox 2019-07-21 03:26:26 +00:00
parent b982c7ee20
commit 1a4cb969d1
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=350191


@@ -328,6 +328,7 @@ static __inline vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
 #define pmap_load_clear(table) atomic_swap_64(table, 0)
 #define pmap_load_store(table, entry) atomic_swap_64(table, entry)
 #define pmap_set_bits(table, bits) atomic_set_64(table, bits)
+#define pmap_store(table, entry) atomic_store_64(table, entry)
 /********************/
 /* Inline functions */
@@ -637,7 +638,7 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
 (vm_offset_t)l2);
 freemempos += PAGE_SIZE;
-pmap_load_store(&pagetable_dmap[l1_slot],
+pmap_store(&pagetable_dmap[l1_slot],
 (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
 memset(l2, 0, PAGE_SIZE);
@@ -655,7 +656,7 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
 l2_slot = pmap_l2_index(va);
 KASSERT(l2_slot != 0, ("..."));
-pmap_load_store(&l2[l2_slot],
+pmap_store(&l2[l2_slot],
 (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
 ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
 }
@@ -667,7 +668,7 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
 (physmap[i + 1] - pa) >= L1_SIZE;
 pa += L1_SIZE, va += L1_SIZE) {
 l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
-pmap_load_store(&pagetable_dmap[l1_slot],
+pmap_store(&pagetable_dmap[l1_slot],
 (pa & ~L1_OFFSET) | ATTR_DEFAULT | ATTR_XN |
 ATTR_IDX(CACHED_MEMORY) | L1_BLOCK);
 }
@@ -682,7 +683,7 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
 (vm_offset_t)l2);
 freemempos += PAGE_SIZE;
-pmap_load_store(&pagetable_dmap[l1_slot],
+pmap_store(&pagetable_dmap[l1_slot],
 (l2_pa & ~Ln_TABLE_MASK) | L1_TABLE);
 memset(l2, 0, PAGE_SIZE);
@@ -692,7 +693,7 @@ pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa,
 for (; va < DMAP_MAX_ADDRESS && pa < physmap[i + 1];
 pa += L2_SIZE, va += L2_SIZE) {
 l2_slot = pmap_l2_index(va);
-pmap_load_store(&l2[l2_slot],
+pmap_store(&l2[l2_slot],
 (pa & ~L2_OFFSET) | ATTR_DEFAULT | ATTR_XN |
 ATTR_IDX(CACHED_MEMORY) | L2_BLOCK);
 }
@@ -727,7 +728,7 @@ pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start)
 KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
 pa = pmap_early_vtophys(l1pt, l2pt);
-pmap_load_store(&l1[l1_slot],
+pmap_store(&l1[l1_slot],
 (pa & ~Ln_TABLE_MASK) | L1_TABLE);
 l2pt += PAGE_SIZE;
 }
@@ -757,7 +758,7 @@ pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
 KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
 pa = pmap_early_vtophys(l1pt, l3pt);
-pmap_load_store(&l2[l2_slot],
+pmap_store(&l2[l2_slot],
 (pa & ~Ln_TABLE_MASK) | L2_TABLE);
 l3pt += PAGE_SIZE;
 }
@@ -1540,7 +1541,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
 l0index = ptepindex - (NUL2E + NUL1E);
 l0 = &pmap->pm_l0[l0index];
-pmap_load_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
+pmap_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
 } else if (ptepindex >= NUL2E) {
 vm_pindex_t l0index, l1index;
 pd_entry_t *l0, *l1;
@@ -1566,7 +1567,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
 l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
 l1 = &l1[ptepindex & Ln_ADDR_MASK];
-pmap_load_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
+pmap_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
 } else {
 vm_pindex_t l0index, l1index;
 pd_entry_t *l0, *l1, *l2;
@@ -1608,7 +1609,7 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
 l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
 l2 = &l2[ptepindex & Ln_ADDR_MASK];
-pmap_load_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
+pmap_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
 }
 pmap_resident_count_inc(pmap, 1);
@@ -1781,7 +1782,7 @@ pmap_growkernel(vm_offset_t addr)
 if ((nkpg->flags & PG_ZERO) == 0)
 pmap_zero_page(nkpg);
 paddr = VM_PAGE_TO_PHYS(nkpg);
-pmap_load_store(l1, paddr | L1_TABLE);
+pmap_store(l1, paddr | L1_TABLE);
 continue; /* try again */
 }
 l2 = pmap_l1_to_l2(l1, kernel_vm_end);
@@ -3018,7 +3019,7 @@ pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
 pmap_invalidate_range_nopin(pmap, va, va + size);
 /* Create the new mapping */
-pmap_load_store(pte, newpte);
+pmap_store(pte, newpte);
 dsb(ishst);
 critical_exit();
@@ -3304,12 +3305,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 /*
 * No, might be a protection or wiring change.
 */
-if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
-if ((new_l3 & ATTR_AP(ATTR_AP_RW)) ==
-ATTR_AP(ATTR_AP_RW)) {
-vm_page_aflag_set(m, PGA_WRITEABLE);
-}
-}
+if ((orig_l3 & ATTR_SW_MANAGED) != 0 &&
+(new_l3 & ATTR_SW_DBM) != 0)
+vm_page_aflag_set(m, PGA_WRITEABLE);
 goto validate;
 }
@@ -3415,7 +3413,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 }
 } else {
 /* New mapping */
-pmap_load_store(l3, new_l3);
+pmap_store(l3, new_l3);
 dsb(ishst);
 }
@@ -3517,12 +3515,16 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
 vm_page_free_pages_toq(&free, true);
 if (va >= VM_MAXUSER_ADDRESS) {
 /*
-* Both pmap_remove_l2() and pmap_remove_l3() will
-* leave the kernel page table page zero filled.
+* Both pmap_remove_l2() and pmap_remove_l3_range()
+* will leave the kernel page table page zero filled.
+* Nonetheless, the TLB could have an intermediate
+* entry for the kernel page table page.
 */
 mt = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
 if (pmap_insert_pt_page(pmap, mt, false))
 panic("pmap_enter_l2: trie insert failed");
+pmap_clear(l2);
+pmap_invalidate_page(pmap, va);
 } else
 KASSERT(pmap_load(l2) == 0,
 ("pmap_enter_l2: non-zero L2 entry %p", l2));
@@ -3536,10 +3538,13 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
 SLIST_INIT(&free);
 if (pmap_unwire_l3(pmap, va, l2pg, &free)) {
 /*
-* Although "va" is not mapped, paging-structure
-* caches could nonetheless have entries that
+* Although "va" is not mapped, the TLB could
+* nonetheless have intermediate entries that
 * refer to the freed page table pages.
 * Invalidate those entries.
+*
+* XXX redundant invalidation (See
+* _pmap_unwire_l3().)
 */
 pmap_invalidate_page(pmap, va);
 vm_page_free_pages_toq(&free, true);
@@ -3564,7 +3569,7 @@ pmap_enter_l2(pmap_t pmap, vm_offset_t va, pd_entry_t new_l2, u_int flags,
 /*
 * Map the superpage.
 */
-(void)pmap_load_store(l2, new_l2);
+pmap_store(l2, new_l2);
 dsb(ishst);
 atomic_add_long(&pmap_l2_mappings, 1);
@@ -3767,7 +3772,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 m->md.pv_memattr == VM_MEMATTR_WRITE_BACK)
 cpu_icache_sync_range(PHYS_TO_DMAP(pa), PAGE_SIZE);
-pmap_load_store(l3, l3_val);
+pmap_store(l3, l3_val);
 dsb(ishst);
 return (mpte);
@@ -3840,7 +3845,7 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 * demote the mapping and fall through.
 */
 if (sva + L2_SIZE == va_next && eva >= va_next) {
-atomic_clear_64(l2, ATTR_SW_WIRED);
+pmap_clear_bits(l2, ATTR_SW_WIRED);
 pmap->pm_stats.wired_count -= L2_SIZE /
 PAGE_SIZE;
 continue;
@@ -3865,7 +3870,7 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 * the pmap lock synchronizes access to ATTR_SW_WIRED,
 * the System MMU may write to the entry concurrently.
 */
-atomic_clear_64(l3, ATTR_SW_WIRED);
+pmap_clear_bits(l3, ATTR_SW_WIRED);
 pmap->pm_stats.wired_count--;
 }
 }
@@ -3944,8 +3949,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 nbits = 0;
 if ((srcptepaddr & ATTR_SW_DBM) != 0)
 nbits |= ATTR_AP_RW_BIT;
-(void)pmap_load_store(l2,
-(srcptepaddr & ~mask) | nbits);
+pmap_store(l2, (srcptepaddr & ~mask) | nbits);
 pmap_resident_count_inc(dst_pmap, L2_SIZE /
 PAGE_SIZE);
 atomic_add_long(&pmap_l2_mappings, 1);
@@ -3994,8 +3998,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 nbits = 0;
 if ((ptetemp & ATTR_SW_DBM) != 0)
 nbits |= ATTR_AP_RW_BIT;
-(void)pmap_load_store(dst_pte,
-(ptetemp & ~mask) | nbits);
+pmap_store(dst_pte, (ptetemp & ~mask) | nbits);
 pmap_resident_count_inc(dst_pmap, 1);
 } else {
 SLIST_INIT(&free);
@@ -4003,8 +4006,8 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 &free)) {
 /*
 * Although "addr" is not mapped,
-* paging-structure caches could
-* nonetheless have entries that refer
+* the TLB could nonetheless have
+* intermediate entries that refer
 * to the freed page table pages.
 * Invalidate those entries.
 *