Because of AArch64's weak memory consistency model, we need to include a
memory barrier between the stores for initializing a page table page and
the store for adding that page to the page table.  Otherwise, a page table
walk by another processor's MMU could see the page table page before it
sees the initialized entries.

Simplify pmap_growkernel().  In particular, eliminate an unnecessary TLB
invalidation.

Reviewed by:	andrew, markj
MFC after:	1 week
Differential Revision:	https://reviews.freebsd.org/D21126
commit 072a067fb8
parent 60f2e2e702
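The ordering requirement described in the commit message can be sketched as
follows. This is illustrative only, not the committed code: publish_ptp(),
barrier_ishst(), and PTES_PER_PAGE are made-up names, a plain store stands in
for the pmap's accessor functions, and the inline asm assumes an AArch64
compiler.

#include <stdint.h>

#define	PTES_PER_PAGE	512	/* 4 KB page of 8-byte PTEs */

/* Store-store barrier over the inner shareable domain ("dmb ishst"). */
static inline void
barrier_ishst(void)
{
	__asm __volatile("dmb ishst" : : : "memory");
}

/*
 * Publish a newly initialized page table page.  Without the barrier,
 * another processor's MMU walking the page table could observe the
 * store to *parent_pte before the zeroing stores, and translate
 * through stale, non-zero PTEs in the new page.
 */
static void
publish_ptp(uint64_t *parent_pte, uint64_t *new_ptp, uint64_t new_pte)
{
	for (int i = 0; i < PTES_PER_PAGE; i++)
		new_ptp[i] = 0;		/* initialize the new page */
	barrier_ishst();		/* order the zeroing before the link */
	*parent_pte = new_pte;		/* walkers now see only zeroed PTEs */
}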
@@ -1524,6 +1524,16 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
 	if ((m->flags & PG_ZERO) == 0)
 		pmap_zero_page(m);
 
+	/*
+	 * Because of AArch64's weak memory consistency model, we must have a
+	 * barrier here to ensure that the stores for zeroing "m", whether by
+	 * pmap_zero_page() or an earlier function, are visible before adding
+	 * "m" to the page table.  Otherwise, a page table walk by another
+	 * processor's MMU could see the mapping to "m" and a stale, non-zero
+	 * PTE within "m".
+	 */
+	dmb(ishst);
+
 	/*
 	 * Map the pagetable page into the process address space, if
 	 * it isn't already there.
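For reference, the dmb() used above is the arm64 barrier macro; a sketch,
assuming the definition in sys/arm64/include/atomic.h at the time of this
change:

/* Sketch of the dmb() macro (sys/arm64/include/atomic.h). */
#define	dmb(opt)	__asm __volatile("dmb " #opt : : : "memory")

/*
 * dmb(ishst) thus emits "dmb ishst": all stores before the barrier are
 * observed before any store after it by agents in the inner shareable
 * domain, which includes other processors' MMU page table walkers.
 */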
@@ -1775,12 +1785,14 @@ pmap_growkernel(vm_offset_t addr)
 				panic("pmap_growkernel: no memory to grow kernel");
 			if ((nkpg->flags & PG_ZERO) == 0)
 				pmap_zero_page(nkpg);
+			/* See the dmb() in _pmap_alloc_l3(). */
+			dmb(ishst);
 			paddr = VM_PAGE_TO_PHYS(nkpg);
 			pmap_store(l1, paddr | L1_TABLE);
 			continue; /* try again */
 		}
 		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
-		if ((pmap_load(l2) & ATTR_AF) != 0) {
+		if (pmap_load(l2) != 0) {
 			kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
 			if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
 				kernel_vm_end = vm_map_max(kernel_map);
@@ -1796,9 +1808,10 @@ pmap_growkernel(vm_offset_t addr)
 			panic("pmap_growkernel: no memory to grow kernel");
 		if ((nkpg->flags & PG_ZERO) == 0)
 			pmap_zero_page(nkpg);
+		/* See the dmb() in _pmap_alloc_l3(). */
+		dmb(ishst);
 		paddr = VM_PAGE_TO_PHYS(nkpg);
-		pmap_load_store(l2, paddr | L2_TABLE);
-		pmap_invalidate_page(kernel_pmap, kernel_vm_end);
+		pmap_store(l2, paddr | L2_TABLE);
 
 		kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
 		if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
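The hunk above also carries out the simplification noted in the commit
message: the L2 entry being written was invalid (zero) beforehand, and the
TLB never caches invalid entries, so the old pmap_load_store() plus
pmap_invalidate_page() pair was unnecessary and a plain pmap_store()
suffices.  A minimal sketch of that reasoning (hypothetical helper names,
not the pmap implementation):

#include <assert.h>
#include <stdint.h>

typedef uint64_t pt_entry_t;

/*
 * Plain 64-bit store; ordering against the stores that zeroed the new
 * page is already provided by the preceding dmb(ishst).
 */
static inline void
pte_store(volatile pt_entry_t *ptep, pt_entry_t pte)
{
	*ptep = pte;
}

/*
 * Install a new L2 entry that is known to replace an invalid one.
 * Only a transition away from a *valid* entry can leave a stale TLB
 * entry behind, so no TLB invalidation is needed here.
 */
static void
install_fresh_l2(volatile pt_entry_t *l2, pt_entry_t new_l2)
{
	assert(*l2 == 0);	/* precondition: slot was never valid */
	pte_store(l2, new_l2);	/* no TLB invalidation required */
}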
@@ -5531,6 +5544,10 @@ pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
 	/*
 	 * If the page table page is not leftover from an earlier promotion,
 	 * or the mapping attributes have changed, (re)initialize the L3 table.
+	 *
+	 * When pmap_update_entry() clears the old L2 mapping, it (indirectly)
+	 * performs a dsb().  That dsb() ensures that the stores for filling
+	 * "l3" are visible before "l3" is added to the page table.
 	 */
 	if (ml3->valid == 0 || (l3[0] & ATTR_MASK) != (newl3 & ATTR_MASK))
 		pmap_fill_l3(l3, newl3);
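No explicit dmb() is needed in the demotion path because, as the comment
above notes, pmap_update_entry() already issues a dsb() when it clears the
old L2 mapping, and a DSB provides at least the ordering of a DMB for the
same domain, plus completion semantics.  An illustrative sketch of the two
barrier flavors (made-up wrapper names):

/*
 * DMB orders memory accesses relative to each other; DSB additionally
 * waits for outstanding memory accesses to complete.  Any ordering
 * guaranteed by "dmb ish" is therefore also guaranteed by "dsb ish",
 * so the dsb() inside pmap_update_entry() makes the stores filling
 * "l3" visible before "l3" is linked into the page table.
 */
static inline void
barrier_dmb_ish(void)
{
	__asm __volatile("dmb ish" : : : "memory");
}

static inline void
barrier_dsb_ish(void)
{
	__asm __volatile("dsb ish" : : : "memory");
}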