When a copy-on-write fault occurs, pmap_enter() is called upon to replace the
mapping to the old read-only page with a mapping to the new read-write page. To
destroy the old mapping, pmap_enter() must destroy its page table and PV
entries and invalidate its TLB entry. This change simply invalidates that TLB
entry a little earlier, specifically, on amd64 and arm64, before the PV list
lock is held.

Reviewed by:	kib, markj
MFC after:	1 week
Differential Revision:	https://reviews.freebsd.org/D23027
commit 1c3a241032 (parent 31c251a046)
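The hunks below make the same change in the amd64, arm64, and i386 pmap_enter() implementations (the i386 version goes through pmap_invalidate_page_int()). The following is a minimal standalone sketch of the reordering, assuming hypothetical stub names such as teardown_old_mapping_before()/_after(), pv_list_lock(), and tlb_shootdown(); it is not the FreeBSD code, and only the relative order of the TLB invalidation and the PV list lock mirrors the real change:

/*
 * Sketch only: every function and constant here is an illustrative stub.
 */
#include <stdio.h>

#define PG_A		0x020	/* accessed bit, as in an x86 PTE */
#define PG_MANAGED	0x800	/* "old mapping was managed", for this sketch */

static void pv_list_lock(void)                { puts("  PV list lock held"); }
static void pv_list_unlock(void)              { puts("  PV list lock dropped"); }
static void tlb_shootdown(unsigned long va)   { printf("  TLB shootdown for va %#lx\n", va); }
static void pv_entry_remove(unsigned long va) { printf("  PV entry for va %#lx removed\n", va); }

/* Old ordering: the shootdown ran while the old page's PV list lock was held. */
static void
teardown_old_mapping_before(unsigned long va, unsigned long origpte)
{
	if ((origpte & PG_MANAGED) != 0) {
		pv_list_lock();
		pv_entry_remove(va);
		if ((origpte & PG_A) != 0)
			tlb_shootdown(va);	/* lengthens the lock hold time */
		pv_list_unlock();
	} else if ((origpte & PG_A) != 0)
		tlb_shootdown(va);
}

/* New ordering: invalidate first, then take the lock for the PV bookkeeping. */
static void
teardown_old_mapping_after(unsigned long va, unsigned long origpte)
{
	if ((origpte & PG_MANAGED) != 0) {
		if ((origpte & PG_A) != 0)
			tlb_shootdown(va);	/* issued before the lock is taken */
		pv_list_lock();
		pv_entry_remove(va);
		pv_list_unlock();
	} else {
		/* Unmanaged mappings are assumed to have been accessed. */
		tlb_shootdown(va);
	}
}

int
main(void)
{
	puts("before:");
	teardown_old_mapping_before(0x1000, PG_MANAGED | PG_A);
	puts("after:");
	teardown_old_mapping_after(0x1000, PG_MANAGED | PG_A);
	return (0);
}

The point of the reordering is that a TLB invalidation, which can involve cross-CPU shootdown IPIs, no longer executes while the old page's PV list lock is held, so that lock is held for a shorter time.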
@@ -6131,8 +6131,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 			 */
 			if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 				vm_page_dirty(om);
-			if ((origpte & PG_A) != 0)
+			if ((origpte & PG_A) != 0) {
+				pmap_invalidate_page(pmap, va);
 				vm_page_aflag_set(om, PGA_REFERENCED);
+			}
 			CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
 			pv = pmap_pvh_remove(&om->md, pmap, va);
 			KASSERT(pv != NULL,
@@ -6144,9 +6146,13 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 			    ((om->flags & PG_FICTITIOUS) != 0 ||
 			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
 				vm_page_aflag_clear(om, PGA_WRITEABLE);
-		}
-		if ((origpte & PG_A) != 0)
+		} else {
+			/*
+			 * Since this mapping is unmanaged, assume that PG_A
+			 * is set.
+			 */
 			pmap_invalidate_page(pmap, va);
+		}
 		origpte = 0;
 	} else {
 		/*
@@ -3449,8 +3449,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 			 */
 			if (pmap_pte_dirty(orig_l3))
 				vm_page_dirty(om);
-			if ((orig_l3 & ATTR_AF) != 0)
+			if ((orig_l3 & ATTR_AF) != 0) {
+				pmap_invalidate_page(pmap, va);
 				vm_page_aflag_set(om, PGA_REFERENCED);
+			}
 			CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
 			pv = pmap_pvh_remove(&om->md, pmap, va);
 			if ((m->oflags & VPO_UNMANAGED) != 0)
@@ -3460,8 +3462,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 			    ((om->flags & PG_FICTITIOUS) != 0 ||
 			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
 				vm_page_aflag_clear(om, PGA_WRITEABLE);
+		} else {
+			KASSERT((orig_l3 & ATTR_AF) != 0,
+			    ("pmap_enter: unmanaged mapping lacks ATTR_AF"));
+			pmap_invalidate_page(pmap, va);
 		}
-		pmap_invalidate_page(pmap, va);
 		orig_l3 = 0;
 	} else {
 		/*
@@ -3798,8 +3798,10 @@ __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m,
 			 */
 			if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
 				vm_page_dirty(om);
-			if ((origpte & PG_A) != 0)
+			if ((origpte & PG_A) != 0) {
+				pmap_invalidate_page_int(pmap, va);
 				vm_page_aflag_set(om, PGA_REFERENCED);
+			}
 			pv = pmap_pvh_remove(&om->md, pmap, va);
 			KASSERT(pv != NULL,
 			    ("pmap_enter: no PV entry for %#x", va));
@@ -3810,9 +3812,13 @@ __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m,
 			    ((om->flags & PG_FICTITIOUS) != 0 ||
 			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
 				vm_page_aflag_clear(om, PGA_WRITEABLE);
-		}
-		if ((origpte & PG_A) != 0)
+		} else {
+			/*
+			 * Since this mapping is unmanaged, assume that PG_A
+			 * is set.
+			 */
 			pmap_invalidate_page_int(pmap, va);
+		}
 		origpte = 0;
 	} else {
 		/*