diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index a7bca24136b8..967e56330789 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -4085,6 +4085,26 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
 	PG_RW = pmap_rw_bit(pmap);
 	anychanged = FALSE;
 
+	/*
+	 * Although this function delays and batches the invalidation
+	 * of stale TLB entries, it does not need to call
+	 * pmap_delayed_invl_started() and
+	 * pmap_delayed_invl_finished(), because it does not
+	 * ordinarily destroy mappings.  Stale TLB entries from
+	 * protection-only changes need only be invalidated before the
+	 * pmap lock is released, because protection-only changes do
+	 * not destroy PV entries.  Even operations that iterate over
+	 * a physical page's PV list of mappings, like
+	 * pmap_remove_write(), acquire the pmap lock for each
+	 * mapping.  Consequently, for protection-only changes, the
+	 * pmap lock suffices to synchronize both page table and TLB
+	 * updates.
+	 *
+	 * This function only destroys a mapping if pmap_demote_pde()
+	 * fails.  In that case, stale TLB entries are immediately
+	 * invalidated.
+	 */
+
 	PMAP_LOCK(pmap);
 	for (; sva < eva; sva = va_next) {
 
@@ -5469,6 +5489,15 @@ pmap_page_is_mapped(vm_page_t m)
  * no processor is currently accessing the user address space.  In
  * particular, a page table entry's dirty bit won't change state once
  * this function starts.
+ *
+ * Although this function destroys all of the pmap's managed,
+ * non-wired mappings, it can delay and batch the invalidation of TLB
+ * entries without calling pmap_delayed_invl_started() and
+ * pmap_delayed_invl_finished().  Because the pmap is not active on
+ * any other processor, none of these TLB entries will ever be used
+ * before their eventual invalidation.  Consequently, there is no need
+ * for either pmap_remove_all() or pmap_remove_write() to wait for
+ * that eventual TLB invalidation.
  */
 void
 pmap_remove_pages(pmap_t pmap)
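
As a rough user-space sketch of the distinction drawn in the comments above: a protection-only change can rely on the pmap lock alone, because the page-table update and the TLB flush both complete before the lock is dropped, whereas an operation that destroys PV entries brackets its batched TLB flush with a started/finished pair so that PV-list walkers can wait for the flush.  All toy_* names below are invented for illustration, and the simple in-flight counter is only a stand-in for the kernel's generation-number mechanism behind pmap_delayed_invl_started(), pmap_delayed_invl_finished(), and pmap_delayed_invl_wait(); this is not FreeBSD code.

/*
 * Toy model of delayed, batched TLB invalidation.  Build with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pmap_lock = PTHREAD_MUTEX_INITIALIZER;

/* Delayed-invalidation bookkeeping: how many removals are still in flight. */
static pthread_mutex_t invl_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t invl_cv = PTHREAD_COND_INITIALIZER;
static int invl_inflight;

static void
toy_invl_started(void)
{
	pthread_mutex_lock(&invl_mtx);
	invl_inflight++;
	pthread_mutex_unlock(&invl_mtx);
}

static void
toy_invl_finished(void)
{
	pthread_mutex_lock(&invl_mtx);
	if (--invl_inflight == 0)
		pthread_cond_broadcast(&invl_cv);
	pthread_mutex_unlock(&invl_mtx);
}

/* Stand-in for pmap_delayed_invl_wait(): block until pending flushes finish. */
static void
toy_invl_wait(void)
{
	pthread_mutex_lock(&invl_mtx);
	while (invl_inflight > 0)
		pthread_cond_wait(&invl_cv, &invl_mtx);
	pthread_mutex_unlock(&invl_mtx);
}

/*
 * Protection-only change (cf. pmap_protect()): the PTE update and the TLB
 * flush both happen before the pmap lock is dropped, so the lock alone is
 * the synchronization point and no started/finished bracket is needed.
 */
static void
toy_protect(void)
{
	pthread_mutex_lock(&pmap_lock);
	printf("clear PG_RW in the PTEs\n");
	printf("flush the now-stale TLB entries\n");
	pthread_mutex_unlock(&pmap_lock);
}

/*
 * Mapping destruction (cf. pmap_remove()): PV entries are freed before the
 * batched TLB flush, so the whole operation is bracketed and a PV-list
 * walker that misses an entry blocks in toy_invl_wait().
 */
static void
toy_remove(void)
{
	toy_invl_started();
	pthread_mutex_lock(&pmap_lock);
	printf("destroy PTEs and free PV entries\n");
	pthread_mutex_unlock(&pmap_lock);
	printf("batched TLB flush for the removed range\n");
	toy_invl_finished();
}

int
main(void)
{
	toy_protect();
	toy_remove();
	toy_invl_wait();	/* what a PV-list walker would do */
	return (0);
}

The in-flight counter is coarser than the kernel's mechanism: a waiter here also waits for removals that start after it, whereas the generation-number scheme lets a waiter block only on invalidations that were already pending when it looked at the page.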