Refine the fix from r312954.  Specifically, add a new PDE-only flag,
PG_PROMOTED, that indicates whether lingering 4KB page mappings might
need to be flushed on a PDE change that restricts or destroys a 2MB
page mapping.  This flag allows the pmap to avoid range invalidations
that are both unnecessary and costly.

Reviewed by:	kib, markj
MFC after:	6 weeks
Differential Revision:	https://reviews.freebsd.org/D9665
commit 0314966858
parent e0a254f6df
Author: Alan Cox
Date:   2017-02-26 19:54:02 +00:00
Notes (svn2git, 2020-12-20 02:59:44 +00:00): svn path=/head/; revision=314310
4 changed files with 81 additions and 60 deletions
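
In outline: pmap_promote_pde() now tags the PDE that it installs with
PG_PROMOTED, and the new helper pmap_invalidate_pde_page() keys the width
of the TLB invalidation off that bit.  A condensed sketch of the two ends
of the mechanism (drawn from the diffs below; the surrounding kernel
context is omitted):

	/* On promotion: record that 4KB TLB entries were left behind. */
	pde_store(pde, PG_PROMOTED | PG_PS | newpde);

	/*
	 * On a later demotion, removal, or protection change: flush the
	 * whole 2MB range only if 4KB entries may linger; otherwise a
	 * single INVLPG suffices.
	 */
	if ((pde & PG_PROMOTED) != 0)
		pmap_invalidate_range(pmap, va, va + NBPDR - 1);
	else
		pmap_invalidate_page(pmap, va);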

sys/amd64/amd64/pmap.c

@@ -613,6 +613,8 @@ static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
 		    vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
+static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
+		    pd_entry_t pde);
 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 static void pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask);
 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
@@ -1838,6 +1840,27 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
 }
 #endif /* !SMP */
 
+static void
+pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
+{
+
+	/*
+	 * When the PDE has PG_PROMOTED set, the 2MB page mapping was created
+	 * by a promotion that did not invalidate the 512 4KB page mappings
+	 * that might exist in the TLB.  Consequently, at this point, the TLB
+	 * may hold both 4KB and 2MB page mappings for the address range [va,
+	 * va + NBPDR).  Therefore, the entire range must be invalidated here.
+	 * In contrast, when PG_PROMOTED is clear, the TLB will not hold any
+	 * 4KB page mappings for the address range [va, va + NBPDR), and so a
+	 * single INVLPG suffices to invalidate the 2MB page mapping from the
+	 * TLB.
+	 */
+	if ((pde & PG_PROMOTED) != 0)
+		pmap_invalidate_range(pmap, va, va + NBPDR - 1);
+	else
+		pmap_invalidate_page(pmap, va);
+}
+
 #define PMAP_CLFLUSH_THRESHOLD   (2 * 1024 * 1024)
 
 void
@@ -3472,7 +3495,8 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
 		SLIST_INIT(&free);
 		sva = trunc_2mpage(va);
 		pmap_remove_pde(pmap, pde, sva, &free, lockp);
-		pmap_invalidate_range(pmap, sva, sva + NBPDR - 1);
+		if ((oldpde & PG_G) == 0)
+			pmap_invalidate_pde_page(pmap, sva, oldpde);
 		pmap_free_zero_pages(&free);
 		CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#lx"
 		    " in pmap %p", va, pmap);
@@ -3612,25 +3636,8 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 	oldpde = pte_load_clear(pdq);
 	if (oldpde & PG_W)
 		pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
-
-	/*
-	 * When workaround_erratum383 is false, a promotion to a 2M
-	 * page mapping does not invalidate the 512 4K page mappings
-	 * from the TLB.  Consequently, at this point, the TLB may
-	 * hold both 4K and 2M page mappings.  Therefore, the entire
-	 * range of addresses must be invalidated here.  In contrast,
-	 * when workaround_erratum383 is true, a promotion does
-	 * invalidate the 512 4K page mappings, and so a single INVLPG
-	 * suffices to invalidate the 2M page mapping.
-	 */
-	if ((oldpde & PG_G) != 0) {
-		if (workaround_erratum383)
-			pmap_invalidate_page(kernel_pmap, sva);
-		else
-			pmap_invalidate_range(kernel_pmap, sva,
-			    sva + NBPDR - 1);
-	}
-
+	if ((oldpde & PG_G) != 0)
+		pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
 	pmap_resident_count_dec(pmap, NBPDR / PAGE_SIZE);
 	if (oldpde & PG_MANAGED) {
 		CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, oldpde & PG_PS_FRAME);
@@ -4010,16 +4017,16 @@ pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
 	if ((prot & VM_PROT_EXECUTE) == 0)
 		newpde |= pg_nx;
 	if (newpde != oldpde) {
-		if (!atomic_cmpset_long(pde, oldpde, newpde))
+		/*
+		 * As an optimization to future operations on this PDE, clear
+		 * PG_PROMOTED.  The impending invalidation will remove any
+		 * lingering 4KB page mappings from the TLB.
+		 */
+		if (!atomic_cmpset_long(pde, oldpde, newpde & ~PG_PROMOTED))
 			goto retry;
-		if (oldpde & PG_G) {
-			/* See pmap_remove_pde() for explanation. */
-			if (workaround_erratum383)
-				pmap_invalidate_page(kernel_pmap, sva);
-			else
-				pmap_invalidate_range(kernel_pmap, sva,
-				    sva + NBPDR - 1);
-		} else
+		if ((oldpde & PG_G) != 0)
+			pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
+		else
 			anychanged = TRUE;
 	}
 	return (anychanged);
@@ -4272,7 +4279,7 @@ pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
 	if (workaround_erratum383)
 		pmap_update_pde(pmap, va, pde, PG_PS | newpde);
 	else
-		pde_store(pde, PG_PS | newpde);
+		pde_store(pde, PG_PROMOTED | PG_PS | newpde);
 
 	atomic_add_long(&pmap_pde_promotions, 1);
 	CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#lx"
@@ -4585,7 +4592,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	pmap_resident_count_inc(pmap, NBPDR / PAGE_SIZE);
 
 	/*
-	 * Map the superpage.
+	 * Map the superpage.  (This is not a promoted mapping; there will not
+	 * be any lingering 4KB page mappings in the TLB.)
 	 */
 	pde_store(pde, newpde);
 
sys/amd64/include/pmap.h

@@ -109,6 +109,7 @@
 #define	PG_MANAGED	X86_PG_AVAIL2
 #define	EPT_PG_EMUL_V	X86_PG_AVAIL(52)
 #define	EPT_PG_EMUL_RW	X86_PG_AVAIL(53)
+#define	PG_PROMOTED	X86_PG_AVAIL(54)	/* PDE only */
 #define	PG_FRAME	(0x000ffffffffff000ul)
 #define	PG_PS_FRAME	(0x000fffffffe00000ul)
 
sys/i386/i386/pmap.c

@@ -301,6 +301,8 @@ static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
 		    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
 static void pmap_flush_page(vm_page_t m);
 static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
+static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
+		    pd_entry_t pde);
 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
 static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
 static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
@@ -1259,6 +1261,27 @@ pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde)
 }
 #endif /* !SMP */
 
+static void
+pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
+{
+
+	/*
+	 * When the PDE has PG_PROMOTED set, the 2- or 4MB page mapping was
+	 * created by a promotion that did not invalidate the 512 or 1024 4KB
+	 * page mappings that might exist in the TLB.  Consequently, at this
+	 * point, the TLB may hold both 4KB and 2- or 4MB page mappings for
+	 * the address range [va, va + NBPDR).  Therefore, the entire range
+	 * must be invalidated here.  In contrast, when PG_PROMOTED is clear,
+	 * the TLB will not hold any 4KB page mappings for the address range
+	 * [va, va + NBPDR), and so a single INVLPG suffices to invalidate the
+	 * 2- or 4MB page mapping from the TLB.
+	 */
+	if ((pde & PG_PROMOTED) != 0)
+		pmap_invalidate_range(pmap, va, va + NBPDR - 1);
+	else
+		pmap_invalidate_page(pmap, va);
+}
+
 #define PMAP_CLFLUSH_THRESHOLD	(2 * 1024 * 1024)
 
 void
@@ -2649,7 +2672,8 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 		SLIST_INIT(&free);
 		sva = trunc_4mpage(va);
 		pmap_remove_pde(pmap, pde, sva, &free);
-		pmap_invalidate_range(pmap, sva, sva + NBPDR - 1);
+		if ((oldpde & PG_G) == 0)
+			pmap_invalidate_pde_page(pmap, sva, oldpde);
 		pmap_free_zero_pages(&free);
 		CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
 		    " in pmap %p", va, pmap);
@@ -2819,23 +2843,9 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
 	/*
 	 * Machines that don't support invlpg, also don't support
 	 * PG_G.
-	 *
-	 * When workaround_erratum383 is false, a promotion to a 2M/4M
-	 * page mapping does not invalidate the 512/1024 4K page mappings
-	 * from the TLB.  Consequently, at this point, the TLB may
-	 * hold both 4K and 2M/4M page mappings.  Therefore, the entire
-	 * range of addresses must be invalidated here.  In contrast,
-	 * when workaround_erratum383 is true, a promotion does
-	 * invalidate the 512/1024 4K page mappings, and so a single INVLPG
-	 * suffices to invalidate the 2M/4M page mapping.
 	 */
-	if ((oldpde & PG_G) != 0) {
-		if (workaround_erratum383)
-			pmap_invalidate_page(kernel_pmap, sva);
-		else
-			pmap_invalidate_range(kernel_pmap, sva,
-			    sva + NBPDR - 1);
-	}
-
+	if ((oldpde & PG_G) != 0)
+		pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
+
 	pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 	if (oldpde & PG_MANAGED) {
@@ -3143,16 +3153,16 @@ pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
 		newpde |= pg_nx;
 #endif
 	if (newpde != oldpde) {
-		if (!pde_cmpset(pde, oldpde, newpde))
+		/*
+		 * As an optimization to future operations on this PDE, clear
+		 * PG_PROMOTED.  The impending invalidation will remove any
+		 * lingering 4KB page mappings from the TLB.
+		 */
+		if (!pde_cmpset(pde, oldpde, newpde & ~PG_PROMOTED))
 			goto retry;
-		if (oldpde & PG_G) {
-			/* See pmap_remove_pde() for explanation. */
-			if (workaround_erratum383)
-				pmap_invalidate_page(kernel_pmap, sva);
-			else
-				pmap_invalidate_range(kernel_pmap, sva,
-				    sva + NBPDR - 1);
-		} else
+		if ((oldpde & PG_G) != 0)
+			pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
+		else
 			anychanged = TRUE;
 	}
 	return (anychanged);
@@ -3437,9 +3447,9 @@ pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
 	if (workaround_erratum383)
 		pmap_update_pde(pmap, va, pde, PG_PS | newpde);
 	else if (pmap == kernel_pmap)
-		pmap_kenter_pde(va, PG_PS | newpde);
+		pmap_kenter_pde(va, PG_PROMOTED | PG_PS | newpde);
 	else
-		pde_store(pde, PG_PS | newpde);
+		pde_store(pde, PG_PROMOTED | PG_PS | newpde);
 
 	pmap_pde_promotions++;
 	CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x"
@@ -3722,7 +3732,8 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 	pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
 
 	/*
-	 * Map the superpage.
+	 * Map the superpage.  (This is not a promoted mapping; there will not
+	 * be any lingering 4KB page mappings in the TLB.)
 	 */
 	pde_store(pde, newpde);
 
sys/i386/include/pmap.h

@@ -71,6 +71,7 @@
 /* Our various interpretations of the above */
 #define	PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
 #define	PG_MANAGED	PG_AVAIL2
+#define	PG_PROMOTED	PG_AVAIL3	/* PDE only */
 #if defined(PAE) || defined(PAE_TABLES)
 #define	PG_FRAME	(0x000ffffffffff000ull)
 #define	PG_PS_FRAME	(0x000fffffffe00000ull)