[1] According to the x86 architectural specifications, no virtual-to-physical page mapping should span two or more MTRRs of different types.
Add a pmap function, pmap_demote_DMAP(), by which the MTRR module can
ensure that the direct map region doesn't have such a mapping.

[2] Fix a couple of nearby style errors in amd64_mrset().

[3] Re-enable the use of 1GB page mappings for implementing the direct
map.  (See also r197580 and r213897.)

Tested by:	kib@ on a Westmere-family processor [3]
MFC after:	3 weeks
This commit is contained in:
Alan Cox 2010-10-27 16:46:37 +00:00
parent baa0988a1e
commit 92ababa777
3 changed files with 90 additions and 6 deletions

View File

@ -35,6 +35,10 @@ __FBSDID("$FreeBSD$");
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
@ -527,9 +531,9 @@ static int
amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
struct mem_range_desc *targ;
int error = 0;
int error, i;
switch(*arg) {
switch (*arg) {
case MEMRANGE_SET_UPDATE:
/*
* Make sure that what's being asked for is even
@ -568,6 +572,21 @@ amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
return (EOPNOTSUPP);
}
/*
* Ensure that the direct map region does not contain any mappings
* that span MTRRs of different types. However, the fixed MTRRs can
* be ignored, because a large page mapping the first 1 MB of physical
* memory is a special case that the processor handles. The entire
* TLB will be invalidated by amd64_mrstore(), so pmap_demote_DMAP()
* needn't do it.
*/
i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
mrd = sc->mr_desc + i;
for (; i < sc->mr_ndesc; i++, mrd++) {
if (mrd->mr_flags & MDF_ACTIVE)
pmap_demote_DMAP(mrd->mr_base, mrd->mr_len, FALSE);
}
/* Update the hardware. */
amd64_mrstore(sc);
@ -657,6 +676,21 @@ amd64_mrinit(struct mem_range_softc *sc)
if (mrd->mr_flags & MDF_ACTIVE)
mrd->mr_flags |= MDF_FIRMWARE;
}
/*
* Ensure that the direct map region does not contain any mappings
* that span MTRRs of different types. However, the fixed MTRRs can
* be ignored, because a large page mapping the first 1 MB of physical
* memory is a special case that the processor handles. Invalidate
* any old TLB entries that might hold inconsistent memory type
* information.
*/
i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
mrd = sc->mr_desc + i;
for (; i < sc->mr_ndesc; i++, mrd++) {
if (mrd->mr_flags & MDF_ACTIVE)
pmap_demote_DMAP(mrd->mr_base, mrd->mr_len, TRUE);
}
}
/*

View File

@ -462,7 +462,7 @@ create_pagetables(vm_paddr_t *firstaddr)
if (ndmpdp < 4) /* Minimum 4GB of dirmap */
ndmpdp = 4;
DMPDPphys = allocpages(firstaddr, NDMPML4E);
if (TRUE || (amd_feature & AMDID_PAGE1GB) == 0)
if ((amd_feature & AMDID_PAGE1GB) == 0)
DMPDphys = allocpages(firstaddr, ndmpdp);
dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
@ -494,11 +494,16 @@ create_pagetables(vm_paddr_t *firstaddr)
((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U;
}
/* Now set up the direct map space using either 2MB or 1GB pages */
/* Preset PG_M and PG_A because demotion expects it */
if (TRUE || (amd_feature & AMDID_PAGE1GB) == 0) {
/*
* Now, set up the direct map region using either 2MB or 1GB pages.
* Later, if pmap_mapdev{_attr}() uses the direct map for non-write-
* back memory, pmap_change_attr() will demote any 2MB or 1GB page
* mappings that are partially used.
*/
if ((amd_feature & AMDID_PAGE1GB) == 0) {
for (i = 0; i < NPDEPG * ndmpdp; i++) {
((pd_entry_t *)DMPDphys)[i] = (vm_paddr_t)i << PDRSHIFT;
/* Preset PG_M and PG_A because demotion expects it. */
((pd_entry_t *)DMPDphys)[i] |= PG_RW | PG_V | PG_PS |
PG_G | PG_M | PG_A;
}
@ -512,6 +517,7 @@ create_pagetables(vm_paddr_t *firstaddr)
for (i = 0; i < ndmpdp; i++) {
((pdp_entry_t *)DMPDPphys)[i] =
(vm_paddr_t)i << PDPSHIFT;
/* Preset PG_M and PG_A because demotion expects it. */
((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_PS |
PG_G | PG_M | PG_A;
}
@ -4947,6 +4953,49 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
return (error);
}
/*
 * Demote any superpage mapping in the direct map region that covers more
 * than the given range of physical addresses.  The range's size must be a
 * power of two, and its start address must be aligned to that size.  The
 * demotion preserves every attribute of the mapping, so flushing the TLB
 * is optional; the caller asks for it via "invalidate".
 */
void
pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate)
{
	pdp_entry_t *pdpe;
	pd_entry_t *pde;
	vm_offset_t dva;
	boolean_t demoted;

	/* A range of NBPDP or more can never be inside a single 1GB page. */
	if (len >= NBPDP)
		return;

	dva = PHYS_TO_DMAP(base);
	demoted = FALSE;
	PMAP_LOCK(kernel_pmap);

	/* The direct map is always fully populated; a missing PDPE is fatal. */
	pdpe = pmap_pdpe(kernel_pmap, dva);
	if ((*pdpe & PG_V) == 0)
		panic("pmap_demote_DMAP: invalid PDPE");
	if ((*pdpe & PG_PS) != 0) {
		/* Break a 1GB mapping down into 2MB mappings. */
		if (!pmap_demote_pdpe(kernel_pmap, pdpe, dva))
			panic("pmap_demote_DMAP: PDPE failed");
		demoted = TRUE;
	}

	/* If the range is smaller than 2MB, demote the 2MB mapping as well. */
	if (len < NBPDR) {
		pde = pmap_pdpe_to_pde(pdpe, dva);
		if ((*pde & PG_V) == 0)
			panic("pmap_demote_DMAP: invalid PDE");
		if ((*pde & PG_PS) != 0) {
			if (!pmap_demote_pde(kernel_pmap, pde, dva))
				panic("pmap_demote_DMAP: PDE failed");
			demoted = TRUE;
		}
	}

	/* Flush only when something actually changed and the caller asked. */
	if (demoted && invalidate)
		pmap_invalidate_page(kernel_pmap, dva);
	PMAP_UNLOCK(kernel_pmap);
}
/*
* perform the pmap work for mincore
*/

View File

@ -307,6 +307,7 @@ extern vm_offset_t virtual_end;
void pmap_bootstrap(vm_paddr_t *);
int pmap_change_attr(vm_offset_t, vm_size_t, int);
void pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate);
void pmap_init_pat(void);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void *pmap_kenter_temporary(vm_paddr_t pa, int i);