Don't reset memory attributes when mapping physical addresses for ACPI.

Previously, AcpiOsMemory was using pmap_mapbios which would always map
the requested address Write-Back (WB).  For several AMD Ryzen laptops,
the BIOS uses AcpiOsMemory to directly access the PCI MCFG region in
order to access PCI config registers.  This has the side effect of
remapping the MCFG region in the direct map as WB instead of UC,
hanging the laptops during boot.

On the one laptop I examined in detail, the _PIC global method used to
switch from 8259A PICs to I/O APICs uses a pair of PCI config space
registers at offset 0x84 in the device at 0:0:0 as a pair of
address/data registers to access an indirect register in the chipset
and clear a single bit to switch modes.

To fix, alter the semantics of pmap_mapbios() such that it does not
modify the attributes of any existing mappings and instead uses the
existing attributes.  If a new mapping is created, this new mapping
uses WB (the default memory attribute).

Special thanks to the gentleman whose name I don't have who brought
two affected laptops to the hacker lounge at BSDCan.  Direct access to
the affected systems permitted finding the root cause within an hour
or so.

PR:		231760, 236899
Reviewed by:	kib, alc
MFC after:	2 weeks
Differential Revision:	https://reviews.freebsd.org/D20327
This commit is contained in:
John Baldwin 2019-08-03 01:36:05 +00:00
parent a58383d257
commit c45cbc7a1f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=350551
4 changed files with 60 additions and 27 deletions

View File

@ -1097,6 +1097,13 @@ static caddr_t crashdumpmap;
#define PMAP_ENTER_NORECLAIM 0x1000000 /* Don't reclaim PV entries. */
#define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */
/*
* Internal flags for pmap_mapdev_internal() and
* pmap_change_attr_locked().
*/
#define MAPDEV_FLUSHCACHE 0x0000001 /* Flush cache after mapping. */
#define MAPDEV_SETATTR 0x0000002 /* Modify existing attrs. */
static void free_pv_chunk(struct pv_chunk *pc);
static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
@ -1117,7 +1124,7 @@ static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
vm_offset_t va);
static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode,
bool noflush);
int flags);
static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
vm_offset_t va, struct rwlock **lockp);
@ -7711,7 +7718,7 @@ pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask)
* NOT real memory.
*/
static void *
pmap_mapdev_internal(vm_paddr_t pa, vm_size_t size, int mode, bool noflush)
pmap_mapdev_internal(vm_paddr_t pa, vm_size_t size, int mode, int flags)
{
struct pmap_preinit_mapping *ppim;
vm_offset_t va, offset;
@ -7745,7 +7752,8 @@ pmap_mapdev_internal(vm_paddr_t pa, vm_size_t size, int mode, bool noflush)
for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
ppim = pmap_preinit_mapping + i;
if (ppim->pa == pa && ppim->sz == size &&
ppim->mode == mode)
(ppim->mode == mode ||
(flags & MAPDEV_SETATTR) == 0))
return ((void *)(ppim->va + offset));
}
/*
@ -7754,9 +7762,12 @@ pmap_mapdev_internal(vm_paddr_t pa, vm_size_t size, int mode, bool noflush)
*/
if (pa < dmaplimit && pa + size <= dmaplimit) {
va = PHYS_TO_DMAP(pa);
PMAP_LOCK(kernel_pmap);
i = pmap_change_attr_locked(va, size, mode, noflush);
PMAP_UNLOCK(kernel_pmap);
if ((flags & MAPDEV_SETATTR) != 0) {
PMAP_LOCK(kernel_pmap);
i = pmap_change_attr_locked(va, size, mode, flags);
PMAP_UNLOCK(kernel_pmap);
} else
i = 0;
if (!i)
return ((void *)(va + offset));
}
@ -7767,7 +7778,7 @@ pmap_mapdev_internal(vm_paddr_t pa, vm_size_t size, int mode, bool noflush)
for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
if (!noflush)
if ((flags & MAPDEV_FLUSHCACHE) != 0)
pmap_invalidate_cache_range(va, va + tmpsize);
return ((void *)(va + offset));
}
@ -7776,28 +7787,31 @@ void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{
/*
 * Diff pair (old line then new): the removed call passed the bool
 * 'noflush = false', which per the old internal routine meant "flush
 * the cache and modify existing attributes".  The replacement spells
 * the same behavior with the explicit MAPDEV_FLUSHCACHE and
 * MAPDEV_SETATTR flag bits, so this wrapper is behaviorally unchanged.
 */
return (pmap_mapdev_internal(pa, size, mode, false));
return (pmap_mapdev_internal(pa, size, mode, MAPDEV_FLUSHCACHE |
MAPDEV_SETATTR));
}
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{
/*
 * Diff pair (old line then new): device mappings stay uncacheable.
 * The new form routes through pmap_mapdev_attr(), which supplies
 * MAPDEV_FLUSHCACHE | MAPDEV_SETATTR — the same semantics the old
 * 'false' (noflush) argument expressed, just without duplicating the
 * flag list here.
 */
return (pmap_mapdev_internal(pa, size, PAT_UNCACHEABLE, false));
return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
}
void *
pmap_mapdev_pciecfg(vm_paddr_t pa, vm_size_t size)
{
/*
 * Diff pair (old line then new): PCIe config space was mapped with
 * 'noflush = true' (no cache flush, attributes still set).  The new
 * form passes MAPDEV_SETATTR without MAPDEV_FLUSHCACHE — identical
 * behavior under the flags-based internal interface.
 */
return (pmap_mapdev_internal(pa, size, PAT_UNCACHEABLE, true));
return (pmap_mapdev_internal(pa, size, PAT_UNCACHEABLE,
MAPDEV_SETATTR));
}
void *
pmap_mapbios(vm_paddr_t pa, vm_size_t size)
{
/*
 * Diff pair (old line then new): this is the commit's actual behavior
 * change.  The old call (noflush = false) both flushed and forced the
 * attributes of any existing mapping to WB; the new call passes only
 * MAPDEV_FLUSHCACHE, deliberately omitting MAPDEV_SETATTR so an
 * existing mapping's attributes (e.g. a UC MCFG region) are left
 * alone.  WB is used only when a brand-new mapping is created.
 */
return (pmap_mapdev_internal(pa, size, PAT_WRITE_BACK, false));
return (pmap_mapdev_internal(pa, size, PAT_WRITE_BACK,
MAPDEV_FLUSHCACHE));
}
void
@ -7936,13 +7950,13 @@ pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
int error;
PMAP_LOCK(kernel_pmap);
error = pmap_change_attr_locked(va, size, mode, false);
error = pmap_change_attr_locked(va, size, mode, MAPDEV_FLUSHCACHE);
PMAP_UNLOCK(kernel_pmap);
return (error);
}
static int
pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool noflush)
pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, int flags)
{
vm_offset_t base, offset, tmpva;
vm_paddr_t pa_start, pa_end, pa_end1;
@ -8059,7 +8073,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool noflush)
/* Run ended, update direct map. */
error = pmap_change_attr_locked(
PHYS_TO_DMAP(pa_start),
pa_end - pa_start, mode, noflush);
pa_end - pa_start, mode, flags);
if (error != 0)
break;
/* Start physical address run. */
@ -8089,7 +8103,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool noflush)
/* Run ended, update direct map. */
error = pmap_change_attr_locked(
PHYS_TO_DMAP(pa_start),
pa_end - pa_start, mode, noflush);
pa_end - pa_start, mode, flags);
if (error != 0)
break;
/* Start physical address run. */
@ -8117,7 +8131,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool noflush)
/* Run ended, update direct map. */
error = pmap_change_attr_locked(
PHYS_TO_DMAP(pa_start),
pa_end - pa_start, mode, noflush);
pa_end - pa_start, mode, flags);
if (error != 0)
break;
/* Start physical address run. */
@ -8132,7 +8146,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool noflush)
pa_end1 = MIN(pa_end, dmaplimit);
if (pa_start != pa_end1)
error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
pa_end1 - pa_start, mode, noflush);
pa_end1 - pa_start, mode, flags);
}
/*
@ -8141,7 +8155,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool noflush)
*/
if (changed) {
pmap_invalidate_range(kernel_pmap, base, tmpva);
if (!noflush)
if ((flags & MAPDEV_FLUSHCACHE) != 0)
pmap_invalidate_cache_range(base, tmpva);
}
return (error);

View File

@ -5404,10 +5404,12 @@ pmap_pde_attr(pd_entry_t *pde, int cache_bits)
* NOT real memory.
*/
static void *
__CONCAT(PMTYPE, mapdev_attr)(vm_paddr_t pa, vm_size_t size, int mode)
__CONCAT(PMTYPE, mapdev_attr)(vm_paddr_t pa, vm_size_t size, int mode,
int flags)
{
struct pmap_preinit_mapping *ppim;
vm_offset_t va, offset;
vm_page_t m;
vm_size_t tmpsize;
int i;
@ -5415,9 +5417,11 @@ __CONCAT(PMTYPE, mapdev_attr)(vm_paddr_t pa, vm_size_t size, int mode)
size = round_page(offset + size);
pa = pa & PG_FRAME;
if (pa < PMAP_MAP_LOW && pa + size <= PMAP_MAP_LOW)
if (pa < PMAP_MAP_LOW && pa + size <= PMAP_MAP_LOW) {
va = pa + PMAP_MAP_LOW;
else if (!pmap_initialized) {
if ((flags & MAPDEV_SETATTR) == 0)
return ((void *)(va + offset));
} else if (!pmap_initialized) {
va = 0;
for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
ppim = pmap_preinit_mapping + i;
@ -5440,15 +5444,25 @@ __CONCAT(PMTYPE, mapdev_attr)(vm_paddr_t pa, vm_size_t size, int mode)
for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
ppim = pmap_preinit_mapping + i;
if (ppim->pa == pa && ppim->sz == size &&
ppim->mode == mode)
(ppim->mode == mode ||
(flags & MAPDEV_SETATTR) == 0))
return ((void *)(ppim->va + offset));
}
va = kva_alloc(size);
if (va == 0)
panic("%s: Couldn't allocate KVA", __func__);
}
for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE) {
if ((flags & MAPDEV_SETATTR) == 0 && pmap_initialized) {
m = PHYS_TO_VM_PAGE(pa);
if (m != NULL && VM_PAGE_TO_PHYS(m) == pa) {
pmap_kenter_attr(va + tmpsize, pa + tmpsize,
m->md.pat_mode);
continue;
}
}
pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
}
pmap_invalidate_range_int(kernel_pmap, va, va + tmpsize);
pmap_invalidate_cache_range(va, va + size);
return ((void *)(va + offset));

View File

@ -776,21 +776,23 @@ void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{
/*
 * Diff pair (old line then new): the i386 dispatch table's
 * pm_mapdev_attr method grows a flags argument; this caller always
 * wants existing attributes modified, so it passes MAPDEV_SETATTR.
 * (No MAPDEV_FLUSHCACHE flag exists in this header's flag set —
 * presumably the i386 implementation flushes unconditionally; confirm
 * against the implementation.)
 */
return (pmap_methods_ptr->pm_mapdev_attr(pa, size, mode));
return (pmap_methods_ptr->pm_mapdev_attr(pa, size, mode,
MAPDEV_SETATTR));
}
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{
/*
 * Diff pair (old line then new): uncacheable device mapping; like
 * pmap_mapdev_attr() above it keeps the pre-commit semantics by
 * passing MAPDEV_SETATTR through the method table.
 */
return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_UNCACHEABLE));
return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_UNCACHEABLE,
MAPDEV_SETATTR));
}
void *
pmap_mapbios(vm_paddr_t pa, vm_size_t size)
{
/*
 * Diff pair (old line then new): the i386 half of the behavior
 * change.  Passing flags = 0 (no MAPDEV_SETATTR) tells the
 * implementation to reuse the attributes of any existing mapping
 * rather than forcing them to WB; WB applies only to newly created
 * mappings.
 */
return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_WRITE_BACK));
return (pmap_methods_ptr->pm_mapdev_attr(pa, size, PAT_WRITE_BACK, 0));
}
void

View File

@ -34,6 +34,9 @@
#ifndef _MACHINE_PMAP_BASE_H_
#define _MACHINE_PMAP_BASE_H_
/* Internal flags for pmap_mapdev_attr(). */
#define MAPDEV_SETATTR 0x0000001 /* Modify existing attrs. */
struct pmap_methods {
void (*pm_ksetrw)(vm_offset_t);
void (*pm_remap_lower)(bool);
@ -93,7 +96,7 @@ struct pmap_methods {
boolean_t (*pm_is_referenced)(vm_page_t);
void (*pm_remove_write)(vm_page_t);
int (*pm_ts_referenced)(vm_page_t);
void *(*pm_mapdev_attr)(vm_paddr_t, vm_size_t, int);
void *(*pm_mapdev_attr)(vm_paddr_t, vm_size_t, int, int);
void (*pm_unmapdev)(vm_offset_t, vm_size_t);
void (*pm_page_set_memattr)(vm_page_t, vm_memattr_t);
vm_paddr_t (*pm_extract)(pmap_t, vm_offset_t);