Better support memory mapped console devices, such as VGA and EFI
frame buffers and memory mapped UARTs.

1.  Delay calling cninit() until after pmap_bootstrap(). This makes
    sure the pmap is initialized enough to add translations. Keep
    kdb_init() after cninit() so that we have a console when we need
    to break into the debugger on boot.
2.  Unfortunately, the ATPIC code had to be moved as well so as to
    avoid a spurious trap #30; the reason for this is not known at
    this time.
3.  In pmap_mapdev_attr(), when we need to map a device prior to the
    VM system being initialized, use virtual_avail as the KVA to map
    the device at. In particular, avoid using the direct map on amd64
    because we can't demote by virtue of not being able to allocate
    yet. Keep track of the translation.
    Re-use the translation after the VM has been initialized so as
    not to waste KVA and to satisfy the assumption in uart(4) that
    the handle returned for the low-level console is the same as the
    one later returned when the device is probed and attached (see
    the sketch after this list).
4.  In pmap_unmapdev(), remove the mapping from the table when called
    pre-init; otherwise keep the mapping. During bus probe and attach,
    device resources are mapped and unmapped multiple times, which
    would otherwise have us destroy the mapping used by the low-level
    console.
5.  In pmap_init(), set pmap_initialized to signal that we're not
    pre-init anymore. On amd64, bring the direct map in sync with the
    translations created at that time.
6.  Implement bus_space_map() and bus_space_unmap() for real: when
    the tag corresponds to memory space, call the corresponding
    pmap_mapdev() and pmap_unmapdev() functions to construct an
    actual handle.
7.  In efifb.c and vt_vga.c, remove the crutches and hacks and simply
    call pmap_mapdev_attr() or bus_space_map() as desired.
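
The following sketch is not part of the commit; it merely illustrates
the handle stability that items 3 and 4 provide, with fb_pa and
fb_size standing in for a hypothetical framebuffer's physical address
and size:

    /*
     * Illustrative only. Before pmap_init(), pmap_mapdev_attr()
     * carves KVA from virtual_avail and records the translation in
     * the preinit table; after pmap_init(), the same call finds the
     * recorded entry and returns the identical handle instead of
     * allocating fresh KVA.
     */
    void *early, *late;

    early = pmap_mapdev_attr(fb_pa, fb_size, VM_MEMATTR_WRITE_COMBINING);
    /* ... pmap_init() runs; the device is probed and attached ... */
    late = pmap_mapdev_attr(fb_pa, fb_size, VM_MEMATTR_WRITE_COMBINING);
    KASSERT(early == late, ("console mapping moved across pmap_init()"));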

Notes:
1.  uart(4) already used bus_space_map() during low-level console
    setup, but since serial ports have traditionally been I/O port
    based, the lack of a proper implementation of that function was
    not a problem. It has always supported memory mapped UARTs as
    low-level consoles by setting hw.uart.console accordingly (see
    the example after these notes).
2.  The use of the direct map on amd64 without setting caching
    attributes has been a bigger problem than previously thought.
    This change has the fortunate (and unexpected) side-effect of
    fixing various EFI frame buffer problems (though not all).
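
As an example of note 1 (hedged: the address below is a placeholder,
not something taken from this commit), a memory mapped UART is
selected as the low-level console from loader.conf like so:

    # /boot/loader.conf -- 0xfedc6000 is a hypothetical UART address
    hw.uart.console="mm:0xfedc6000"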

PR: 191564, 194952

Special thanks to:
1.  XipLink, Inc. -- for generously donating an Intel Bay Trail E3800
    based eval board (ADLE3800PC).
2.  The FreeBSD Foundation, in particular emaste@ -- for UEFI support
    in general and for testing.
3.  Everyone who tested the proposed patch for PR 191564.
4.  jhb@ and kib@ -- for being a sounding board and applying a clue
    bat when needed.
Author: Marcel Moolenaar 2015-08-12 15:26:32 +00:00
Parent: 548afe2bec
Commit: 7ef5e8bc80
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=286667
10 changed files with 301 additions and 145 deletions


@@ -1625,38 +1625,6 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
MODINFO_METADATA | MODINFOMD_EFI_MAP) != NULL)
vty_set_preferred(VTY_VT);
/*
* Initialize the console before we print anything out.
*/
cninit();
#ifdef DEV_ISA
#ifdef DEV_ATPIC
elcr_probe();
atpic_startup();
#else
/* Reset and mask the atpics and leave them shut down. */
atpic_reset();
/*
* Point the ICU spurious interrupt vectors at the APIC spurious
* interrupt handler.
*/
setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
#endif
#else
#error "have you forgotten the isa device?";
#endif
kdb_init();
#ifdef KDB
if (boothowto & RB_KDB)
kdb_enter(KDB_WHY_BOOTFLAGS,
"Boot flags requested debugger");
#endif
identify_cpu(); /* Final stage of CPU initialization */
initializecpu(); /* Initialize CPU registers */
initializecpucache();
@@ -1693,6 +1661,35 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
/* now running on new page tables, configured,and u/iom is accessible */
cninit();
#ifdef DEV_ISA
#ifdef DEV_ATPIC
elcr_probe();
atpic_startup();
#else
/* Reset and mask the atpics and leave them shut down. */
atpic_reset();
/*
* Point the ICU spurious interrupt vectors at the APIC spurious
* interrupt handler.
*/
setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
#endif
#else
#error "have you forgotten the isa device?";
#endif
kdb_init();
#ifdef KDB
if (boothowto & RB_KDB)
kdb_enter(KDB_WHY_BOOTFLAGS,
"Boot flags requested debugger");
#endif
msgbufinit(msgbufp, msgbufsize);
fpuinit();


@@ -363,6 +363,18 @@ static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */
static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */
static int ndmpdpphys; /* number of DMPDPphys pages */
/*
* pmap_mapdev support pre initialization (i.e. console)
*/
#define PMAP_PREINIT_MAPPING_COUNT 8
static struct pmap_preinit_mapping {
vm_paddr_t pa;
vm_offset_t va;
vm_size_t sz;
int mode;
} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
static int pmap_initialized;
static struct rwlock_padalign pvh_global_lock;
/*
@@ -1016,6 +1028,7 @@ pmap_page_init(vm_page_t m)
void
pmap_init(void)
{
struct pmap_preinit_mapping *ppim;
vm_page_t mpte;
vm_size_t s;
int i, pv_npg;
@@ -1083,6 +1096,22 @@ pmap_init(void)
M_WAITOK | M_ZERO);
for (i = 0; i < pv_npg; i++)
TAILQ_INIT(&pv_table[i].pv_list);
pmap_initialized = 1;
for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
ppim = pmap_preinit_mapping + i;
if (ppim->va == 0)
continue;
/* Make the direct map consistent */
if (ppim->pa < dmaplimit && ppim->pa + ppim->sz < dmaplimit) {
(void)pmap_change_attr(PHYS_TO_DMAP(ppim->pa),
ppim->sz, ppim->mode);
}
if (!bootverbose)
continue;
printf("PPIM %u: PA=%#lx, VA=%#lx, size=%#lx, mode=%#x\n", i,
ppim->pa, ppim->va, ppim->sz, ppim->mode);
}
}
static SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
@@ -6105,24 +6134,54 @@ pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask)
void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{
struct pmap_preinit_mapping *ppim;
vm_offset_t va, offset;
vm_size_t tmpsize;
int i;
/*
* If the specified range of physical addresses fits within the direct
* map window, use the direct map.
*/
if (pa < dmaplimit && pa + size < dmaplimit) {
va = PHYS_TO_DMAP(pa);
if (!pmap_change_attr(va, size, mode))
return ((void *)va);
}
offset = pa & PAGE_MASK;
size = round_page(offset + size);
va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
pa = trunc_page(pa);
if (!pmap_initialized) {
va = 0;
for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
ppim = pmap_preinit_mapping + i;
if (ppim->va == 0) {
ppim->pa = pa;
ppim->sz = size;
ppim->mode = mode;
ppim->va = virtual_avail;
virtual_avail += size;
va = ppim->va;
break;
}
}
if (va == 0)
panic("%s: too many preinit mappings", __func__);
} else {
/*
* If we have a preinit mapping, re-use it.
*/
for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
ppim = pmap_preinit_mapping + i;
if (ppim->pa == pa && ppim->sz == size &&
ppim->mode == mode)
return ((void *)(ppim->va + offset));
}
/*
* If the specified range of physical addresses fits within
* the direct map window, use the direct map.
*/
if (pa < dmaplimit && pa + size < dmaplimit) {
va = PHYS_TO_DMAP(pa);
if (!pmap_change_attr(va, size, mode))
return ((void *)(va + offset));
}
va = kva_alloc(size);
if (va == 0)
panic("%s: Couldn't allocate KVA", __func__);
}
for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
@@ -6147,15 +6206,32 @@ pmap_mapbios(vm_paddr_t pa, vm_size_t size)
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
vm_offset_t base, offset;
struct pmap_preinit_mapping *ppim;
vm_offset_t offset;
int i;
/* If we gave a direct map region in pmap_mapdev, do nothing */
if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
return;
base = trunc_page(va);
offset = va & PAGE_MASK;
size = round_page(offset + size);
kva_free(base, size);
va = trunc_page(va);
for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
ppim = pmap_preinit_mapping + i;
if (ppim->va == va && ppim->sz == size) {
if (pmap_initialized)
return;
ppim->pa = 0;
ppim->va = 0;
ppim->sz = 0;
ppim->mode = 0;
if (va + size == virtual_avail)
virtual_avail = va;
return;
}
}
if (pmap_initialized)
kva_free(va, size);
}
/*


@@ -592,6 +592,7 @@ x86/isa/nmi.c standard
x86/isa/orm.c optional isa
x86/pci/pci_bus.c optional pci
x86/pci/qpi.c optional pci
x86/x86/bus_machdep.c standard
x86/x86/busdma_bounce.c standard
x86/x86/busdma_machdep.c standard
x86/x86/cpu_machdep.c standard


@@ -586,6 +586,7 @@ x86/isa/nmi.c standard
x86/isa/orm.c optional isa
x86/pci/pci_bus.c optional pci
x86/pci/qpi.c optional pci
x86/x86/bus_machdep.c standard
x86/x86/busdma_bounce.c standard
x86/x86/busdma_machdep.c standard
x86/x86/cpu_machdep.c standard


@@ -51,7 +51,6 @@ __FBSDID("$FreeBSD$");
static vd_init_t vt_efifb_init;
static vd_probe_t vt_efifb_probe;
static void vt_efifb_remap(void *efifb_data);
static struct vt_driver vt_efifb_driver = {
.vd_name = "efifb",
@@ -71,8 +70,6 @@ static struct vt_driver vt_efifb_driver = {
static struct fb_info local_info;
VT_DRIVER_DECLARE(vt_efifb, vt_efifb_driver);
SYSINIT(efifb_remap, SI_SUB_KMEM, SI_ORDER_ANY, vt_efifb_remap, &local_info);
static int
vt_efifb_probe(struct vt_device *vd)
{
@@ -137,12 +134,8 @@ vt_efifb_init(struct vt_device *vd)
info->fb_size = info->fb_height * info->fb_stride;
info->fb_pbase = efifb->fb_addr;
/*
* Use the direct map as a crutch until pmap is available. Once pmap
* is online, the framebuffer will be remapped by vt_efifb_remap()
* using pmap_mapdev_attr().
*/
info->fb_vbase = PHYS_TO_DMAP(efifb->fb_addr);
info->fb_vbase = (intptr_t)pmap_mapdev_attr(info->fb_pbase,
info->fb_size, VM_MEMATTR_WRITE_COMBINING);
/* Get pixel storage size. */
info->fb_bpp = info->fb_stride / info->fb_width * 8;
@@ -158,21 +151,3 @@ vt_efifb_init(struct vt_device *vd)
return (CN_INTERNAL);
}
static void
vt_efifb_remap(void *xinfo)
{
struct fb_info *info = xinfo;
if (info->fb_pbase == 0)
return;
/*
* Remap as write-combining. This massively improves performance and
* happens very early in kernel initialization, when everything is
* still single-threaded and interrupts are off, so replacing the
* mapping address is safe.
*/
info->fb_vbase = (intptr_t)pmap_mapdev_attr(info->fb_pbase,
info->fb_size, VM_MEMATTR_WRITE_COMBINING);
}


@@ -46,13 +46,6 @@ __FBSDID("$FreeBSD$");
#include <machine/bus.h>
#if defined(__amd64__) || defined(__i386__)
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/pmap.h>
#include <machine/vmparam.h>
#endif /* __amd64__ || __i386__ */
struct vga_softc {
bus_space_tag_t vga_fb_tag;
bus_space_handle_t vga_fb_handle;
@@ -1228,13 +1221,16 @@ vga_init(struct vt_device *vd)
#if defined(__amd64__) || defined(__i386__)
sc->vga_fb_tag = X86_BUS_SPACE_MEM;
sc->vga_fb_handle = KERNBASE + VGA_MEM_BASE;
sc->vga_reg_tag = X86_BUS_SPACE_IO;
sc->vga_reg_handle = VGA_REG_BASE;
#else
# error "Architecture not yet supported!"
#endif
bus_space_map(sc->vga_fb_tag, VGA_MEM_BASE, VGA_MEM_SIZE, 0,
&sc->vga_fb_handle);
bus_space_map(sc->vga_reg_tag, VGA_REG_BASE, VGA_REG_SIZE, 0,
&sc->vga_reg_handle);
TUNABLE_INT_FETCH("hw.vga.textmode", &textmode);
if (textmode) {
vd->vd_flags |= VDF_TEXTMODE;


@@ -2612,6 +2612,40 @@ init386(first)
*/
clock_init();
finishidentcpu(); /* Final stage of CPU initialization */
setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
GSEL(GCODE_SEL, SEL_KPL));
setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
GSEL(GCODE_SEL, SEL_KPL));
initializecpu(); /* Initialize CPU registers */
initializecpucache();
/* pointer to selector slot for %fs/%gs */
PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
#if defined(PAE) || defined(PAE_TABLES)
dblfault_tss.tss_cr3 = (int)IdlePDPT;
#else
dblfault_tss.tss_cr3 = (int)IdlePTD;
#endif
dblfault_tss.tss_eip = (int)dblfault_handler;
dblfault_tss.tss_eflags = PSL_KERNEL;
dblfault_tss.tss_ds = dblfault_tss.tss_es =
dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
vm86_initialize();
getmemsize(first);
init_param2(physmem);
/* now running on new page tables, configured,and u/iom is accessible */
/*
* Initialize the console before we print anything out.
*/
@@ -2652,40 +2686,6 @@ init386(first)
kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
finishidentcpu(); /* Final stage of CPU initialization */
setidt(IDT_UD, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
GSEL(GCODE_SEL, SEL_KPL));
setidt(IDT_GP, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
GSEL(GCODE_SEL, SEL_KPL));
initializecpu(); /* Initialize CPU registers */
initializecpucache();
/* pointer to selector slot for %fs/%gs */
PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd);
dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
#if defined(PAE) || defined(PAE_TABLES)
dblfault_tss.tss_cr3 = (int)IdlePDPT;
#else
dblfault_tss.tss_cr3 = (int)IdlePTD;
#endif
dblfault_tss.tss_eip = (int)dblfault_handler;
dblfault_tss.tss_eflags = PSL_KERNEL;
dblfault_tss.tss_ds = dblfault_tss.tss_es =
dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);
vm86_initialize();
getmemsize(first);
init_param2(physmem);
/* now running on new page tables, configured,and u/iom is accessible */
msgbufinit(msgbufp, msgbufsize);
#ifdef DEV_NPX
npxinit(true);


@@ -228,6 +228,18 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
#define PAT_INDEX_SIZE 8
static int pat_index[PAT_INDEX_SIZE]; /* cache mode to PAT index conversion */
/*
* pmap_mapdev support pre initialization (i.e. console)
*/
#define PMAP_PREINIT_MAPPING_COUNT 8
static struct pmap_preinit_mapping {
vm_paddr_t pa;
vm_offset_t va;
vm_size_t sz;
int mode;
} pmap_preinit_mapping[PMAP_PREINIT_MAPPING_COUNT];
static int pmap_initialized;
static struct rwlock_padalign pvh_global_lock;
/*
@@ -744,6 +756,7 @@ pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
void
pmap_init(void)
{
struct pmap_preinit_mapping *ppim;
vm_page_t mpte;
vm_size_t s;
int i, pv_npg;
@@ -827,6 +840,17 @@ pmap_init(void)
UMA_ZONE_VM | UMA_ZONE_NOFREE);
uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
#endif
pmap_initialized = 1;
if (!bootverbose)
return;
for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
ppim = pmap_preinit_mapping + i;
if (ppim->va == 0)
continue;
printf("PPIM %u: PA=%#jx, VA=%#x, size=%#x, mode=%#x\n", i,
(uintmax_t)ppim->pa, ppim->va, ppim->sz, ppim->mode);
}
}
@@ -5071,8 +5095,10 @@ pmap_pde_attr(pd_entry_t *pde, int cache_bits)
void *
pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
{
struct pmap_preinit_mapping *ppim;
vm_offset_t va, offset;
vm_size_t tmpsize;
int i;
offset = pa & PAGE_MASK;
size = round_page(offset + size);
@@ -5080,11 +5106,36 @@ pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
if (pa < KERNLOAD && pa + size <= KERNLOAD)
va = KERNBASE + pa;
else
else if (!pmap_initialized) {
va = 0;
for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
ppim = pmap_preinit_mapping + i;
if (ppim->va == 0) {
ppim->pa = pa;
ppim->sz = size;
ppim->mode = mode;
ppim->va = virtual_avail;
virtual_avail += size;
va = ppim->va;
break;
}
}
if (va == 0)
panic("%s: too many preinit mappings", __func__);
} else {
/*
* If we have a preinit mapping, re-use it.
*/
for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
ppim = pmap_preinit_mapping + i;
if (ppim->pa == pa && ppim->sz == size &&
ppim->mode == mode)
return ((void *)(ppim->va + offset));
}
va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
if (va == 0)
panic("%s: Couldn't allocate KVA", __func__);
}
for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
@@ -5109,14 +5160,31 @@ pmap_mapbios(vm_paddr_t pa, vm_size_t size)
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
vm_offset_t base, offset;
struct pmap_preinit_mapping *ppim;
vm_offset_t offset;
int i;
if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
return;
base = trunc_page(va);
offset = va & PAGE_MASK;
size = round_page(offset + size);
kva_free(base, size);
va = trunc_page(va);
for (i = 0; i < PMAP_PREINIT_MAPPING_COUNT; i++) {
ppim = pmap_preinit_mapping + i;
if (ppim->va == va && ppim->sz == size) {
if (pmap_initialized)
return;
ppim->pa = 0;
ppim->va = 0;
ppim->sz = 0;
ppim->mode = 0;
if (va + size == virtual_avail)
virtual_avail = va;
return;
}
}
if (pmap_initialized)
kva_free(va, size);
}
/*


@@ -130,32 +130,15 @@
* Map a region of device bus space into CPU virtual address space.
*/
static __inline int bus_space_map(bus_space_tag_t t, bus_addr_t addr,
bus_size_t size, int flags,
bus_space_handle_t *bshp);
static __inline int
bus_space_map(bus_space_tag_t t __unused, bus_addr_t addr,
bus_size_t size __unused, int flags __unused,
bus_space_handle_t *bshp)
{
*bshp = addr;
return (0);
}
int bus_space_map(bus_space_tag_t tag, bus_addr_t addr, bus_size_t size,
int flags, bus_space_handle_t *bshp);
/*
* Unmap a region of device bus space.
*/
static __inline void bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh,
bus_size_t size);
static __inline void
bus_space_unmap(bus_space_tag_t t __unused, bus_space_handle_t bsh __unused,
bus_size_t size __unused)
{
}
void bus_space_unmap(bus_space_tag_t tag, bus_space_handle_t bsh,
bus_size_t size);
/*
* Get a new handle for a subregion of an already-mapped area of bus space.

sys/x86/x86/bus_machdep.c (new file)

@@ -0,0 +1,59 @@
/*-
* Copyright (c) 2015 Marcel Moolenaar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <x86/bus.h>
#include <vm/vm.h>
#include <vm/pmap.h>
/*
* Implementation of bus_space_map(), which effectively is a thin
* wrapper around pmap_mapdev() for memory mapped I/O space. It's
* implemented here and not in <x86/bus.h> to avoid pollution.
*/
int
bus_space_map(bus_space_tag_t tag, bus_addr_t addr, bus_size_t size,
int flags __unused, bus_space_handle_t *bshp)
{
*bshp = (tag == X86_BUS_SPACE_MEM)
? (uintptr_t)pmap_mapdev(addr, size)
: addr;
return (0);
}
void
bus_space_unmap(bus_space_tag_t tag, bus_space_handle_t bsh, bus_size_t size)
{
if (tag == X86_BUS_SPACE_MEM)
pmap_unmapdev(bsh, size);
}