Add support for memory attributes (pmap_mapdev_attr() and friends) on
PowerPC/AIM. This is currently stubbed out on Book-E, since I have no
idea how to implement it there.
nwhitehorn 2010-09-30 18:14:12 +00:00
parent b4413dafb1
commit d3610bff0a
7 changed files with 290 additions and 51 deletions
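As a rough usage sketch (not part of the diff): with this in place, a driver on AIM can request a specific memory attribute when it maps device memory. The helper name, framebuffer address, and size below are made up for illustration.

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/vm.h>

/*
 * Hypothetical driver helper: map a framebuffer write-combining, which the
 * new moea_calc_wimg() below encodes as PTE_I without PTE_G, rather than
 * taking the default cache-inhibited and guarded device mapping.
 */
static void *
example_map_framebuffer(vm_offset_t fb_pa, vm_size_t fb_size)
{

        return (pmap_mapdev_attr(fb_pa, fb_size, VM_MEMATTR_WRITE_COMBINING));
}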

@@ -328,9 +328,12 @@ void moea_deactivate(mmu_t, struct thread *);
void moea_cpu_bootstrap(mmu_t, int);
void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
void *moea_mapdev(mmu_t, vm_offset_t, vm_size_t);
void *moea_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea_kextract(mmu_t, vm_offset_t);
void moea_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t);
void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma);
boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
@@ -366,14 +369,17 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle),
MMUMETHOD(mmu_activate, moea_activate),
MMUMETHOD(mmu_deactivate, moea_deactivate),
MMUMETHOD(mmu_page_set_memattr, moea_page_set_memattr),
/* Internal interfaces */
MMUMETHOD(mmu_bootstrap, moea_bootstrap),
MMUMETHOD(mmu_cpu_bootstrap, moea_cpu_bootstrap),
MMUMETHOD(mmu_mapdev_attr, moea_mapdev_attr),
MMUMETHOD(mmu_mapdev, moea_mapdev),
MMUMETHOD(mmu_unmapdev, moea_unmapdev),
MMUMETHOD(mmu_kextract, moea_kextract),
MMUMETHOD(mmu_kenter, moea_kenter),
MMUMETHOD(mmu_kenter_attr, moea_kenter_attr),
MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
{ 0, 0 }
@@ -381,6 +387,40 @@ static mmu_method_t moea_methods[] = {
MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);
static __inline uint32_t
moea_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
uint32_t pte_lo;
int i;
if (ma != VM_MEMATTR_DEFAULT) {
switch (ma) {
case VM_MEMATTR_UNCACHEABLE:
return (PTE_I | PTE_G);
case VM_MEMATTR_WRITE_COMBINING:
case VM_MEMATTR_WRITE_BACK:
case VM_MEMATTR_PREFETCHABLE:
return (PTE_I);
case VM_MEMATTR_WRITE_THROUGH:
return (PTE_W | PTE_M);
}
}
/*
* Assume the page is cache inhibited and access is guarded unless
* it's in our available memory array.
*/
pte_lo = PTE_I | PTE_G;
for (i = 0; i < pregions_sz; i++) {
if ((pa >= pregions[i].mr_start) &&
(pa < (pregions[i].mr_start + pregions[i].mr_size))) {
pte_lo = PTE_M;
break;
}
}
return pte_lo;
}
static void
tlbie(vm_offset_t va)
@@ -1086,7 +1126,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
struct pvo_head *pvo_head;
uma_zone_t zone;
vm_page_t pg;
u_int pte_lo, pvo_flags, was_exec, i;
u_int pte_lo, pvo_flags, was_exec;
int error;
if (!moea_initialized) {
@@ -1128,19 +1168,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
}
}
/*
* Assume the page is cache inhibited and access is guarded unless
* it's in our available memory array.
*/
pte_lo = PTE_I | PTE_G;
for (i = 0; i < pregions_sz; i++) {
if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
(VM_PAGE_TO_PHYS(m) <
(pregions[i].mr_start + pregions[i].mr_size))) {
pte_lo = PTE_M;
break;
}
}
pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), VM_MEMATTR_DEFAULT);
if (prot & VM_PROT_WRITE) {
pte_lo |= PTE_BW;
@@ -1415,15 +1443,54 @@ moea_ts_referenced(mmu_t mmu, vm_page_t m)
return (moea_clear_bit(m, PTE_REF));
}
/*
* Modify the WIMG settings of all mappings for a page.
*/
void
moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
{
struct pvo_entry *pvo;
struct pte *pt;
pmap_t pmap;
u_int lo;
vm_page_lock_queues();
lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
pmap = pvo->pvo_pmap;
PMAP_LOCK(pmap);
mtx_lock(&moea_table_mutex);
pt = moea_pvo_to_pte(pvo, -1);
pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG;
pvo->pvo_pte.pte.pte_lo |= lo;
if (pt != NULL) {
moea_pte_change(pt, &pvo->pvo_pte.pte,
pvo->pvo_vaddr);
if (pvo->pvo_pmap == kernel_pmap)
isync();
}
mtx_unlock(&moea_table_mutex);
PMAP_UNLOCK(pmap);
}
m->md.mdpg_cache_attrs = ma;
vm_page_unlock_queues();
}
/*
* Map a wired page into kernel virtual address space.
*/
void
moea_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{
moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}
void
moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{
u_int pte_lo;
int error;
int i;
#if 0
if (va < VM_MIN_KERNEL_ADDRESS)
@@ -1431,14 +1498,7 @@ moea_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
va);
#endif
pte_lo = PTE_I | PTE_G;
for (i = 0; i < pregions_sz; i++) {
if ((pa >= pregions[i].mr_start) &&
(pa < (pregions[i].mr_start + pregions[i].mr_size))) {
pte_lo = PTE_M;
break;
}
}
pte_lo = moea_calc_wimg(pa, ma);
PMAP_LOCK(kernel_pmap);
error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
@@ -2431,6 +2491,13 @@ moea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
*/
void *
moea_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}
void *
moea_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
vm_offset_t va, tmpva, ppa, offset;
int i;
@@ -2454,7 +2521,7 @@ moea_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
panic("moea_mapdev: Couldn't alloc kernel virtual memory");
for (tmpva = va; size > 0;) {
moea_kenter(mmu, tmpva, ppa);
moea_kenter_attr(mmu, tmpva, ppa, ma);
tlbie(tmpva);
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;

@@ -423,8 +423,11 @@ void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
@@ -461,14 +464,17 @@ static mmu_method_t moea64_methods[] = {
MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle),
MMUMETHOD(mmu_activate, moea64_activate),
MMUMETHOD(mmu_deactivate, moea64_deactivate),
MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr),
/* Internal interfaces */
MMUMETHOD(mmu_bootstrap, moea64_bootstrap),
MMUMETHOD(mmu_cpu_bootstrap, moea64_cpu_bootstrap),
MMUMETHOD(mmu_mapdev, moea64_mapdev),
MMUMETHOD(mmu_mapdev_attr, moea64_mapdev_attr),
MMUMETHOD(mmu_unmapdev, moea64_unmapdev),
MMUMETHOD(mmu_kextract, moea64_kextract),
MMUMETHOD(mmu_kenter, moea64_kenter),
MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr),
MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
{ 0, 0 }
@@ -632,11 +638,24 @@ moea64_pte_change(struct lpte *pt, struct lpte *pvo_pt, uint64_t vpn)
}
static __inline uint64_t
moea64_calc_wimg(vm_offset_t pa)
moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
uint64_t pte_lo;
int i;
if (ma != VM_MEMATTR_DEFAULT) {
switch (ma) {
case VM_MEMATTR_UNCACHEABLE:
return (LPTE_I | LPTE_G);
case VM_MEMATTR_WRITE_COMBINING:
case VM_MEMATTR_WRITE_BACK:
case VM_MEMATTR_PREFETCHABLE:
return (LPTE_I);
case VM_MEMATTR_WRITE_THROUGH:
return (LPTE_W | LPTE_M);
}
}
/*
* Assume the page is cache inhibited and access is guarded unless
* it's in our available memory array.
@@ -1376,7 +1395,7 @@ void moea64_set_scratchpage_pa(int which, vm_offset_t pa) {
moea64_scratchpage_pte[which]->pte_lo &=
~(LPTE_WIMG | LPTE_RPGN);
moea64_scratchpage_pte[which]->pte_lo |=
moea64_calc_wimg(pa) | (uint64_t)pa;
moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
EIEIO();
moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID;
@@ -1524,7 +1543,7 @@ moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
zone = moea64_upvo_zone;
}
pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m));
pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
if (prot & VM_PROT_WRITE) {
pte_lo |= LPTE_BW;
@@ -1891,24 +1910,49 @@ moea64_ts_referenced(mmu_t mmu, vm_page_t m)
return (moea64_clear_bit(m, LPTE_REF));
}
/*
* Modify the WIMG settings of all mappings for a page.
*/
void
moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
{
struct pvo_entry *pvo;
struct lpte *pt;
pmap_t pmap;
uint64_t lo;
vm_page_lock_queues();
lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
pmap = pvo->pvo_pmap;
PMAP_LOCK(pmap);
LOCK_TABLE();
pt = moea64_pvo_to_pte(pvo);
pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
pvo->pvo_pte.lpte.pte_lo |= lo;
if (pt != NULL) {
moea64_pte_change(pt, &pvo->pvo_pte.lpte,
pvo->pvo_vpn);
if (pvo->pvo_pmap == kernel_pmap)
isync();
}
UNLOCK_TABLE();
PMAP_UNLOCK(pmap);
}
m->md.mdpg_cache_attrs = ma;
vm_page_unlock_queues();
}
/*
* Map a wired page into kernel virtual address space.
*/
void
moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{
uint64_t pte_lo;
int error;
#if 0
if (!pmap_bootstrapped) {
if (va >= VM_MIN_KERNEL_ADDRESS && va < virtual_end)
panic("Trying to enter an address in KVA -- %#"
PRIxPTR "!\n",pa);
}
#endif
pte_lo = moea64_calc_wimg(pa);
pte_lo = moea64_calc_wimg(pa, ma);
PMAP_LOCK(kernel_pmap);
error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
@@ -1928,6 +1972,13 @@ moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
PMAP_UNLOCK(kernel_pmap);
}
void
moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{
moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}
/*
* Extract the physical page address associated with the given kernel virtual
* address.
@@ -2976,7 +3027,7 @@ moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
* NOT real memory.
*/
void *
moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
vm_offset_t va, tmpva, ppa, offset;
@@ -2990,7 +3041,7 @@ moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
panic("moea64_mapdev: Couldn't alloc kernel virtual memory");
for (tmpva = va; size > 0;) {
moea64_kenter(mmu, tmpva, ppa);
moea64_kenter_attr(mmu, tmpva, ppa, ma);
size -= PAGE_SIZE;
tmpva += PAGE_SIZE;
ppa += PAGE_SIZE;
@@ -2999,6 +3050,13 @@ moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
return ((void *)(va + offset));
}
void *
moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT);
}
void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{

@@ -122,13 +122,13 @@ struct pvo_entry {
LIST_HEAD(pvo_head, pvo_entry);
struct md_page {
u_int64_t mdpg_attrs;
u_int64_t mdpg_attrs;
vm_memattr_t mdpg_cache_attrs;
struct pvo_head mdpg_pvoh;
};
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_get_memattr(m) ((m)->md.mdpg_cache_attrs)
#define pmap_page_is_mapped(m) (!LIST_EMPTY(&(m)->md.mdpg_pvoh))
#define pmap_page_set_memattr(m, ma) (void)0
/*
* Return the VSID corresponding to a given virtual address.
@@ -187,7 +187,6 @@ struct md_page {
#define pmap_page_get_memattr(m) VM_MEMATTR_DEFAULT
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
#define pmap_page_set_memattr(m, ma) (void)0
#endif /* AIM */
@@ -209,9 +208,12 @@ extern struct pmap kernel_pmap_store;
void pmap_bootstrap(vm_offset_t, vm_offset_t);
void pmap_kenter(vm_offset_t va, vm_offset_t pa);
void pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t);
void pmap_kremove(vm_offset_t);
void *pmap_mapdev(vm_offset_t, vm_size_t);
void *pmap_mapdev_attr(vm_offset_t, vm_size_t, vm_memattr_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
void pmap_deactivate(struct thread *);
vm_offset_t pmap_kextract(vm_offset_t);
int pmap_dev_direct_mapped(vm_offset_t, vm_size_t);

@@ -32,11 +32,13 @@
#include <machine/pte.h>
/* Memory attributes. */
#define VM_MEMATTR_CACHING_INHIBIT ((vm_memattr_t)PTE_I)
#define VM_MEMATTR_GUARD ((vm_memattr_t)PTE_G)
#define VM_MEMATTR_MEMORY_COHERENCE ((vm_memattr_t)PTE_M)
#define VM_MEMATTR_WRITE_THROUGH ((vm_memattr_t)PTE_W)
#define VM_MEMATTR_DEFAULT 0
#define VM_MEMATTR_UNCACHEABLE 0x01
#define VM_MEMATTR_UNCACHED VM_MEMATTR_UNCACHEABLE
#define VM_MEMATTR_CACHEABLE 0x02
#define VM_MEMATTR_WRITE_COMBINING 0x04
#define VM_MEMATTR_WRITE_BACK 0x08
#define VM_MEMATTR_WRITE_THROUGH 0x10
#define VM_MEMATTR_PREFETCHABLE 0x20
#endif /* !_MACHINE_VM_H_ */

@@ -60,6 +60,7 @@ __FBSDID("$FreeBSD$");
static struct {
bus_addr_t addr;
bus_size_t size;
int flags;
} earlyboot_mappings[MAX_EARLYBOOT_MAPPINGS];
static int earlyboot_map_idx = 0;
@@ -72,9 +73,11 @@ __ppc_ba(bus_space_handle_t bsh, bus_size_t ofs)
}
static int
bs_gen_map(bus_addr_t addr, bus_size_t size __unused, int flags __unused,
bs_gen_map(bus_addr_t addr, bus_size_t size, int flags,
bus_space_handle_t *bshp)
{
vm_memattr_t ma;
/*
* Record what we did if we haven't enabled the MMU yet. We
* will need to remap it as soon as the MMU comes up.
@@ -84,10 +87,20 @@ bs_gen_map(bus_addr_t addr, bus_size_t size __unused, int flags __unused,
("%s: too many early boot mapping requests", __func__));
earlyboot_mappings[earlyboot_map_idx].addr = addr;
earlyboot_mappings[earlyboot_map_idx].size = size;
earlyboot_mappings[earlyboot_map_idx].flags = flags;
earlyboot_map_idx++;
*bshp = addr;
} else {
*bshp = (bus_space_handle_t)pmap_mapdev(addr,size);
ma = VM_MEMATTR_DEFAULT;
switch (flags) {
case BUS_SPACE_MAP_CACHEABLE:
ma = VM_MEMATTR_CACHEABLE;
break;
case BUS_SPACE_MAP_PREFETCHABLE:
ma = VM_MEMATTR_PREFETCHABLE;
break;
}
*bshp = (bus_space_handle_t)pmap_mapdev_attr(addr, size, ma);
}
return (0);
@@ -98,6 +111,7 @@ bs_remap_earlyboot(void)
{
int i;
vm_offset_t pa, spa;
vm_memattr_t ma;
for (i = 0; i < earlyboot_map_idx; i++) {
spa = earlyboot_mappings[i].addr;
@@ -105,9 +119,19 @@ bs_remap_earlyboot(void)
== 0)
continue;
ma = VM_MEMATTR_DEFAULT;
switch (earlyboot_mappings[i].flags) {
case BUS_SPACE_MAP_CACHEABLE:
ma = VM_MEMATTR_CACHEABLE;
break;
case BUS_SPACE_MAP_PREFETCHABLE:
ma = VM_MEMATTR_PREFETCHABLE;
break;
}
pa = trunc_page(spa);
while (pa < spa + earlyboot_mappings[i].size) {
pmap_kenter(pa,pa);
pmap_kenter_attr(pa, pa, ma);
pa += PAGE_SIZE;
}
}
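The flag handling added to bs_gen_map() and bs_remap_earlyboot() above lets bus_space consumers influence the memory attribute through the map flags. A hypothetical caller, with the tag, address, and size supplied by the parent bus (none of this is in the diff), might look like:

#include <sys/param.h>
#include <sys/systm.h>

#include <machine/bus.h>

/*
 * Hypothetical consumer: ask for a prefetchable window so the generic map
 * routine ends up calling pmap_mapdev_attr() with VM_MEMATTR_PREFETCHABLE
 * once the MMU is up.
 */
static int
example_map_window(bus_space_tag_t bst, bus_addr_t addr, bus_size_t size,
    bus_space_handle_t *bshp)
{

        return (bus_space_map(bst, addr, size, BUS_SPACE_MAP_PREFETCHABLE,
            bshp));
}

BUS_SPACE_MAP_CACHEABLE is translated to VM_MEMATTR_CACHEABLE by the same switch.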

@@ -111,6 +111,24 @@ CODE {
{
return (NULL);
}
static void *mmu_null_mapdev_attr(mmu_t mmu, vm_offset_t pa,
vm_size_t size, vm_memattr_t ma)
{
return MMU_MAPDEV(mmu, pa, size);
}
static void mmu_null_kenter_attr(mmu_t mmu, vm_offset_t va,
vm_offset_t pa, vm_memattr_t ma)
{
MMU_KENTER(mmu, va, pa);
}
static void mmu_null_page_set_memattr(mmu_t mmu, vm_page_t m,
vm_memattr_t ma)
{
return;
}
};
@@ -747,6 +765,37 @@ METHOD void * mapdev {
vm_size_t _size;
};
/**
* @brief Create a kernel mapping for a given physical address range.
* Called by bus code on behalf of device drivers. The mapping does not
* have to be a virtual address: it can be a direct-mapped physical address
* if that is supported by the MMU.
*
* @param _pa start physical address
* @param _size size in bytes of mapping
* @param _attr cache attributes
*
* @retval addr address of mapping.
*/
METHOD void * mapdev_attr {
mmu_t _mmu;
vm_offset_t _pa;
vm_size_t _size;
vm_memattr_t _attr;
} DEFAULT mmu_null_mapdev_attr;
/**
* @brief Change cache control attributes for a page. Should modify all
* mappings for that page.
*
* @param _m page to modify
* @param _ma new cache control attributes
*/
METHOD void page_set_memattr {
mmu_t _mmu;
vm_page_t _pg;
vm_memattr_t _ma;
} DEFAULT mmu_null_page_set_memattr;
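For illustration, and again not part of the diff: combined with the MI wrapper pmap_page_set_memattr() added in pmap_dispatch.c further down, a caller can retarget every existing mapping of a page it owns in a single call. The page and the helper name here are hypothetical.

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/vm.h>

/*
 * Hypothetical caller: make every mapping of page m uncacheable before
 * handing it to a device that bypasses the cache.  On AIM this walks the
 * page's PVO list and rewrites the WIMG bits, as the moea/moea64
 * page_set_memattr implementations above do.
 */
static void
example_make_uncacheable(vm_page_t m)
{

        pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
}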
/**
* @brief Remove the mapping created by mapdev. Called when a driver
@@ -787,6 +836,19 @@ METHOD void kenter {
vm_offset_t _pa;
};
/**
* @brief Map a wired page into kernel virtual address space
*
* @param _va mapping virtual address
* @param _pa mapping physical address
* @param _ma mapping cache control attributes
*/
METHOD void kenter_attr {
mmu_t _mmu;
vm_offset_t _va;
vm_offset_t _pa;
vm_memattr_t _ma;
} DEFAULT mmu_null_kenter_attr;
/**
* @brief Determine if the given physical address range has been direct-mapped.

@@ -433,6 +433,22 @@ pmap_mapdev(vm_offset_t pa, vm_size_t size)
return (MMU_MAPDEV(mmu_obj, pa, size));
}
void *
pmap_mapdev_attr(vm_offset_t pa, vm_size_t size, vm_memattr_t attr)
{
CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, pa, size);
return (MMU_MAPDEV_ATTR(mmu_obj, pa, size, attr));
}
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, m, ma);
MMU_PAGE_SET_MEMATTR(mmu_obj, m, ma);
}
void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
@@ -457,6 +473,14 @@ pmap_kenter(vm_offset_t va, vm_offset_t pa)
MMU_KENTER(mmu_obj, va, pa);
}
void
pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{
CTR3(KTR_PMAP, "%s(%#x, %#x)", __func__, va, pa);
MMU_KENTER_ATTR(mmu_obj, va, pa, ma);
}
boolean_t
pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
{