Introduce pmap_change_prot() for amd64.
This updates the protection attributes of subranges of the kernel map.
Unlike pmap_protect(), which is typically used for user mappings,
pmap_change_prot() does not perform lazy upgrades of protections.

pmap_change_prot() also updates the aliasing range of the direct map.

Reviewed by:    kib
MFC after:      1 month
Sponsored by:   Netflix
Differential Revision:  https://reviews.freebsd.org/D21758

commit 341d641470
parent b0130de08d
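As a usage sketch (illustrative only, not part of the change below): a kernel consumer with an already-mapped, page-aligned kernel VA range can tighten its permissions and have the direct-map alias of the same pages updated in the same call. The helper name here is hypothetical.

/*
 * Hypothetical illustration only (not part of this commit): make an
 * existing, page-aligned kernel mapping read-only and non-executable.
 */
#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/pmap.h>

static int
example_make_readonly(vm_offset_t va, vm_size_t size)
{
    int error;

    /*
     * VM_PROT_READ clears PG_RW and sets the NX bit on every mapping in
     * the range, demoting 2MB/1GB pages as needed; the direct-map alias
     * of the underlying physical pages is updated as well.
     */
    error = pmap_change_prot(va, size, VM_PROT_READ);
    if (error != 0)
        printf("pmap_change_prot: error %d\n", error);
    return (error);
}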
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -1138,10 +1138,11 @@ static caddr_t crashdumpmap;
 
 /*
  * Internal flags for pmap_mapdev_internal() and
- * pmap_change_attr_locked().
+ * pmap_change_props_locked().
  */
-#define MAPDEV_FLUSHCACHE   0x0000001   /* Flush cache after mapping. */
-#define MAPDEV_SETATTR      0x0000002   /* Modify existing attrs. */
+#define MAPDEV_FLUSHCACHE   0x00000001  /* Flush cache after mapping. */
+#define MAPDEV_SETATTR      0x00000002  /* Modify existing attrs. */
+#define MAPDEV_ASSERTVALID  0x00000004  /* Assert mapping validity. */
 
 TAILQ_HEAD(pv_chunklist, pv_chunk);
 
@@ -1165,8 +1166,8 @@ static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
 static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
     vm_offset_t va);
 
-static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode,
-    int flags);
+static int pmap_change_props_locked(vm_offset_t va, vm_size_t size,
+    vm_prot_t prot, int mode, int flags);
 static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
 static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
     vm_offset_t va, struct rwlock **lockp);
@@ -1189,14 +1190,13 @@ static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
 static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
 static vm_page_t pmap_large_map_getptp_unlocked(void);
 static vm_paddr_t pmap_large_map_kextract(vm_offset_t va);
-static void pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask);
 #if VM_NRESERVLEVEL > 0
 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
     struct rwlock **lockp);
 #endif
 static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
     vm_prot_t prot);
-static void pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask);
+static void pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask);
 static void pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva,
     bool exec);
 static pdp_entry_t *pmap_pti_pdpe(vm_offset_t va);
@@ -7900,38 +7900,18 @@ restart:
  * Miscellaneous support routines follow
  */
 
-/* Adjust the cache mode for a 4KB page mapped via a PTE. */
+/* Adjust the properties for a leaf page table entry. */
 static __inline void
-pmap_pte_attr(pt_entry_t *pte, int cache_bits, int mask)
+pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask)
 {
-    u_int opte, npte;
+    u_long opte, npte;
 
-    /*
-     * The cache mode bits are all in the low 32-bits of the
-     * PTE, so we can just spin on updating the low 32-bits.
-     */
+    opte = *(u_long *)pte;
     do {
-        opte = *(u_int *)pte;
         npte = opte & ~mask;
-        npte |= cache_bits;
-    } while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
-}
-
-/* Adjust the cache mode for a 2MB page mapped via a PDE. */
-static __inline void
-pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask)
-{
-    u_int opde, npde;
-
-    /*
-     * The cache mode bits are all in the low 32-bits of the
-     * PDE, so we can just spin on updating the low 32-bits.
-     */
-    do {
-        opde = *(u_int *)pde;
-        npde = opde & ~mask;
-        npde |= cache_bits;
-    } while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
+        npte |= bits;
+    } while (npte != opte && !atomic_fcmpset_long((u_long *)pte, &opte,
+        npte));
 }
 
 /*
@@ -7987,7 +7967,8 @@ pmap_mapdev_internal(vm_paddr_t pa, vm_size_t size, int mode, int flags)
         va = PHYS_TO_DMAP(pa);
         if ((flags & MAPDEV_SETATTR) != 0) {
             PMAP_LOCK(kernel_pmap);
-            i = pmap_change_attr_locked(va, size, mode, flags);
+            i = pmap_change_props_locked(va, size,
+                PROT_NONE, mode, flags);
             PMAP_UNLOCK(kernel_pmap);
         } else
             i = 0;
@@ -8173,21 +8154,46 @@ pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
     int error;
 
     PMAP_LOCK(kernel_pmap);
-    error = pmap_change_attr_locked(va, size, mode, MAPDEV_FLUSHCACHE);
+    error = pmap_change_props_locked(va, size, PROT_NONE, mode,
+        MAPDEV_FLUSHCACHE);
+    PMAP_UNLOCK(kernel_pmap);
+    return (error);
+}
+
+/*
+ * Changes the specified virtual address range's protections to those
+ * specified by "prot".  Like pmap_change_attr(), protections for aliases
+ * in the direct map are updated as well.  Protections on aliasing mappings may
+ * be a subset of the requested protections; for example, mappings in the direct
+ * map are never executable.
+ */
+int
+pmap_change_prot(vm_offset_t va, vm_size_t size, vm_prot_t prot)
+{
+    int error;
+
+    /* Only supported within the kernel map. */
+    if (va < VM_MIN_KERNEL_ADDRESS)
+        return (EINVAL);
+
+    PMAP_LOCK(kernel_pmap);
+    error = pmap_change_props_locked(va, size, prot, -1,
+        MAPDEV_ASSERTVALID);
     PMAP_UNLOCK(kernel_pmap);
     return (error);
 }
 
 static int
-pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, int flags)
+pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
+    int mode, int flags)
 {
     vm_offset_t base, offset, tmpva;
     vm_paddr_t pa_start, pa_end, pa_end1;
     pdp_entry_t *pdpe;
-    pd_entry_t *pde;
-    pt_entry_t *pte;
-    int cache_bits_pte, cache_bits_pde, error;
-    boolean_t changed;
+    pd_entry_t *pde, pde_bits, pde_mask;
+    pt_entry_t *pte, pte_bits, pte_mask;
+    int error;
+    bool changed;
 
     PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
     base = trunc_page(va);
@@ -8201,9 +8207,33 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, int flags)
     if (base < DMAP_MIN_ADDRESS)
         return (EINVAL);
 
-    cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1);
-    cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0);
-    changed = FALSE;
+    /*
+     * Construct our flag sets and masks.  "bits" is the subset of
+     * "mask" that will be set in each modified PTE.
+     *
+     * Mappings in the direct map are never allowed to be executable.
+     */
+    pde_bits = pte_bits = 0;
+    pde_mask = pte_mask = 0;
+    if (mode != -1) {
+        pde_bits |= pmap_cache_bits(kernel_pmap, mode, true);
+        pde_mask |= X86_PG_PDE_CACHE;
+        pte_bits |= pmap_cache_bits(kernel_pmap, mode, false);
+        pte_mask |= X86_PG_PTE_CACHE;
+    }
+    if (prot != VM_PROT_NONE) {
+        if ((prot & VM_PROT_WRITE) != 0) {
+            pde_bits |= X86_PG_RW;
+            pte_bits |= X86_PG_RW;
+        }
+        if ((prot & VM_PROT_EXECUTE) == 0 ||
+            va < VM_MIN_KERNEL_ADDRESS) {
+            pde_bits |= pg_nx;
+            pte_bits |= pg_nx;
+        }
+        pde_mask |= X86_PG_RW | pg_nx;
+        pte_mask |= X86_PG_RW | pg_nx;
+    }
 
     /*
      * Pages that aren't mapped aren't supported.  Also break down 2MB pages
@@ -8211,15 +8241,18 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, int flags)
      */
     for (tmpva = base; tmpva < base + size; ) {
         pdpe = pmap_pdpe(kernel_pmap, tmpva);
-        if (pdpe == NULL || *pdpe == 0)
+        if (pdpe == NULL || *pdpe == 0) {
+            KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
+                ("%s: addr %#lx is not mapped", __func__, tmpva));
             return (EINVAL);
+        }
         if (*pdpe & PG_PS) {
             /*
              * If the current 1GB page already has the required
-             * memory type, then we need not demote this page. Just
+             * properties, then we need not demote this page. Just
              * increment tmpva to the next 1GB page frame.
              */
-            if ((*pdpe & X86_PG_PDE_CACHE) == cache_bits_pde) {
+            if ((*pdpe & pde_mask) == pde_bits) {
                 tmpva = trunc_1gpage(tmpva) + NBPDP;
                 continue;
             }
@@ -8238,15 +8271,18 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, int flags)
                 return (ENOMEM);
         }
         pde = pmap_pdpe_to_pde(pdpe, tmpva);
-        if (*pde == 0)
+        if (*pde == 0) {
+            KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
+                ("%s: addr %#lx is not mapped", __func__, tmpva));
             return (EINVAL);
+        }
         if (*pde & PG_PS) {
             /*
              * If the current 2MB page already has the required
-             * memory type, then we need not demote this page. Just
+             * properties, then we need not demote this page. Just
              * increment tmpva to the next 2MB page frame.
              */
-            if ((*pde & X86_PG_PDE_CACHE) == cache_bits_pde) {
+            if ((*pde & pde_mask) == pde_bits) {
                 tmpva = trunc_2mpage(tmpva) + NBPDR;
                 continue;
             }
@@ -8265,24 +8301,27 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, int flags)
                 return (ENOMEM);
         }
         pte = pmap_pde_to_pte(pde, tmpva);
-        if (*pte == 0)
+        if (*pte == 0) {
+            KASSERT((flags & MAPDEV_ASSERTVALID) == 0,
+                ("%s: addr %#lx is not mapped", __func__, tmpva));
             return (EINVAL);
+        }
         tmpva += PAGE_SIZE;
     }
     error = 0;
 
     /*
      * Ok, all the pages exist, so run through them updating their
-     * cache mode if required.
+     * properties if required.
      */
+    changed = false;
     pa_start = pa_end = 0;
     for (tmpva = base; tmpva < base + size; ) {
         pdpe = pmap_pdpe(kernel_pmap, tmpva);
         if (*pdpe & PG_PS) {
-            if ((*pdpe & X86_PG_PDE_CACHE) != cache_bits_pde) {
-                pmap_pde_attr(pdpe, cache_bits_pde,
-                    X86_PG_PDE_CACHE);
-                changed = TRUE;
+            if ((*pdpe & pde_mask) != pde_bits) {
+                pmap_pte_props(pdpe, pde_bits, pde_mask);
+                changed = true;
             }
             if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
                 (*pdpe & PG_PS_FRAME) < dmaplimit) {
@@ -8294,9 +8333,10 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, int flags)
                     pa_end += NBPDP;
                 else {
                     /* Run ended, update direct map. */
-                    error = pmap_change_attr_locked(
+                    error = pmap_change_props_locked(
                         PHYS_TO_DMAP(pa_start),
-                        pa_end - pa_start, mode, flags);
+                        pa_end - pa_start, prot, mode,
+                        flags);
                     if (error != 0)
                         break;
                     /* Start physical address run. */
@@ -8309,10 +8349,9 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, int flags)
         }
         pde = pmap_pdpe_to_pde(pdpe, tmpva);
         if (*pde & PG_PS) {
-            if ((*pde & X86_PG_PDE_CACHE) != cache_bits_pde) {
-                pmap_pde_attr(pde, cache_bits_pde,
-                    X86_PG_PDE_CACHE);
-                changed = TRUE;
+            if ((*pde & pde_mask) != pde_bits) {
+                pmap_pte_props(pde, pde_bits, pde_mask);
+                changed = true;
             }
             if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
                 (*pde & PG_PS_FRAME) < dmaplimit) {
@@ -8324,9 +8363,10 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, int flags)
                     pa_end += NBPDR;
                 else {
                     /* Run ended, update direct map. */
-                    error = pmap_change_attr_locked(
+                    error = pmap_change_props_locked(
                         PHYS_TO_DMAP(pa_start),
-                        pa_end - pa_start, mode, flags);
+                        pa_end - pa_start, prot, mode,
+                        flags);
                     if (error != 0)
                         break;
                     /* Start physical address run. */
@@ -8337,10 +8377,9 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, int flags)
             tmpva = trunc_2mpage(tmpva) + NBPDR;
         } else {
             pte = pmap_pde_to_pte(pde, tmpva);
-            if ((*pte & X86_PG_PTE_CACHE) != cache_bits_pte) {
-                pmap_pte_attr(pte, cache_bits_pte,
-                    X86_PG_PTE_CACHE);
-                changed = TRUE;
+            if ((*pte & pte_mask) != pte_bits) {
+                pmap_pte_props(pte, pte_bits, pte_mask);
+                changed = true;
             }
             if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
                 (*pte & PG_FRAME) < dmaplimit) {
@@ -8352,9 +8391,10 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, int flags)
                     pa_end += PAGE_SIZE;
                 else {
                     /* Run ended, update direct map. */
-                    error = pmap_change_attr_locked(
+                    error = pmap_change_props_locked(
                         PHYS_TO_DMAP(pa_start),
-                        pa_end - pa_start, mode, flags);
+                        pa_end - pa_start, prot, mode,
+                        flags);
                     if (error != 0)
                         break;
                     /* Start physical address run. */
@@ -8368,8 +8408,8 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, int flags)
     if (error == 0 && pa_start != pa_end && pa_start < dmaplimit) {
         pa_end1 = MIN(pa_end, dmaplimit);
         if (pa_start != pa_end1)
-            error = pmap_change_attr_locked(PHYS_TO_DMAP(pa_start),
-                pa_end1 - pa_start, mode, flags);
+            error = pmap_change_props_locked(PHYS_TO_DMAP(pa_start),
+                pa_end1 - pa_start, prot, mode, flags);
     }
 
     /*
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -425,6 +425,7 @@ void pmap_activate_sw(struct thread *);
 void    pmap_bootstrap(vm_paddr_t *);
 int     pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde);
 int     pmap_change_attr(vm_offset_t, vm_size_t, int);
+int     pmap_change_prot(vm_offset_t, vm_size_t, vm_prot_t);
 void    pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate);
 void    pmap_flush_cache_range(vm_offset_t, vm_offset_t);
 void    pmap_flush_cache_phys_range(vm_paddr_t, vm_paddr_t, vm_memattr_t);