Change the pmap_enter(9) interface to take a flags parameter and a superpage
mapping size (currently unused). The flags include the fault access bits, the
wired request as PMAP_ENTER_WIRED, and a new flag, PMAP_ENTER_NOSLEEP, which
indicates that the pmap must not sleep.

For powerpc AIM, both 32 and 64 bit, fix the implementation to ensure that the
requested mapping is created when PMAP_ENTER_NOSLEEP is not specified; in
particular, wait for the memory required to proceed.

In collaboration with:	alc
Tested by:	nwhitehorn (ppc aim32 and booke)
Sponsored by:	The FreeBSD Foundation and EMC / Isilon Storage Division
MFC after:	2 weeks
parent e1ba604a96
commit 39ffa8c138
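Before the diff, here is a rough sketch of the new calling convention. It is illustrative only and not part of the commit; the helper name and the retry policy are hypothetical, while the constants and the pmap_enter() signature come from the change below.

```c
/*
 * Hypothetical caller sketch. Under the old interface this was
 *     pmap_enter(pmap, va, access, m, prot, wired);
 * Under the new interface the fault access type occupies the low-order
 * bits of "flags", the wired request becomes PMAP_ENTER_WIRED, and a
 * caller that must not sleep passes PMAP_ENTER_NOSLEEP and handles
 * KERN_RESOURCE_SHORTAGE itself.
 */
static int
example_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    vm_prot_t fault_type, boolean_t wired)
{
	u_int flags;
	int rv;

	flags = fault_type;			/* fault access bits */
	if (wired)
		flags |= PMAP_ENTER_WIRED;	/* request a wired mapping */
	flags |= PMAP_ENTER_NOSLEEP;		/* example: never sleep here */

	/* psind == 0: regular (non-superpage) mapping; psind is unused so far. */
	rv = pmap_enter(pmap, va, m, prot, flags, 0);
	if (rv == KERN_RESOURCE_SHORTAGE) {
		/* No page-table memory was available; the caller decides how to retry. */
	}
	return (rv);
}
```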
@@ -4116,9 +4116,9 @@ pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
 * or lose information. That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
vm_prot_t prot, boolean_t wired)
int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
u_int flags, int8_t psind __unused)
{
struct rwlock *lock;
pd_entry_t *pde;
@@ -4127,6 +4127,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
pv_entry_t pv;
vm_paddr_t opa, pa;
vm_page_t mpte, om;
boolean_t nosleep;

PG_A = pmap_accessed_bit(pmap);
PG_G = pmap_global_bit(pmap);
@@ -4143,10 +4144,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
va >= kmi.clean_eva,
("pmap_enter: managed mapping within the clean submap"));
if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
VM_OBJECT_ASSERT_WLOCKED(m->object);
VM_OBJECT_ASSERT_LOCKED(m->object);
pa = VM_PAGE_TO_PHYS(m);
newpte = (pt_entry_t)(pa | PG_A | PG_V);
if ((access & VM_PROT_WRITE) != 0)
if ((flags & VM_PROT_WRITE) != 0)
newpte |= PG_M;
if ((prot & VM_PROT_WRITE) != 0)
newpte |= PG_RW;
@@ -4154,7 +4155,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
("pmap_enter: access includes VM_PROT_WRITE but prot doesn't"));
if ((prot & VM_PROT_EXECUTE) == 0)
newpte |= pg_nx;
if (wired)
if ((flags & PMAP_ENTER_WIRED) != 0)
newpte |= PG_W;
if (va < VM_MAXUSER_ADDRESS)
newpte |= PG_U;
@@ -4196,7 +4197,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
* Here if the pte page isn't mapped, or if it has been
* deallocated.
*/
mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va), &lock);
nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
mpte = _pmap_allocpte(pmap, pmap_pde_pindex(va),
nosleep ? NULL : &lock);
if (mpte == NULL && nosleep) {
KASSERT(lock == NULL, ("lock leaked for nosleep"));
PMAP_UNLOCK(pmap);
rw_runlock(&pvh_global_lock);
return (KERN_RESOURCE_SHORTAGE);
}
goto retry;
} else
panic("pmap_enter: invalid page directory va=%#lx", va);
@@ -4328,6 +4337,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
rw_wunlock(lock);
rw_runlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
return (KERN_SUCCESS);
}

/*
@@ -231,8 +231,8 @@ static boolean_t pmap_pv_insert_section(pmap_t, vm_offset_t,
static struct pv_entry *pmap_remove_pv(struct vm_page *, pmap_t, vm_offset_t);
static int pmap_pvh_wired_mappings(struct md_page *, int);

static void pmap_enter_locked(pmap_t, vm_offset_t, vm_prot_t,
vm_page_t, vm_prot_t, boolean_t, int);
static int pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, u_int);
static vm_paddr_t pmap_extract_locked(pmap_t pmap, vm_offset_t va);
static void pmap_alloc_l1(pmap_t);
static void pmap_free_l1(pmap_t);
@@ -2934,35 +2934,38 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
* insert this page into the given map NOW.
*/

void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
vm_prot_t prot, boolean_t wired)
int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
u_int flags, int8_t psind __unused)
{
struct l2_bucket *l2b;
int rv;

rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
pmap_enter_locked(pmap, va, access, m, prot, wired, M_WAITOK);
/*
* If both the l2b_occupancy and the reservation are fully
* populated, then attempt promotion.
*/
l2b = pmap_get_l2_bucket(pmap, va);
if ((l2b != NULL) && (l2b->l2b_occupancy == L2_PTE_NUM_TOTAL) &&
sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
vm_reserv_level_iffullpop(m) == 0)
pmap_promote_section(pmap, va);

rv = pmap_enter_locked(pmap, va, m, prot, flags);
if (rv == KERN_SUCCESS) {
/*
* If both the l2b_occupancy and the reservation are fully
* populated, then attempt promotion.
*/
l2b = pmap_get_l2_bucket(pmap, va);
if (l2b != NULL && l2b->l2b_occupancy == L2_PTE_NUM_TOTAL &&
sp_enabled && (m->flags & PG_FICTITIOUS) == 0 &&
vm_reserv_level_iffullpop(m) == 0)
pmap_promote_section(pmap, va);
}
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
return (rv);
}

/*
* The pvh global and pmap locks must be held.
*/
static void
pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
vm_prot_t prot, boolean_t wired, int flags)
static int
pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
u_int flags)
{
struct l2_bucket *l2b = NULL;
struct vm_page *om;
@@ -2980,9 +2983,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
pa = systempage.pv_pa;
m = NULL;
} else {
KASSERT((m->oflags & VPO_UNMANAGED) != 0 ||
vm_page_xbusied(m) || (flags & M_NOWAIT) != 0,
("pmap_enter_locked: page %p is not busy", m));
if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
VM_OBJECT_ASSERT_LOCKED(m->object);
pa = VM_PAGE_TO_PHYS(m);
}

@@ -3003,12 +3005,12 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,

if (prot & VM_PROT_WRITE)
nflags |= PVF_WRITE;
if (wired)
if ((flags & PMAP_ENTER_WIRED) != 0)
nflags |= PVF_WIRED;

PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, "
"prot = %x, wired = %x\n", (uint32_t) pmap, va, (uint32_t) m,
prot, wired));
"prot = %x, flags = %x\n", (uint32_t) pmap, va, (uint32_t) m,
prot, flags));

if (pmap == pmap_kernel()) {
l2b = pmap_get_l2_bucket(pmap, va);
@@ -3018,7 +3020,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
do_l2b_alloc:
l2b = pmap_alloc_l2_bucket(pmap, va);
if (l2b == NULL) {
if (flags & M_WAITOK) {
if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
VM_WAIT;
@@ -3026,7 +3028,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
PMAP_LOCK(pmap);
goto do_l2b_alloc;
}
return;
return (KERN_RESOURCE_SHORTAGE);
}
}

@@ -3185,6 +3187,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,

if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
cpu_icache_sync_range(va, PAGE_SIZE);
return (KERN_SUCCESS);
}

/*
@@ -3206,13 +3209,12 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
vm_offset_t va;
vm_page_t m;
vm_pindex_t diff, psize;
vm_prot_t access;

VM_OBJECT_ASSERT_LOCKED(m_start->object);

psize = atop(end - start);
m = m_start;
access = prot = prot & (VM_PROT_READ | VM_PROT_EXECUTE);
prot &= VM_PROT_READ | VM_PROT_EXECUTE;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
@@ -3222,8 +3224,8 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
pmap_enter_section(pmap, va, m, prot))
m = &m[L1_S_SIZE / PAGE_SIZE - 1];
else
pmap_enter_locked(pmap, va, access, m, prot,
FALSE, M_NOWAIT);
pmap_enter_locked(pmap, va, m, prot,
PMAP_ENTER_NOSLEEP);
m = TAILQ_NEXT(m, listq);
}
PMAP_UNLOCK(pmap);
@@ -3242,12 +3244,11 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
vm_prot_t access;

access = prot = prot & (VM_PROT_READ | VM_PROT_EXECUTE);
prot &= VM_PROT_READ | VM_PROT_EXECUTE;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
pmap_enter_locked(pmap, va, access, m, prot, FALSE, M_NOWAIT);
pmap_enter_locked(pmap, va, m, prot, PMAP_ENTER_NOSLEEP);
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
}
@@ -3499,8 +3500,8 @@ pmap_pinit(pmap_t pmap)
pmap->pm_stats.resident_count = 1;
if (vector_page < KERNBASE) {
pmap_enter(pmap, vector_page,
VM_PROT_READ, PHYS_TO_VM_PAGE(systempage.pv_pa),
VM_PROT_READ, 1);
PHYS_TO_VM_PAGE(systempage.pv_pa), VM_PROT_READ,
PMAP_ENTER_WIRED, 0);
}
return (1);
}
@@ -199,8 +199,8 @@ extern int last_fault_code;
static void pmap_free_pv_entry (pv_entry_t);
static pv_entry_t pmap_get_pv_entry(void);

static void pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, boolean_t, int);
static int pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, u_int);
static vm_paddr_t pmap_extract_locked(pmap_t pmap, vm_offset_t va);
static void pmap_fix_cache(struct vm_page *, pmap_t, vm_offset_t);
static void pmap_alloc_l1(pmap_t);
@@ -3208,24 +3208,26 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
* insert this page into the given map NOW.
*/

void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
vm_prot_t prot, boolean_t wired)
int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
u_int flags, int8_t psind __unused)
{
int rv;

rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
pmap_enter_locked(pmap, va, m, prot, wired, M_WAITOK);
rv = pmap_enter_locked(pmap, va, m, prot, flags);
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
return (rv);
}

/*
* The pvh global and pmap locks must be held.
*/
static void
static int
pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
boolean_t wired, int flags)
u_int flags)
{
struct l2_bucket *l2b = NULL;
struct vm_page *opg;
@@ -3241,9 +3243,8 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pa = systempage.pv_pa;
m = NULL;
} else {
KASSERT((m->oflags & VPO_UNMANAGED) != 0 ||
vm_page_xbusied(m) || (flags & M_NOWAIT) != 0,
("pmap_enter_locked: page %p is not busy", m));
if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
VM_OBJECT_ASSERT_LOCKED(m->object);
pa = VM_PAGE_TO_PHYS(m);
}
nflags = 0;
@@ -3251,10 +3252,10 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
nflags |= PVF_WRITE;
if (prot & VM_PROT_EXECUTE)
nflags |= PVF_EXEC;
if (wired)
if ((flags & PMAP_ENTER_WIRED) != 0)
nflags |= PVF_WIRED;
PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, "
"wired = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, wired));
"flags = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, flags));

if (pmap == pmap_kernel()) {
l2b = pmap_get_l2_bucket(pmap, va);
@@ -3264,7 +3265,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
do_l2b_alloc:
l2b = pmap_alloc_l2_bucket(pmap, va);
if (l2b == NULL) {
if (flags & M_WAITOK) {
if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
VM_WAIT;
@@ -3272,7 +3273,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
PMAP_LOCK(pmap);
goto do_l2b_alloc;
}
return;
return (KERN_RESOURCE_SHORTAGE);
}
}

@@ -3486,6 +3487,7 @@ pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
if (m)
pmap_fix_cache(m, pmap, va);
}
return (KERN_SUCCESS);
}

/*
@@ -3515,7 +3517,7 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
pmap_enter_locked(pmap, start + ptoa(diff), m, prot &
(VM_PROT_READ | VM_PROT_EXECUTE), FALSE, M_NOWAIT);
(VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP);
m = TAILQ_NEXT(m, listq);
}
rw_wunlock(&pvh_global_lock);
@@ -3538,7 +3540,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
FALSE, M_NOWAIT);
PMAP_ENTER_NOSLEEP);
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
@@ -3769,9 +3771,8 @@ pmap_pinit(pmap_t pmap)
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
pmap->pm_stats.resident_count = 1;
if (vector_page < KERNBASE) {
pmap_enter(pmap, vector_page,
VM_PROT_READ, PHYS_TO_VM_PAGE(systempage.pv_pa),
VM_PROT_READ, 1);
pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa),
VM_PROT_READ, PMAP_ENTER_WIRED | VM_PROT_READ, 0);
}
return (1);
}
@@ -3458,9 +3458,9 @@ pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
* or lose information. That is, this routine must actually
* insert this page into the given map NOW.
*/
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
vm_prot_t prot, boolean_t wired)
int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
u_int flags, int8_t psind)
{
pd_entry_t *pde;
pt_entry_t *pte;
@@ -3468,17 +3468,19 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
pv_entry_t pv;
vm_paddr_t opa, pa;
vm_page_t mpte, om;
boolean_t invlva;
boolean_t invlva, nosleep, wired;

va = trunc_page(va);
mpte = NULL;
wired = (flags & PMAP_ENTER_WIRED) != 0;
nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;

KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
va));
if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
VM_OBJECT_ASSERT_WLOCKED(m->object);

mpte = NULL;
VM_OBJECT_ASSERT_LOCKED(m->object);

rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
@@ -3489,7 +3491,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
* resident, we are creating it here.
*/
if (va < VM_MAXUSER_ADDRESS) {
mpte = pmap_allocpte(pmap, va, M_WAITOK);
mpte = pmap_allocpte(pmap, va, nosleep ? M_NOWAIT : M_WAITOK);
if (mpte == NULL) {
KASSERT(nosleep,
("pmap_allocpte failed with sleep allowed"));
sched_unpin();
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
return (KERN_RESOURCE_SHORTAGE);
}
}

pde = pmap_pde(pmap, va);
@@ -3607,7 +3617,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
*/
if ((origpte & ~(PG_M|PG_A)) != newpte) {
newpte |= PG_A;
if ((access & VM_PROT_WRITE) != 0)
if ((flags & VM_PROT_WRITE) != 0)
newpte |= PG_M;
if (origpte & PG_V) {
invlva = FALSE;
@@ -3652,6 +3662,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
sched_unpin();
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
return (KERN_SUCCESS);
}

/*
@@ -298,9 +298,9 @@ static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
vm_page_t m);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);

static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags);
static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, vm_page_t *free);
static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
static void pmap_pte_release(pt_entry_t *pte);
@@ -1546,21 +1546,17 @@ pmap_pinit(pmap_t pmap)
* mapped correctly.
*/
static vm_page_t
_pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
_pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
{
vm_paddr_t ptema;
vm_page_t m;

KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
(flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

/*
* Allocate a page table page.
*/
if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
if (flags & M_WAITOK) {
if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
VM_WAIT;
@@ -1595,16 +1591,12 @@ _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
}

static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
{
u_int ptepindex;
pd_entry_t ptema;
vm_page_t m;

KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
(flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

/*
* Calculate pagetable page index
*/
@@ -1644,7 +1636,7 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
CTR3(KTR_PMAP, "pmap_allocpte: pmap=%p va=0x%08x flags=0x%x",
pmap, va, flags);
m = _pmap_allocpte(pmap, ptepindex, flags);
if (m == NULL && (flags & M_WAITOK))
if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
goto retry;

KASSERT(pmap->pm_pdir[ptepindex], ("ptepindex=%d did not get mapped", ptepindex));
@@ -2643,9 +2635,9 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
* or lose information. That is, this routine must actually
* insert this page into the given map NOW.
*/
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
vm_prot_t prot, boolean_t wired)
int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
u_int flags, int8_t psind __unused)
{
pd_entry_t *pde;
pt_entry_t *pte;
@@ -2653,19 +2645,21 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
pv_entry_t pv;
vm_paddr_t opa, pa;
vm_page_t mpte, om;
boolean_t invlva;
boolean_t invlva, wired;

CTR6(KTR_PMAP, "pmap_enter: pmap=%08p va=0x%08x access=0x%x ma=0x%08x prot=0x%x wired=%d",
pmap, va, access, VM_PAGE_TO_MACH(m), prot, wired);
CTR5(KTR_PMAP,
"pmap_enter: pmap=%08p va=0x%08x ma=0x%08x prot=0x%x flags=0x%x",
pmap, va, VM_PAGE_TO_MACH(m), prot, flags);
va = trunc_page(va);
KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
va));
if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
VM_OBJECT_ASSERT_WLOCKED(m->object);
VM_OBJECT_ASSERT_LOCKED(m->object);

mpte = NULL;
wired = (flags & PMAP_ENTER_WIRED) != 0;

rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
@@ -2676,7 +2670,15 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
* resident, we are creating it here.
*/
if (va < VM_MAXUSER_ADDRESS) {
mpte = pmap_allocpte(pmap, va, M_WAITOK);
mpte = pmap_allocpte(pmap, va, flags);
if (mpte == NULL) {
KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
("pmap_allocpte failed with sleep allowed"));
sched_unpin();
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
return (KERN_RESOURCE_SHORTAGE);
}
}

pde = pmap_pde(pmap, va);
@@ -2842,6 +2844,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
sched_unpin();
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
return (KERN_SUCCESS);
}

/*
@@ -2996,7 +2999,7 @@ pmap_enter_quick_locked(multicall_entry_t **mclpp, int *count, pmap_t pmap, vm_o
mpte->wire_count++;
} else {
mpte = _pmap_allocpte(pmap, ptepindex,
M_NOWAIT);
PMAP_ENTER_NOSLEEP);
if (mpte == NULL)
return (mpte);
}
@@ -3305,7 +3308,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
*/
if ((ptetemp & PG_MANAGED) != 0) {
dstmpte = pmap_allocpte(dst_pmap, addr,
M_NOWAIT);
PMAP_ENTER_NOSLEEP);
if (dstmpte == NULL)
goto out;
dst_pte = pmap_pte_quick(dst_pmap, addr);
@@ -177,8 +177,8 @@ static void pmap_invalidate_all(pmap_t pmap);
static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags);
static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
static pt_entry_t init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot);

@@ -1094,20 +1094,16 @@ pmap_pinit(pmap_t pmap)
* mapped correctly.
*/
static vm_page_t
_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
_pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags)
{
vm_offset_t pageva;
vm_page_t m;

KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
(flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

/*
* Find or fabricate a new pagetable page
*/
if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) {
if (flags & M_WAITOK) {
if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
pmap_grow_direct_page_cache();
@@ -1164,16 +1160,12 @@ _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
}

static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
{
unsigned ptepindex;
pd_entry_t *pde;
vm_page_t m;

KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
(flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

/*
* Calculate pagetable page index
*/
@@ -1197,7 +1189,7 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
* deallocated.
*/
m = _pmap_allocpte(pmap, ptepindex, flags);
if (m == NULL && (flags & M_WAITOK))
if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
goto retry;
}
return (m);
@@ -1994,9 +1986,9 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
* or lose information. That is, this routine must actually
* insert this page into the given map NOW.
*/
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
vm_prot_t prot, boolean_t wired)
int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
u_int flags, int8_t psind __unused)
{
vm_paddr_t pa, opa;
pt_entry_t *pte;
@@ -2009,11 +2001,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
va >= kmi.clean_eva,
("pmap_enter: managed mapping within the clean submap"));
KASSERT((m->oflags & VPO_UNMANAGED) != 0 || vm_page_xbusied(m),
("pmap_enter: page %p is not busy", m));
if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
VM_OBJECT_ASSERT_LOCKED(m->object);
pa = VM_PAGE_TO_PHYS(m);
newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, access, prot);
if (wired)
newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, flags, prot);
if ((flags & PMAP_ENTER_WIRED) != 0)
newpte |= PTE_W;
if (is_kernel_pmap(pmap))
newpte |= PTE_G;
@@ -2032,7 +2024,14 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
* creating it here.
*/
if (va < VM_MAXUSER_ADDRESS) {
mpte = pmap_allocpte(pmap, va, M_WAITOK);
mpte = pmap_allocpte(pmap, va, flags);
if (mpte == NULL) {
KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
("pmap_allocpte failed with sleep allowed"));
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
return (KERN_RESOURCE_SHORTAGE);
}
}
pte = pmap_pte(pmap, va);

@@ -2057,9 +2056,10 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
* are valid mappings in them. Hence, if a user page is
* wired, the PT page will be also.
*/
if (wired && !pte_test(&origpte, PTE_W))
if (pte_test(&newpte, PTE_W) && !pte_test(&origpte, PTE_W))
pmap->pm_stats.wired_count++;
else if (!wired && pte_test(&origpte, PTE_W))
else if (!pte_test(&newpte, PTE_W) && pte_test(&origpte,
PTE_W))
pmap->pm_stats.wired_count--;

KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
@@ -2123,7 +2123,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
/*
* Increment counters
*/
if (wired)
if (pte_test(&newpte, PTE_W))
pmap->pm_stats.wired_count++;

validate:
@@ -2170,6 +2170,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
}
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
return (KERN_SUCCESS);
}

/*
@@ -2235,7 +2236,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
mpte->wire_count++;
} else {
mpte = _pmap_allocpte(pmap, ptepindex,
M_NOWAIT);
PMAP_ENTER_NOSLEEP);
if (mpte == NULL)
return (mpte);
}
|
||||
/*
|
||||
* Utility routines.
|
||||
*/
|
||||
static void moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
|
||||
vm_prot_t, boolean_t);
|
||||
static int moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
|
||||
vm_prot_t, u_int, int8_t);
|
||||
static void moea_syncicache(vm_offset_t, vm_size_t);
|
||||
static boolean_t moea_query_bit(vm_page_t, int);
|
||||
static u_int moea_clear_bit(vm_page_t, int);
|
||||
@ -273,7 +273,8 @@ void moea_clear_modify(mmu_t, vm_page_t);
|
||||
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
|
||||
void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
|
||||
vm_page_t *mb, vm_offset_t b_offset, int xfersize);
|
||||
void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
|
||||
int moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int,
|
||||
int8_t);
|
||||
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
|
||||
vm_prot_t);
|
||||
void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
|
||||
@ -1100,16 +1101,25 @@ moea_zero_page_idle(mmu_t mmu, vm_page_t m)
|
||||
* target pmap with the protection requested. If specified the page
|
||||
* will be wired down.
|
||||
*/
|
||||
void
|
||||
int
|
||||
moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
|
||||
boolean_t wired)
|
||||
u_int flags, int8_t psind)
|
||||
{
|
||||
int error;
|
||||
|
||||
rw_wlock(&pvh_global_lock);
|
||||
PMAP_LOCK(pmap);
|
||||
moea_enter_locked(pmap, va, m, prot, wired);
|
||||
rw_wunlock(&pvh_global_lock);
|
||||
PMAP_UNLOCK(pmap);
|
||||
for (;;) {
|
||||
rw_wlock(&pvh_global_lock);
|
||||
PMAP_LOCK(pmap);
|
||||
error = moea_enter_locked(pmap, va, m, prot, flags, psind);
|
||||
rw_wunlock(&pvh_global_lock);
|
||||
PMAP_UNLOCK(pmap);
|
||||
if (error != ENOMEM)
|
||||
return (KERN_SUCCESS);
|
||||
if ((flags & PMAP_ENTER_NOSLEEP) != 0)
|
||||
return (KERN_RESOURCE_SHORTAGE);
|
||||
VM_OBJECT_ASSERT_UNLOCKED(m->object);
|
||||
VM_WAIT;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1119,9 +1129,9 @@ moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
|
||||
*
|
||||
* The global pvh and pmap must be locked.
|
||||
*/
|
||||
static void
|
||||
static int
|
||||
moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
|
||||
boolean_t wired)
|
||||
u_int flags, int8_t psind __unused)
|
||||
{
|
||||
struct pvo_head *pvo_head;
|
||||
uma_zone_t zone;
|
||||
@ -1154,7 +1164,7 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
|
||||
} else
|
||||
pte_lo |= PTE_BR;
|
||||
|
||||
if (wired)
|
||||
if ((flags & PMAP_ENTER_WIRED) != 0)
|
||||
pvo_flags |= PVO_WIRED;
|
||||
|
||||
error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
|
||||
@ -1169,6 +1179,8 @@ moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
|
||||
if (pmap != kernel_pmap && error == ENOENT &&
|
||||
(pte_lo & (PTE_I | PTE_G)) == 0)
|
||||
moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
|
||||
|
||||
return (error);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1198,7 +1210,7 @@ moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
|
||||
PMAP_LOCK(pm);
|
||||
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
|
||||
moea_enter_locked(pm, start + ptoa(diff), m, prot &
|
||||
(VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
|
||||
(VM_PROT_READ | VM_PROT_EXECUTE), 0, 0);
|
||||
m = TAILQ_NEXT(m, listq);
|
||||
}
|
||||
rw_wunlock(&pvh_global_lock);
|
||||
@ -1213,7 +1225,7 @@ moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
|
||||
rw_wlock(&pvh_global_lock);
|
||||
PMAP_LOCK(pm);
|
||||
moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
|
||||
FALSE);
|
||||
0, 0);
|
||||
rw_wunlock(&pvh_global_lock);
|
||||
PMAP_UNLOCK(pm);
|
||||
}
|
||||
|
@@ -267,7 +267,7 @@ int moea64_large_page_shift = 0;
* PVO calls.
*/
static int moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
vm_offset_t, vm_offset_t, uint64_t, int);
vm_offset_t, vm_offset_t, uint64_t, int, int8_t);
static void moea64_pvo_remove(mmu_t, struct pvo_entry *);
static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);

@@ -287,7 +287,8 @@ void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
vm_page_t *mb, vm_offset_t b_offset, int xfersize);
void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
u_int flags, int8_t psind);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
@@ -627,7 +628,7 @@ moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,

moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
NULL, pa, pa, pte_lo,
PVO_WIRED | PVO_LARGE);
PVO_WIRED | PVO_LARGE, 0);
}
}
PMAP_UNLOCK(kernel_pmap);
@@ -1228,9 +1229,9 @@ moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
* will be wired down.
*/

void
int
moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, boolean_t wired)
vm_prot_t prot, u_int flags, int8_t psind)
{
struct pvo_head *pvo_head;
uma_zone_t zone;
@@ -1264,15 +1265,23 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
if ((prot & VM_PROT_EXECUTE) == 0)
pte_lo |= LPTE_NOEXEC;

if (wired)
if ((flags & PMAP_ENTER_WIRED) != 0)
pvo_flags |= PVO_WIRED;

LOCK_TABLE_WR();
PMAP_LOCK(pmap);
error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);
PMAP_UNLOCK(pmap);
UNLOCK_TABLE_WR();
for (;;) {
LOCK_TABLE_WR();
PMAP_LOCK(pmap);
error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags, psind);
PMAP_UNLOCK(pmap);
UNLOCK_TABLE_WR();
if (error != ENOMEM)
break;
if ((flags & PMAP_ENTER_NOSLEEP) != 0)
return (KERN_RESOURCE_SHORTAGE);
VM_OBJECT_ASSERT_UNLOCKED(m->object);
VM_WAIT;
}

/*
* Flush the page from the instruction cache if this page is
@@ -1283,6 +1292,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_page_aflag_set(m, PGA_EXECUTABLE);
moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}
return (KERN_SUCCESS);
}

static void
@@ -1347,7 +1357,7 @@ moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
m = m_start;
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
(VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
(VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0);
m = TAILQ_NEXT(m, listq);
}
}
@@ -1357,8 +1367,8 @@ moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
vm_prot_t prot)
{

moea64_enter(mmu, pm, va, m,
prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
PMAP_ENTER_NOSLEEP, 0);
}

vm_paddr_t
@@ -1446,7 +1456,8 @@ moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
PMAP_LOCK(kernel_pmap);

moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP);
NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP,
0);

if (needed_lock)
PMAP_UNLOCK(kernel_pmap);
@@ -1668,7 +1679,7 @@ moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
LOCK_TABLE_WR();
PMAP_LOCK(kernel_pmap);
error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
NULL, va, pa, pte_lo, PVO_WIRED);
NULL, va, pa, pte_lo, PVO_WIRED, 0);
PMAP_UNLOCK(kernel_pmap);
UNLOCK_TABLE_WR();

@@ -2166,7 +2177,7 @@ moea64_bootstrap_alloc(vm_size_t size, u_int align)
static int
moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
uint64_t pte_lo, int flags)
uint64_t pte_lo, int flags, int8_t psind __unused)
{
struct pvo_entry *pvo;
uintptr_t pt;
@@ -146,8 +146,8 @@ static struct mtx copy_page_mutex;
/* PMAP */
/**************************************************************************/

static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, boolean_t);
static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, u_int flags, int8_t psind);

unsigned int kptbl_min; /* Index of the first kernel ptbl. */
unsigned int kernel_ptbls; /* Number of KVA ptbls. */
@@ -228,14 +228,14 @@ static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
@@ -272,8 +272,8 @@ static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
vm_offset_t, vm_page_t *, vm_offset_t, int);
static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, boolean_t);
static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, u_int flags, int8_t psind);
static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
vm_page_t, vm_prot_t);
static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
@@ -558,14 +558,14 @@ ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)

/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
{
vm_page_t mtbl[PTBL_PAGES];
vm_page_t m;
struct ptbl_buf *pbuf;
unsigned int pidx;
pte_t *ptbl;
int i;
int i, j;

CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
(pmap == kernel_pmap), pdir_idx);
@@ -588,9 +588,15 @@ ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
pidx = (PTBL_PAGES * pdir_idx) + i;
while ((m = vm_page_alloc(NULL, pidx,
VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {

PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
if (nosleep) {
ptbl_free_pmap_ptbl(pmap, ptbl);
for (j = 0; j < i; j++)
vm_page_free(mtbl[j]);
atomic_subtract_int(&vm_cnt.v_wire_count, i);
return (NULL);
}
VM_WAIT;
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
@@ -885,8 +891,9 @@ pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
/*
* Insert PTE for a given page and virtual address.
*/
static void
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
static int
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
boolean_t nosleep)
{
unsigned int pdir_idx = PDIR_IDX(va);
unsigned int ptbl_idx = PTBL_IDX(va);
@@ -900,7 +907,11 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)

if (ptbl == NULL) {
/* Allocate page table pages. */
ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
if (ptbl == NULL) {
KASSERT(nosleep, ("nosleep and NULL ptbl"));
return (ENOMEM);
}
} else {
/*
* Check if there is valid mapping for requested
@@ -949,6 +960,7 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)

tlb_miss_unlock();
mtx_unlock_spin(&tlbivax_mutex);
return (0);
}

/* Return the pa for the given pmap/va. */
@@ -1576,35 +1588,37 @@ mmu_booke_release(mmu_t mmu, pmap_t pmap)
* target physical map with the protection requested. If specified the page
* will be wired down.
*/
static void
static int
mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, boolean_t wired)
vm_prot_t prot, u_int flags, int8_t psind)
{
int error;

rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
return (error);
}

static void
static int
mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, boolean_t wired)
vm_prot_t prot, u_int pmap_flags, int8_t psind __unused)
{
pte_t *pte;
vm_paddr_t pa;
uint32_t flags;
int su, sync;
int error, su, sync;

pa = VM_PAGE_TO_PHYS(m);
su = (pmap == kernel_pmap);
sync = 0;

//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
// "pa=0x%08x prot=0x%08x wired=%d)\n",
// "pa=0x%08x prot=0x%08x flags=%#x)\n",
// (u_int32_t)pmap, su, pmap->pm_tid,
// (u_int32_t)m, va, pa, prot, wired);
// (u_int32_t)m, va, pa, prot, flags);

if (su) {
KASSERT(((va >= virtual_avail) &&
@@ -1634,7 +1648,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);

/* Wiring change, just update stats. */
if (wired) {
if ((pmap_flags & PMAP_ENTER_WIRED) != 0) {
if (!PTE_ISWIRED(pte)) {
flags |= PTE_WIRED;
pmap->pm_stats.wired_count++;
@@ -1730,12 +1744,16 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
}

/* If its wired update stats. */
if (wired) {
pmap->pm_stats.wired_count++;
if ((pmap_flags & PMAP_ENTER_WIRED) != 0)
flags |= PTE_WIRED;
}

pte_enter(mmu, pmap, m, va, flags);
error = pte_enter(mmu, pmap, m, va, flags,
(pmap_flags & PMAP_ENTER_NOSLEEP) != 0);
if (error != 0)
return (KERN_RESOURCE_SHORTAGE);

if ((flags & PMAP_ENTER_WIRED) != 0)
pmap->pm_stats.wired_count++;

/* Flush the real memory from the instruction cache. */
if (prot & VM_PROT_EXECUTE)
@@ -1746,6 +1764,8 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
__syncicache((void *)va, PAGE_SIZE);
sync = 0;
}

return (KERN_SUCCESS);
}

/*
@@ -1775,7 +1795,8 @@ mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
PMAP_LOCK(pmap);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
prot & (VM_PROT_READ | VM_PROT_EXECUTE),
PMAP_ENTER_NOSLEEP, 0);
m = TAILQ_NEXT(m, listq);
}
rw_wunlock(&pvh_global_lock);
@@ -1790,7 +1811,8 @@ mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
mmu_booke_enter_locked(mmu, pmap, va, m,
prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP,
0);
rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
}
@@ -2074,7 +2096,7 @@ mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
m = PHYS_TO_VM_PAGE(pa);
PMAP_LOCK(pmap);
pte_enter(mmu, pmap, m, addr,
PTE_SR | PTE_VALID | PTE_UR);
PTE_SR | PTE_VALID | PTE_UR, FALSE);
__syncicache((void *)addr, PAGE_SIZE);
pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
PMAP_UNLOCK(pmap);
@@ -224,15 +224,17 @@ METHOD void copy_pages {
* @param _va mapping virtual address
* @param _p mapping physical page
* @param _prot mapping page protection
* @param _wired TRUE if page will be wired
* @param _flags pmap_enter flags
* @param _psind superpage size index
*/
METHOD void enter {
METHOD int enter {
mmu_t _mmu;
pmap_t _pmap;
vm_offset_t _va;
vm_page_t _p;
vm_prot_t _prot;
boolean_t _wired;
u_int _flags;
int8_t _psind;
};

@@ -135,14 +135,14 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
MMU_COPY_PAGES(mmu_obj, ma, a_offset, mb, b_offset, xfersize);
}

void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t p,
vm_prot_t prot, boolean_t wired)
int
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
u_int flags, int8_t psind)
{

CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %#x, %p, %#x, %u)", pmap, va,
access, p, prot, wired);
MMU_ENTER(mmu_obj, pmap, va, p, prot, wired);
CTR6(KTR_PMAP, "pmap_enter(%p, %#x, %p, %#x, %x, %d)", pmap, va,
p, prot, flags, psind);
return (MMU_ENTER(mmu_obj, pmap, va, p, prot, flags, psind));
}

void
@@ -151,8 +151,8 @@ static int pmap_unwire_tte(pmap_t pm, pmap_t pm2, struct tte *tp,
*
* The page queues and pmap must be locked.
*/
static void pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m,
vm_prot_t prot, boolean_t wired);
static int pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m,
vm_prot_t prot, u_int flags, int8_t psind);

extern int tl1_dmmu_miss_direct_patch_tsb_phys_1[];
extern int tl1_dmmu_miss_direct_patch_tsb_phys_end_1[];
@@ -1461,16 +1461,18 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
* target pmap with the protection requested. If specified the page
* will be wired down.
*/
void
pmap_enter(pmap_t pm, vm_offset_t va, vm_prot_t access, vm_page_t m,
vm_prot_t prot, boolean_t wired)
int
pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
u_int flags, int8_t psind)
{
int rv;

rw_wlock(&tte_list_global_lock);
PMAP_LOCK(pm);
pmap_enter_locked(pm, va, m, prot, wired);
rv = pmap_enter_locked(pm, va, m, prot, flags, psind);
rw_wunlock(&tte_list_global_lock);
PMAP_UNLOCK(pm);
return (rv);
}

/*
@@ -1480,14 +1482,15 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_prot_t access, vm_page_t m,
*
* The page queues and pmap must be locked.
*/
static void
static int
pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
boolean_t wired)
u_int flags, int8_t psind __unused)
{
struct tte *tp;
vm_paddr_t pa;
vm_page_t real;
u_long data;
boolean_t wired;

rw_assert(&tte_list_global_lock, RA_WLOCKED);
PMAP_LOCK_ASSERT(pm, MA_OWNED);
@@ -1495,6 +1498,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
VM_OBJECT_ASSERT_LOCKED(m->object);
PMAP_STATS_INC(pmap_nenter);
pa = VM_PAGE_TO_PHYS(m);
wired = (flags & PMAP_ENTER_WIRED) != 0;

/*
* If this is a fake page from the device_pager, but it covers actual
@@ -1608,6 +1612,8 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,

tsb_tte_enter(pm, m, va, TS_8K, data);
}

return (KERN_SUCCESS);
}

/*
@@ -1637,7 +1643,7 @@ pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end,
PMAP_LOCK(pm);
while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
pmap_enter_locked(pm, start + ptoa(diff), m, prot &
(VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
(VM_PROT_READ | VM_PROT_EXECUTE), 0, 0);
m = TAILQ_NEXT(m, listq);
}
rw_wunlock(&tte_list_global_lock);
@@ -1651,7 +1657,7 @@ pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
rw_wlock(&tte_list_global_lock);
PMAP_LOCK(pm);
pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
FALSE);
0, 0);
rw_wunlock(&tte_list_global_lock);
PMAP_UNLOCK(pm);
}
@@ -97,6 +97,13 @@ struct thread;
*/
extern vm_offset_t kernel_vm_end;

/*
* Flags for pmap_enter(). The bits in the low-order byte are reserved
* for the protection code (vm_prot_t) that describes the fault type.
*/
#define PMAP_ENTER_NOSLEEP 0x0100
#define PMAP_ENTER_WIRED 0x0200

void pmap_activate(struct thread *td);
void pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
int advice);
@@ -107,8 +114,8 @@ void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
void pmap_copy_page(vm_page_t, vm_page_t);
void pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset,
vm_page_t mb[], vm_offset_t b_offset, int xfersize);
void pmap_enter(pmap_t, vm_offset_t, vm_prot_t, vm_page_t,
vm_prot_t, boolean_t);
int pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, u_int flags, int8_t psind);
void pmap_enter_object(pmap_t pmap, vm_offset_t start,
vm_offset_t end, vm_page_t m_start, vm_prot_t prot);
void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m,
@@ -902,7 +902,8 @@ RetryFault:;
* back on the active queue until later so that the pageout daemon
* won't find it (yet).
*/
pmap_enter(fs.map->pmap, vaddr, fault_type, fs.m, prot, wired);
pmap_enter(fs.map->pmap, vaddr, fs.m, prot,
fault_type | (wired ? PMAP_ENTER_WIRED : 0), 0);
if (faultcount != 1 && (fault_flags & VM_FAULT_CHANGE_WIRING) == 0 &&
wired == 0)
vm_fault_prefault(&fs, vaddr, faultcount, reqpage);
@@ -1318,7 +1319,8 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
* mapping is being replaced by a write-enabled
* mapping, then wire that new mapping.
*/
pmap_enter(dst_map->pmap, vaddr, access, dst_m, prot, upgrade);
pmap_enter(dst_map->pmap, vaddr, dst_m, prot,
access | (upgrade ? PMAP_ENTER_WIRED : 0), 0);

/*
* Mark it no longer busy, and put it on the active list.
@@ -202,8 +202,8 @@ kmem_alloc_attr(vmem_t *vmem, vm_size_t size, int flags, vm_paddr_t low,
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
TRUE);
pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
}
VM_OBJECT_WUNLOCK(object);
return (addr);
@@ -255,7 +255,8 @@ kmem_alloc_contig(struct vmem *vmem, vm_size_t size, int flags, vm_paddr_t low,
if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
pmap_enter(kernel_pmap, tmp, VM_PROT_ALL, m, VM_PROT_ALL, true);
pmap_enter(kernel_pmap, tmp, m, VM_PROT_ALL,
VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
tmp += PAGE_SIZE;
}
VM_OBJECT_WUNLOCK(object);
@@ -378,8 +379,8 @@ kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
KASSERT((m->oflags & VPO_UNMANAGED) != 0,
("kmem_malloc: page %p is managed", m));
m->valid = VM_PAGE_BITS_ALL;
pmap_enter(kernel_pmap, addr + i, VM_PROT_ALL, m, VM_PROT_ALL,
TRUE);
pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL,
VM_PROT_ALL | PMAP_ENTER_WIRED, 0);
}
VM_OBJECT_WUNLOCK(object);