Change {_,}pmap_allocpte() so that they look for the flag PMAP_ENTER_NOSLEEP
instead of M_NOWAIT/M_WAITOK when deciding whether to sleep on page table
page allocation.  (The same functions in the i386/xen and mips pmap
implementations already use PMAP_ENTER_NOSLEEP.)

X-MFC with:	r269728
Sponsored by:	EMC / Isilon Storage Division
Author:	Alan Cox
Date:	2014-08-11 17:45:41 +00:00
Parent:	79785bbf3b
Commit:	827a661da0
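
For readers skimming the diff below, here is a minimal, purely illustrative
userland sketch of the flag test that this commit changes.  The flag values
and the may_sleep_old()/may_sleep_new() helpers are invented for the example
and are not the kernel's definitions; only the shape of the test matches the
code.

#include <stdio.h>

/* Stand-in values for illustration only; not the kernel's definitions. */
#define M_NOWAIT		0x0001
#define M_WAITOK		0x0002
#define PMAP_ENTER_NOSLEEP	0x0100

/* Old convention: sleep on page table page allocation only if M_WAITOK. */
static int
may_sleep_old(int flags)
{
	return ((flags & M_WAITOK) != 0);
}

/* New convention: sleep unless the caller passed PMAP_ENTER_NOSLEEP. */
static int
may_sleep_new(unsigned int flags)
{
	return ((flags & PMAP_ENTER_NOSLEEP) == 0);
}

int
main(void)
{
	printf("old M_NOWAIT -> %d, old M_WAITOK -> %d\n",
	    may_sleep_old(M_NOWAIT), may_sleep_old(M_WAITOK));
	printf("new NOSLEEP  -> %d, new default  -> %d\n",
	    may_sleep_new(PMAP_ENTER_NOSLEEP), may_sleep_new(0));
	return (0);
}

The practical upshot, visible in the pmap_enter() hunk below, is that
pmap_enter() can now hand its own flags argument straight to pmap_allocpte()
instead of translating it into a malloc(9)-style flag first.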

@@ -331,9 +331,9 @@ static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
     pd_entry_t newpde);
 static void pmap_update_pde_invalidate(vm_offset_t va, pd_entry_t newpde);
 
-static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
+static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags);
 
-static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags);
+static vm_page_t _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags);
 static void _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free);
 static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
 static void pmap_pte_release(pt_entry_t *pte);
@@ -1818,21 +1818,17 @@ pmap_pinit(pmap_t pmap)
  * mapped correctly.
  */
 static vm_page_t
-_pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
+_pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
 {
 	vm_paddr_t ptepa;
 	vm_page_t m;
 
-	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-	    ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
 	/*
 	 * Allocate a page table page.
 	 */
 	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
 	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
-		if (flags & M_WAITOK) {
+		if ((flags & PMAP_ENTER_NOSLEEP) == 0) {
 			PMAP_UNLOCK(pmap);
 			rw_wunlock(&pvh_global_lock);
 			VM_WAIT;
@@ -1864,16 +1860,12 @@ _pmap_allocpte(pmap_t pmap, u_int ptepindex, int flags)
 }
 
 static vm_page_t
-pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
+pmap_allocpte(pmap_t pmap, vm_offset_t va, u_int flags)
 {
 	u_int ptepindex;
 	pd_entry_t ptepa;
 	vm_page_t m;
 
-	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
-	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
-	    ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
-
 	/*
 	 * Calculate pagetable page index
 	 */
@@ -1906,7 +1898,7 @@ pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
 		 * been deallocated.
 		 */
 		m = _pmap_allocpte(pmap, ptepindex, flags);
-		if (m == NULL && (flags & M_WAITOK))
+		if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0)
 			goto retry;
 	}
 	return (m);
@@ -3468,12 +3460,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	pv_entry_t pv;
 	vm_paddr_t opa, pa;
 	vm_page_t mpte, om;
-	boolean_t invlva, nosleep, wired;
+	boolean_t invlva, wired;
 
 	va = trunc_page(va);
 	mpte = NULL;
 	wired = (flags & PMAP_ENTER_WIRED) != 0;
-	nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
 
 	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
@@ -3491,9 +3482,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	 * resident, we are creating it here.
 	 */
 	if (va < VM_MAXUSER_ADDRESS) {
-		mpte = pmap_allocpte(pmap, va, nosleep ? M_NOWAIT : M_WAITOK);
+		mpte = pmap_allocpte(pmap, va, flags);
 		if (mpte == NULL) {
-			KASSERT(nosleep,
+			KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
 			    ("pmap_allocpte failed with sleep allowed"));
 			sched_unpin();
 			rw_wunlock(&pvh_global_lock);
@@ -3828,7 +3819,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 			mpte->wire_count++;
 		} else {
 			mpte = _pmap_allocpte(pmap, ptepindex,
-			    M_NOWAIT);
+			    PMAP_ENTER_NOSLEEP);
 			if (mpte == NULL)
 				return (mpte);
 		}
@@ -4154,7 +4145,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
 			 */
 			if ((ptetemp & PG_MANAGED) != 0) {
 				dstmpte = pmap_allocpte(dst_pmap, addr,
-				    M_NOWAIT);
+				    PMAP_ENTER_NOSLEEP);
 				if (dstmpte == NULL)
 					goto out;
 				dst_pte = pmap_pte_quick(dst_pmap, addr);
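
As a side note on the protocol itself: the PMAP_ENTER_NOSLEEP test decides
between failing immediately and dropping the pmap and pvh_global locks,
sleeping in VM_WAIT, and retrying.  The userland sketch below models only
that control flow, collapsing the retry that pmap_allocpte() performs via
goto retry into a single loop; try_alloc_page(), wait_for_free_pages(),
free_pages, and the flag value are hypothetical stand-ins for
vm_page_alloc(), VM_WAIT, and the kernel's bookkeeping, not real interfaces.

#include <stdio.h>
#include <stdlib.h>

#define PMAP_ENTER_NOSLEEP	0x0100	/* stand-in value, not the kernel's */

static int free_pages = 0;		/* pretend memory is exhausted at first */

/* Hypothetical stand-in for vm_page_alloc(): fails while free_pages == 0. */
static void *
try_alloc_page(void)
{
	return (free_pages > 0 ? malloc(4096) : NULL);
}

/* Hypothetical stand-in for VM_WAIT: pretend the page daemon freed memory. */
static void
wait_for_free_pages(void)
{
	free_pages = 1;
}

/*
 * Control flow analogous to the diff above: on allocation failure, sleep
 * and retry unless the caller passed PMAP_ENTER_NOSLEEP, in which case
 * fail immediately and let the caller cope with a NULL page.
 */
static void *
alloc_pt_page(unsigned int flags)
{
	void *m;

	while ((m = try_alloc_page()) == NULL) {
		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
			return (NULL);
		/* The kernel drops the pmap and pvh_global locks here... */
		wait_for_free_pages();
		/* ...and reacquires them before retrying. */
	}
	return (m);
}

int
main(void)
{
	void *p;

	p = alloc_pt_page(PMAP_ENTER_NOSLEEP);
	printf("NOSLEEP attempt:  %s\n", p == NULL ? "failed, no sleep" : "ok");
	p = alloc_pt_page(0);
	printf("sleeping attempt: %s\n", p == NULL ? "failed" : "ok after waiting");
	free(p);
	return (0);
}

In the hunks above, pmap_enter_quick_locked() and pmap_copy() pass
PMAP_ENTER_NOSLEEP unconditionally and handle a NULL page table page by
bailing out, while pmap_enter() forwards its caller's flags and therefore
sleeps by default.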