Introduce pmap_try_insert_pv_entry(), a function that conditionally creates
a pv entry if the number of entries is below the high water mark for pv
entries.

Use pmap_try_insert_pv_entry() in pmap_copy() instead of
pmap_insert_entry().  This avoids possible recursion on a pmap lock in
get_pv_entry().
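
To make the locking hazard concrete, here is a minimal user-space sketch, assuming the recursion would arise from get_pv_entry() having to take pmap locks when pv entries run short; struct map, alloc_entry(), try_alloc_entry(), and ENTRY_HIGH_WATER are invented for the illustration and are not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for a pmap and its (non-recursive) per-pmap lock. */
struct map {
        pthread_mutex_t lock;
        int             entry_count;
};

#define ENTRY_HIGH_WATER        1024    /* illustrative limit */

/*
 * Blocking allocator in the spirit of get_pv_entry(): when entries run
 * short it might, for example, reclaim some by taking map locks.  If the
 * caller already holds m->lock, that second acquisition recurses on a
 * lock that does not permit recursion.
 */
void *
alloc_entry(struct map *m)
{
        if (m->entry_count >= ENTRY_HIGH_WATER) {
                pthread_mutex_lock(&m->lock);   /* would self-deadlock */
                /* ... reclaim some entries ... */
                pthread_mutex_unlock(&m->lock);
        }
        m->entry_count++;
        return (malloc(64));
}

/*
 * Try variant in the spirit of pmap_try_insert_pv_entry(): one
 * non-blocking allocation, no reclamation, so it is safe while m->lock
 * is already held; it simply reports failure instead.
 */
bool
try_alloc_entry(struct map *m, void **entryp)
{
        if (m->entry_count >= ENTRY_HIGH_WATER ||
            (*entryp = malloc(64)) == NULL)
                return (false);
        m->entry_count++;
        return (true);
}

int
main(void)
{
        struct map m = { PTHREAD_MUTEX_INITIALIZER, 0 };
        void *e;

        pthread_mutex_lock(&m.lock);            /* analogue of PMAP_LOCK() */
        if (try_alloc_entry(&m, &e))            /* safe with the lock held */
                printf("allocated entry at %p\n", e);
        /* Calling alloc_entry(&m) here could recurse on m.lock once short. */
        pthread_mutex_unlock(&m.lock);
        return (0);
}

The point is only that the try variant never blocks or re-enters the locking path, so pmap_copy() can call it with the destination pmap lock held.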

Eliminate the explicit low-memory checks in pmap_copy().  The check that
the number of pv entries was below the high water mark was largely
ineffective because it was located in the outer loop rather than the
inner loop where pv entries were allocated.  Instead of checking, we
attempt the allocation and handle the failure.
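
A rough user-space sketch of the resulting caller structure follows; try_insert_entry(), copy_range(), and the linked list are invented for illustration and stand in for pmap_try_insert_pv_entry() and the pmap_copy() inner loop.

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

#define ENTRY_HIGH_WATER        1024    /* illustrative limit */

struct entry {
        size_t          va;
        struct entry    *next;
};

static struct entry     *entries;
static size_t            entry_count;

/*
 * Invented analogue of pmap_try_insert_pv_entry(): allocate and link an
 * entry only while below the high water mark; otherwise report failure.
 */
static bool
try_insert_entry(size_t va)
{
        struct entry *e;

        if (entry_count >= ENTRY_HIGH_WATER ||
            (e = malloc(sizeof(*e))) == NULL)
                return (false);
        entry_count++;
        e->va = va;
        e->next = entries;
        entries = e;
        return (true);
}

/*
 * Invented analogue of the pmap_copy() inner loop: no up-front
 * low-memory test; each page's allocation is attempted where it is
 * needed, and a failure merely skips that page.
 */
static void
copy_range(size_t start, size_t end, size_t pagesize)
{
        for (size_t va = start; va < end; va += pagesize) {
                if (!try_insert_entry(va))
                        continue;       /* back out / skip this page */
                /* ... copy the mapping for va ... */
        }
}

int
main(void)
{
        copy_range(0, 16 * 4096, 4096);
        return (0);
}

Because the attempt is made per page at the point of allocation, a failure only skips that optional prefaulted page instead of relying on a stale outer-loop check.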

Reviewed by: tegge
Reported by: kris
MFC after: 5 days
alc 2006-04-02 05:45:05 +00:00
parent 08249d49bf
commit af01e3f809
2 changed files with 56 additions and 26 deletions

sys/amd64/amd64/pmap.c

@@ -208,6 +208,8 @@ static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
vm_offset_t va);
static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
vm_page_t m);
static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
@@ -1583,6 +1585,29 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
m->md.pv_list_count++;
}

/*
 * Conditionally create a pv entry.
 */
static boolean_t
pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
        pv_entry_t pv;

        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if (pv_entry_count < pv_entry_high_water &&
            (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) {
                pv_entry_count++;
                pv->pv_va = va;
                pv->pv_pmap = pmap;
                TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
                TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
                m->md.pv_list_count++;
                return (TRUE);
        } else
                return (FALSE);
}

/*
 * pmap_remove_pte: do the things to unmap a page in a process
 */
@@ -2370,7 +2395,6 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
vm_offset_t addr;
vm_offset_t end_addr = src_addr + len;
vm_offset_t va_next;
vm_page_t m;
if (dst_addr != src_addr)
return;
@@ -2396,15 +2420,6 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
if (addr >= UPT_MIN_ADDRESS)
panic("pmap_copy: invalid to pmap_copy page tables");
/*
* Don't let optional prefaulting of pages make us go
* way below the low water mark of free pages or way
* above high water mark of used pv entries.
*/
if (cnt.v_free_count < cnt.v_free_reserved ||
pv_entry_count > pv_entry_high_water)
break;
pml4e = pmap_pml4e(src_pmap, addr);
if ((*pml4e & PG_V) == 0) {
va_next = (addr + NBPML4) & ~PML4MASK;
@@ -2467,16 +2482,16 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
dst_pte = (pt_entry_t *)
PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
dst_pte = &dst_pte[pmap_pte_index(addr)];
if (*dst_pte == 0) {
if (*dst_pte == 0 &&
pmap_try_insert_pv_entry(dst_pmap, addr,
PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
/*
* Clear the modified and
* accessed (referenced) bits
* during the copy.
*/
m = PHYS_TO_VM_PAGE(ptetemp & PG_FRAME);
*dst_pte = ptetemp & ~(PG_M | PG_A);
dst_pmap->pm_stats.resident_count++;
pmap_insert_entry(dst_pmap, addr, m);
} else
pmap_unwire_pte_hold(dst_pmap, addr, dstmpte);
if (dstmpte->wire_count >= srcmpte->wire_count)

sys/i386/i386/pmap.c

@@ -263,6 +263,8 @@ static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
vm_offset_t va);
static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
vm_page_t m);
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
@@ -1586,6 +1588,29 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
m->md.pv_list_count++;
}

/*
 * Conditionally create a pv entry.
 */
static boolean_t
pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
        pv_entry_t pv;

        PMAP_LOCK_ASSERT(pmap, MA_OWNED);
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if (pv_entry_count < pv_entry_high_water &&
            (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) {
                pv_entry_count++;
                pv->pv_va = va;
                pv->pv_pmap = pmap;
                TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
                TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
                m->md.pv_list_count++;
                return (TRUE);
        } else
                return (FALSE);
}

/*
 * pmap_remove_pte: do the things to unmap a page in a process
 */
@@ -2341,7 +2366,6 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
vm_offset_t addr;
vm_offset_t end_addr = src_addr + len;
vm_offset_t pdnxt;
vm_page_t m;
if (dst_addr != src_addr)
return;
@@ -2367,15 +2391,6 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
if (addr >= UPT_MIN_ADDRESS)
panic("pmap_copy: invalid to pmap_copy page tables");
/*
* Don't let optional prefaulting of pages make us go
* way below the low water mark of free pages or way
* above high water mark of used pv entries.
*/
if (cnt.v_free_count < cnt.v_free_reserved ||
pv_entry_count > pv_entry_high_water)
break;
pdnxt = (addr + NBPDR) & ~PDRMASK;
ptepindex = addr >> PDRSHIFT;
@@ -2417,16 +2432,16 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
if (dstmpte == NULL)
break;
dst_pte = pmap_pte_quick(dst_pmap, addr);
if (*dst_pte == 0) {
if (*dst_pte == 0 &&
pmap_try_insert_pv_entry(dst_pmap, addr,
PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
/*
* Clear the modified and
* accessed (referenced) bits
* during the copy.
*/
m = PHYS_TO_VM_PAGE(ptetemp);
*dst_pte = ptetemp & ~(PG_M | PG_A);
dst_pmap->pm_stats.resident_count++;
pmap_insert_entry(dst_pmap, addr, m);
} else
pmap_unwire_pte_hold(dst_pmap, dstmpte);
if (dstmpte->wire_count >= srcmpte->wire_count)