Make several changes to pmap_enter_quick_locked():

1. Make the caller responsible for performing pmap_install().  This reduces
the number of times that pmap_install() is performed by pmap_enter_object()
from twice per page to twice overall, as sketched below.

2. Don't block if pmap_find_pte() is unable to allocate a PTE.  If it did
block, then it might wind up mapping a cache page.  Specifically, if
pmap_enter_quick_locked() slept when called from pmap_enter_object(), the
page daemon could change an active or inactive page into a cache page just
before it was to be mapped.

3. Bail out of pmap_enter_quick_locked() if pv entries aren't plentiful.
In other words, don't force the allocation of a pv entry if they aren't
readily available.
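
Taken together, the three changes reshape the enter path roughly as follows.
This is a condensed sketch, not code from the commit itself: the names match
the diff below, but declarations, assertions, the managed/unmanaged
bookkeeping, and the actual pte setup are reduced to comments.

void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	pmap_t oldpmap;
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	PMAP_LOCK(pmap);
	oldpmap = pmap_install(pmap);	/* (1) install once per run of pages ... */
	for (m = m_start; m != NULL &&
	    (diff = m->pindex - m_start->pindex) < psize;
	    m = TAILQ_NEXT(m, listq))
		pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot);
	pmap_install(oldpmap);		/* ... and restore once, not per page */
	PMAP_UNLOCK(pmap);
}

static void
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{
	struct ia64_lpte *pte;

	if ((pte = pmap_find_pte(va)) == NULL)	/* (2) fail instead of sleeping */
		return;
	if (!pmap_present(pte)) {
		if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
		    !pmap_try_insert_pv_entry(pmap, va, m)) {
			pmap_free_pte(pte, va);	/* (3) no pv entry to spare */
			return;
		}
		/* fill in the pte as before */
	}
}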

Reviewed by: marcel@

Alan Cox, 2006-06-27 05:05:05 +00:00
commit 9b123ca12a (parent 8e0e1e2239)


@@ -242,6 +242,8 @@ static pmap_t pmap_install(pmap_t);
 static void pmap_invalidate_all(pmap_t pmap);
 static int pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte,
     vm_offset_t va, pv_entry_t pv, int freepte);
+static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
+    vm_page_t m);
 
 vm_offset_t
 pmap_steal_memory(vm_size_t size)
@@ -862,6 +864,29 @@ retry:
 	return (allocated_pv);
 }
 
+/*
+ * Conditionally create a pv entry.
+ */
+static boolean_t
+pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
+{
+	pv_entry_t pv;
+
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	if (pv_entry_count < pv_entry_high_water &&
+	    (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) {
+		pv_entry_count++;
+		pv->pv_va = va;
+		pv->pv_pmap = pmap;
+		TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
+		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+		m->md.pv_list_count++;
+		return (TRUE);
+	} else
+		return (FALSE);
+}
+
 /*
  * Add an ia64_lpte to the VHPT.
  */
@@ -1643,16 +1668,20 @@ void
 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
     vm_page_t m_start, vm_prot_t prot)
 {
+	pmap_t oldpmap;
 	vm_page_t m;
 	vm_pindex_t diff, psize;
 
 	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
 	psize = atop(end - start);
 	m = m_start;
 	PMAP_LOCK(pmap);
+	oldpmap = pmap_install(pmap);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot);
 		m = TAILQ_NEXT(m, listq);
 	}
+	pmap_install(oldpmap);
 	PMAP_UNLOCK(pmap);
 }
@@ -1668,9 +1697,12 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
 void
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
+	pmap_t oldpmap;
 
 	PMAP_LOCK(pmap);
+	oldpmap = pmap_install(pmap);
 	pmap_enter_quick_locked(pmap, va, m, prot);
+	pmap_install(oldpmap);
 	PMAP_UNLOCK(pmap);
 }
@@ -1679,35 +1711,24 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
     vm_prot_t prot)
 {
 	struct ia64_lpte *pte;
-	pmap_t oldpmap;
 	boolean_t managed;
 
 	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
 	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	oldpmap = pmap_install(pmap);
-	while ((pte = pmap_find_pte(va)) == NULL) {
-		pmap_install(oldpmap);
-		PMAP_UNLOCK(pmap);
-		vm_page_busy(m);
-		vm_page_unlock_queues();
-		VM_OBJECT_UNLOCK(m->object);
-		VM_WAIT;
-		VM_OBJECT_LOCK(m->object);
-		vm_page_lock_queues();
-		vm_page_wakeup(m);
-		PMAP_LOCK(pmap);
-		oldpmap = pmap_install(pmap);
-	}
 
+	if ((pte = pmap_find_pte(va)) == NULL)
+		return;
 	if (!pmap_present(pte)) {
-		/* Enter on the PV list if its managed. */
+		/* Enter on the PV list if the page is managed. */
 		if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
-			pmap_insert_entry(pmap, va, m);
+			if (!pmap_try_insert_pv_entry(pmap, va, m)) {
+				pmap_free_pte(pte, va);
+				return;
+			}
 			managed = TRUE;
 		} else
 			managed = FALSE;
@@ -1721,8 +1742,6 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 		    prot & (VM_PROT_READ | VM_PROT_EXECUTE));
 		pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed);
 	}
-	pmap_install(oldpmap);
 }
 
 /*