Modify pmap_enter_quick() so that it expects the page queues to be locked on
entry and it assumes the responsibility for releasing the page queues lock if
it must sleep.

Remove a bogus comment from pmap_enter_quick().

Using the first change, modify vm_map_pmap_enter() so that the page queues
lock is acquired and released once, rather than each time that a page
is mapped.
Author: Alan Cox
Date:   2004-12-23 20:16:11 +00:00
Commit: 1f70d62298 (parent dfa7bc486b)

11 changed files with 19 additions and 30 deletions
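
The contract after this change is: pmap_enter_quick() is entered with the page
queues lock held, it may drop that lock around anything that can sleep, and, as
the stub hunks below show (the ones that busy the page and later call
vm_page_wakeup()), it re-takes the lock before returning, so the caller still
sees it held. The per-architecture hunks make the entry expectation explicit
with mtx_assert(&vm_page_queue_mtx, MA_OWNED). The following is a minimal
user-space sketch of that callee-side rule, not kernel code: a pthread mutex
stands in for the page queues lock, and queue_lock, enter_quick, and
might_sleep are hypothetical stand-ins.

#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for an operation that can sleep, e.g. falling back to a slow path. */
static void
might_sleep(void)
{
    usleep(1000);
}

/* Called with queue_lock held; returns with queue_lock held. */
static void
enter_quick(void)
{
    /* Release the lock only around the part that may sleep... */
    pthread_mutex_unlock(&queue_lock);
    might_sleep();
    /* ...and re-acquire it so the caller's view of the lock is unchanged. */
    pthread_mutex_lock(&queue_lock);
}

int
main(void)
{
    pthread_mutex_lock(&queue_lock);
    enter_quick();
    pthread_mutex_unlock(&queue_lock);
    return (0);
}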

@@ -1799,7 +1799,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
  * 2. Not wired.
  * 3. Read access.
  * 4. No page table pages.
- * 5. Tlbflush is deferred to calling procedure.
  * 6. Page IS managed.
  * but is *MUCH* faster than pmap_enter...
  */
@@ -1810,7 +1809,8 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
     register pt_entry_t *pte;
     int managed;
-    vm_page_lock_queues();
+    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
     PMAP_LOCK(pmap);
     /*
@@ -1905,7 +1905,6 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
     *pte = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m)) | PG_V | PG_KRE | PG_URE | managed;
 out:
     alpha_pal_imb(); /* XXX overkill? */
-    vm_page_unlock_queues();
     PMAP_UNLOCK(pmap);
     return mpte;
 }

@@ -2007,7 +2007,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
  * 2. Not wired.
  * 3. Read access.
  * 4. No page table pages.
- * 5. Tlbflush is deferred to calling procedure.
  * 6. Page IS managed.
  * but is *MUCH* faster than pmap_enter...
  */
@@ -2018,7 +2017,8 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
     pt_entry_t *pte;
     vm_paddr_t pa;
-    vm_page_lock_queues();
+    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
     PMAP_LOCK(pmap);
     /*
@@ -2110,7 +2110,6 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
     else
         pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
 out:
-    vm_page_unlock_queues();
     PMAP_UNLOCK(pmap);
     return mpte;
 }

@@ -3409,7 +3409,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
  * 2. Not wired.
  * 3. Read access.
  * 4. No page table pages.
- * 5. Tlbflush is deferred to calling procedure.
  * 6. Page IS managed.
  * but is *MUCH* faster than pmap_enter...
  */
@@ -3418,7 +3417,6 @@ vm_page_t
 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 {
-    vm_page_lock_queues();
     vm_page_busy(m);
     vm_page_unlock_queues();
     VM_OBJECT_UNLOCK(m->object);
@@ -3429,7 +3427,6 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
     VM_OBJECT_LOCK(m->object);
     vm_page_lock_queues();
     vm_page_wakeup(m);
-    vm_page_unlock_queues();
     return (NULL);
 }

@@ -2048,7 +2048,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
  * 2. Not wired.
  * 3. Read access.
  * 4. No page table pages.
- * 5. Tlbflush is deferred to calling procedure.
  * 6. Page IS managed.
  * but is *MUCH* faster than pmap_enter...
  */
@@ -2059,7 +2058,8 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
     pt_entry_t *pte;
     vm_paddr_t pa;
-    vm_page_lock_queues();
+    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
     PMAP_LOCK(pmap);
     /*
@@ -2151,7 +2151,6 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
     else
         pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
 out:
-    vm_page_unlock_queues();
     PMAP_UNLOCK(pmap);
     return mpte;
 }

@@ -1612,7 +1612,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
  * 2. Not wired.
  * 3. Read access.
  * 4. No page table pages.
- * 5. Tlbflush is deferred to calling procedure.
  * 6. Page IS managed.
  * but is *MUCH* faster than pmap_enter...
  */
@@ -1624,7 +1623,8 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
     pmap_t oldpmap;
     boolean_t managed;
-    vm_page_lock_queues();
+    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+    VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
     PMAP_LOCK(pmap);
     oldpmap = pmap_install(pmap);
@@ -1666,7 +1666,6 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
     pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed);
 reinstall:
-    vm_page_unlock_queues();
     pmap_install(oldpmap);
     PMAP_UNLOCK(pmap);
     return (NULL);

@@ -1048,7 +1048,6 @@ vm_page_t
 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 {
-    vm_page_lock_queues();
     vm_page_busy(m);
     vm_page_unlock_queues();
     VM_OBJECT_UNLOCK(m->object);
@@ -1058,7 +1057,6 @@ pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
     VM_OBJECT_LOCK(m->object);
     vm_page_lock_queues();
     vm_page_wakeup(m);
-    vm_page_unlock_queues();
     return (NULL);
 }

@@ -1048,7 +1048,6 @@ vm_page_t
 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 {
-    vm_page_lock_queues();
     vm_page_busy(m);
     vm_page_unlock_queues();
     VM_OBJECT_UNLOCK(m->object);
@@ -1058,7 +1057,6 @@ pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
     VM_OBJECT_LOCK(m->object);
     vm_page_lock_queues();
     vm_page_wakeup(m);
-    vm_page_unlock_queues();
     return (NULL);
 }

@@ -1048,7 +1048,6 @@ vm_page_t
 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 {
-    vm_page_lock_queues();
     vm_page_busy(m);
     vm_page_unlock_queues();
     VM_OBJECT_UNLOCK(m->object);
@@ -1058,7 +1057,6 @@ pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
     VM_OBJECT_LOCK(m->object);
     vm_page_lock_queues();
     vm_page_wakeup(m);
-    vm_page_unlock_queues();
     return (NULL);
 }

@@ -1401,7 +1401,6 @@ vm_page_t
 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 {
-    vm_page_lock_queues();
     vm_page_busy(m);
     vm_page_unlock_queues();
     VM_OBJECT_UNLOCK(m->object);
@@ -1411,7 +1410,6 @@ pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
     VM_OBJECT_LOCK(m->object);
     vm_page_lock_queues();
     vm_page_wakeup(m);
-    vm_page_unlock_queues();
     return (NULL);
 }

@@ -977,12 +977,11 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
         (m->busy == 0) &&
         (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
-        if ((m->queue - m->pc) == PQ_CACHE) {
-            vm_page_lock_queues();
+        vm_page_lock_queues();
+        if ((m->queue - m->pc) == PQ_CACHE)
             vm_page_deactivate(m);
-            vm_page_unlock_queues();
-        }
         mpte = pmap_enter_quick(pmap, addr, m, mpte);
+        vm_page_unlock_queues();
     }
     VM_OBJECT_UNLOCK(lobject);
 }

@@ -1369,6 +1369,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
     vm_offset_t tmpidx;
     int psize;
     vm_page_t p, mpte;
+    boolean_t are_queues_locked;
     if ((prot & VM_PROT_READ) == 0 || object == NULL)
         return;
@@ -1392,6 +1393,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
         psize = object->size - pindex;
     }
+    are_queues_locked = FALSE;
     mpte = NULL;
     if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
@@ -1420,15 +1422,18 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
         if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
             (p->busy == 0) &&
             (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
-            if ((p->queue - p->pc) == PQ_CACHE) {
+            if (!are_queues_locked) {
+                are_queues_locked = TRUE;
                 vm_page_lock_queues();
-                vm_page_deactivate(p);
-                vm_page_unlock_queues();
             }
+            if ((p->queue - p->pc) == PQ_CACHE)
+                vm_page_deactivate(p);
             mpte = pmap_enter_quick(map->pmap,
                 addr + ptoa(tmpidx), p, mpte);
         }
     }
+    if (are_queues_locked)
+        vm_page_unlock_queues();
 unlock_return:
     VM_OBJECT_UNLOCK(object);
 }
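
The vm_map_pmap_enter() change above is the caller-side half of the new
protocol: the page queues lock is taken lazily, the first time the loop
reaches a page it will map, and released exactly once after the loop, instead
of being acquired and dropped for every page. Below is a rough user-space
sketch of that pattern, not kernel code: a pthread mutex stands in for the
page queues lock, and queue_lock, map_one_page, and map_range are hypothetical
stand-ins.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stands in for the per-page mapping call; expects queue_lock to be held. */
static void
map_one_page(int i)
{
    printf("mapping page %d with the queue lock held\n", i);
}

static void
map_range(int npages)
{
    bool queues_locked = false;
    int i;

    for (i = 0; i < npages; i++) {
        /* Take the lock once, on first use, like are_queues_locked above. */
        if (!queues_locked) {
            queues_locked = true;
            pthread_mutex_lock(&queue_lock);
        }
        map_one_page(i);
    }
    /* Release it once, no matter how many pages were mapped. */
    if (queues_locked)
        pthread_mutex_unlock(&queue_lock);
}

int
main(void)
{
    map_range(4);
    return (0);
}

If the loop maps nothing, the lock is never touched at all, which is the point
of the flag.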