Introduce pmap_enter_locked() and use it to reimplement pmap_enter_quick().

Tested by: cognet@
Author: Alan Cox
Date:   2006-06-01 01:31:07 +00:00
Parent: f3b90d48bb
Commit: d49e4d3f55
Notes (svn2git, 2020-12-20): svn path=/head/; revision=159127
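
The change follows the common "_locked" refactoring pattern: the body of
pmap_enter(), which previously acquired and released the page queues lock
itself, moves into pmap_enter_locked(), which instead asserts that the caller
already holds the lock; pmap_enter() becomes a thin wrapper, and
pmap_enter_quick() can call the locked variant directly rather than detouring
through Giant and the VM object lock. Below is a minimal userspace sketch of
the pattern, assuming pthreads; the names (table_lock, table_insert,
table_insert_pair) are hypothetical and not from the FreeBSD sources.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
    static int table[16];

    /*
     * Core implementation; the caller must hold table_lock.  This is
     * the analogue of pmap_enter_locked() and its mtx_assert() on entry.
     */
    static void
    table_insert_locked(int idx, int val)
    {
            table[idx] = val;
    }

    /*
     * Public entry point: a thin wrapper that takes and drops the lock,
     * as pmap_enter() now does around pmap_enter_locked().
     */
    static void
    table_insert(int idx, int val)
    {
            pthread_mutex_lock(&table_lock);
            table_insert_locked(idx, val);
            pthread_mutex_unlock(&table_lock);
    }

    /*
     * A consumer that already holds the lock calls the locked variant
     * directly, the way pmap_enter_quick() now calls pmap_enter_locked().
     */
    static void
    table_insert_pair(int a, int b, int val)
    {
            pthread_mutex_lock(&table_lock);
            table_insert_locked(a, val);
            table_insert_locked(b, val);
            pthread_mutex_unlock(&table_lock);
    }

    int
    main(void)
    {
            table_insert(0, 42);
            table_insert_pair(1, 2, 7);
            printf("%d %d %d\n", table[0], table[1], table[2]);
            return (0);
    }

The payoff is the same as in the diff below: the caller that already holds the
lock (pmap_enter_quick() in the commit, table_insert_pair() in the sketch)
reaches the core implementation without dropping and reacquiring locks around
every call.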

@@ -198,6 +198,8 @@ extern struct pv_addr systempage;
 static void pmap_free_pv_entry (pv_entry_t);
 static pv_entry_t pmap_get_pv_entry(void);
+static void pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+    vm_prot_t, boolean_t);
 static void pmap_vac_me_harder(struct vm_page *, pmap_t,
     vm_offset_t);
 static void pmap_vac_me_kpmap(struct vm_page *, pmap_t,
@@ -3337,6 +3339,19 @@ void
 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     boolean_t wired)
 {
+
+        vm_page_lock_queues();
+        pmap_enter_locked(pmap, va, m, prot, wired);
+        vm_page_unlock_queues();
+}
+
+/*
+ * The page queues and pmap must be locked.
+ */
+static void
+pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    boolean_t wired)
+{
         struct l2_bucket *l2b = NULL;
         struct vm_page *opg;
         struct pv_entry *pve = NULL;
@@ -3345,7 +3360,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
         u_int oflags;
         vm_paddr_t pa;
 
-        vm_page_lock_queues();
+        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
         if (va == vector_page) {
                 pa = systempage.pv_pa;
                 m = NULL;
@@ -3561,7 +3576,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
                 if (m)
                         pmap_vac_me_harder(m, pmap, va);
         }
-        vm_page_unlock_queues();
 }
 
 /*
@@ -3578,15 +3592,9 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     vm_page_t mpte)
 {
 
-        vm_page_busy(m);
-        vm_page_unlock_queues();
-        VM_OBJECT_UNLOCK(m->object);
-        mtx_lock(&Giant);
-        pmap_enter(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
-        mtx_unlock(&Giant);
-        VM_OBJECT_LOCK(m->object);
-        vm_page_lock_queues();
-        vm_page_wakeup(m);
+        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+        pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
+            FALSE);
         return (NULL);
 }
 