diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c
index 6a753f433f18..45adf5167a86 100644
--- a/sys/arm/arm/pmap.c
+++ b/sys/arm/arm/pmap.c
@@ -198,6 +198,8 @@ extern struct pv_addr systempage;
 static void		pmap_free_pv_entry (pv_entry_t);
 static pv_entry_t	pmap_get_pv_entry(void);
 
+static void		pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
+    vm_prot_t, boolean_t);
 static void		pmap_vac_me_harder(struct vm_page *, pmap_t,
     vm_offset_t);
 static void		pmap_vac_me_kpmap(struct vm_page *, pmap_t,
@@ -3337,6 +3339,19 @@ void
 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     boolean_t wired)
 {
+
+	vm_page_lock_queues();
+	pmap_enter_locked(pmap, va, m, prot, wired);
+	vm_page_unlock_queues();
+}
+
+/*
+ * The page queues and pmap must be locked.
+ */
+static void
+pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+    boolean_t wired)
+{
 	struct l2_bucket *l2b = NULL;
 	struct vm_page *opg;
 	struct pv_entry *pve = NULL;
@@ -3345,7 +3360,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	u_int oflags;
 	vm_paddr_t pa;
 
-	vm_page_lock_queues();
+	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	if (va == vector_page) {
 		pa = systempage.pv_pa;
 		m = NULL;
@@ -3561,7 +3576,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		if (m)
 			pmap_vac_me_harder(m, pmap, va);
 	}
-	vm_page_unlock_queues();
 }
 
 /*
@@ -3578,15 +3592,9 @@ pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     vm_page_t mpte)
 {
 
-	vm_page_busy(m);
-	vm_page_unlock_queues();
-	VM_OBJECT_UNLOCK(m->object);
-	mtx_lock(&Giant);
-	pmap_enter(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
-	mtx_unlock(&Giant);
-	VM_OBJECT_LOCK(m->object);
-	vm_page_lock_queues();
-	vm_page_wakeup(m);
+	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
+	    FALSE);
 	return (NULL);
 }
 
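
For reference, below is a minimal, hypothetical caller sketch of the locking convention this patch establishes: pmap_enter() still acquires and releases the page queues lock itself, while pmap_enter_quick() (and the new internal pmap_enter_locked()) now assert that the caller already holds vm_page_queue_mtx. The function example_enter() and its header list are illustrative assumptions, not code from the patch or the tree.

/*
 * Illustrative sketch only (not part of this patch): shows the page
 * queues locking expected around the two entry points after the change.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

static void
example_enter(pmap_t pmap, vm_offset_t va, vm_page_t m)
{

	/* pmap_enter() takes and drops the page queues lock internally. */
	pmap_enter(pmap, va, m, VM_PROT_READ, FALSE);

	/*
	 * pmap_enter_quick() now asserts that vm_page_queue_mtx is held,
	 * so the caller wraps it with the page queues lock.
	 */
	vm_page_lock_queues();
	(void)pmap_enter_quick(pmap, va, m, VM_PROT_READ, NULL);
	vm_page_unlock_queues();
}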