From a623fedef78c184e77b8bee643151ea0bfcd7b23 Mon Sep 17 00:00:00 2001
From: Alan Cox
Date: Sat, 28 Dec 2002 19:03:54 +0000
Subject: [PATCH] Two changes to kmem_malloc():

 - Use VM_ALLOC_WIRED.
 - Perform vm_page_wakeup() after pmap_enter(), like we do everywhere else.
---
 sys/vm/vm_kern.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index ac32d690e8de..82ed9d0e7aa3 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -354,9 +354,9 @@ kmem_malloc(map, size, flags)
 	 */
 
 	if ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
-		pflags = VM_ALLOC_INTERRUPT;
+		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
 	else
-		pflags = VM_ALLOC_SYSTEM;
+		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
 
 	if (flags & M_ZERO)
 		pflags |= VM_ALLOC_ZERO;
@@ -391,6 +391,7 @@ retry:
 			m = vm_page_lookup(kmem_object,
 			    OFF_TO_IDX(offset + i));
 			vm_page_lock_queues();
+			vm_page_unwire(m, 0);
 			vm_page_free(m);
 			vm_page_unlock_queues();
 		}
@@ -431,16 +432,13 @@ retry:
 		vm_object_lock(kmem_object);
 		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
 		vm_object_unlock(kmem_object);
-		vm_page_lock_queues();
-		vm_page_wire(m);
-		vm_page_wakeup(m);
-		vm_page_unlock_queues();
 		/*
 		 * Because this is kernel_pmap, this call will not block.
 		 */
 		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
 		vm_page_lock_queues();
 		vm_page_flag_set(m, PG_WRITEABLE | PG_REFERENCED);
+		vm_page_wakeup(m);
 		vm_page_unlock_queues();
 	}
 	vm_map_unlock(map);