Change the free page queue lock from a spin mutex to a default (blocking)
mutex.  With the demise of Alpha support, there is no longer a reason for
it to be a spin mutex.
Alan Cox 2007-02-05 06:02:55 +00:00
parent 64e740a352
commit 3ae3919d0b
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=166508
4 changed files with 22 additions and 22 deletions
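
In mutex(9) terms, the change swaps both the initialization flag (MTX_SPIN to MTX_DEF) and the lock/unlock calls at every site in the diff below. A minimal sketch of the two styles follows; it is not part of the commit, and example_free_mtx is a hypothetical lock standing in for the kernel's vm_page_queue_free_mtx.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/* Hypothetical lock, for illustration only. */
static struct mtx example_free_mtx;

static void
example_mutex_styles(void)
{
	/* Old style: spin mutex; a contending CPU busy-waits with interrupts disabled. */
	mtx_init(&example_free_mtx, "example free mutex", NULL, MTX_SPIN);
	mtx_lock_spin(&example_free_mtx);
	/* ... short critical section ... */
	mtx_unlock_spin(&example_free_mtx);
	mtx_destroy(&example_free_mtx);

	/* New style: default (blocking) mutex; a contending thread may block instead. */
	mtx_init(&example_free_mtx, "example free mutex", NULL, MTX_DEF);
	mtx_lock(&example_free_mtx);
	/* ... critical section ... */
	mtx_unlock(&example_free_mtx);
	mtx_destroy(&example_free_mtx);
}

The commit itself only flips the MTX_SPIN flag to MTX_DEF and renames the lock/unlock calls accordingly, as the hunks below show.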

View File

@@ -198,7 +198,7 @@ contigmalloc1(
for (pass = 2; pass >= 0; pass--) {
vm_page_lock_queues();
again0:
-mtx_lock_spin(&vm_page_queue_free_mtx);
+mtx_lock(&vm_page_queue_free_mtx);
again:
/*
* Find first page in array that is free, within range,
@@ -219,7 +219,7 @@ contigmalloc1(
*/
if ((i == cnt.v_page_count) ||
((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
-mtx_unlock_spin(&vm_page_queue_free_mtx);
+mtx_unlock(&vm_page_queue_free_mtx);
/*
* Instead of racing to empty the inactive/active
* queues, give up, even with more left to free,
@@ -260,7 +260,7 @@ contigmalloc1(
goto again;
}
}
-mtx_unlock_spin(&vm_page_queue_free_mtx);
+mtx_unlock(&vm_page_queue_free_mtx);
for (i = start; i < (start + size / PAGE_SIZE); i++) {
vm_page_t m = &pga[i];
@@ -283,7 +283,7 @@ contigmalloc1(
VM_OBJECT_UNLOCK(object);
}
}
-mtx_lock_spin(&vm_page_queue_free_mtx);
+mtx_lock(&vm_page_queue_free_mtx);
for (i = start; i < (start + size / PAGE_SIZE); i++) {
pqtype = pga[i].queue - pga[i].pc;
if (pqtype != PQ_FREE) {
@@ -304,7 +304,7 @@ contigmalloc1(
m->wire_count = 0;
m->busy = 0;
}
-mtx_unlock_spin(&vm_page_queue_free_mtx);
+mtx_unlock(&vm_page_queue_free_mtx);
vm_page_unlock_queues();
/*
* We've found a contiguous chunk that meets our requirements.
@@ -368,12 +368,12 @@ vm_contig_unqueue_free(vm_page_t m)
{
int error = 0;
-mtx_lock_spin(&vm_page_queue_free_mtx);
+mtx_lock(&vm_page_queue_free_mtx);
if ((m->queue - m->pc) == PQ_FREE)
vm_pageq_remove_nowakeup(m);
else
error = EAGAIN;
-mtx_unlock_spin(&vm_page_queue_free_mtx);
+mtx_unlock(&vm_page_queue_free_mtx);
if (error)
return (error);
m->valid = VM_PAGE_BITS_ALL;

View File

@@ -254,7 +254,7 @@ vm_page_startup(vm_offset_t vaddr)
mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF |
MTX_RECURSE);
mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
-MTX_SPIN);
+MTX_DEF);
/*
* Initialize the queue headers for the free queue, the active queue
@@ -869,7 +869,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
};
loop:
-mtx_lock_spin(&vm_page_queue_free_mtx);
+mtx_lock(&vm_page_queue_free_mtx);
if (cnt.v_free_count > cnt.v_free_reserved ||
(page_req == VM_ALLOC_SYSTEM &&
cnt.v_cache_count == 0 &&
@@ -881,7 +881,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
*/
m = vm_pageq_find(PQ_FREE, color, (req & VM_ALLOC_ZERO) != 0);
} else if (page_req != VM_ALLOC_INTERRUPT) {
-mtx_unlock_spin(&vm_page_queue_free_mtx);
+mtx_unlock(&vm_page_queue_free_mtx);
/*
* Allocatable from cache (non-interrupt only). On success,
* we must free the page and try again, thus ensuring that
@@ -899,9 +899,9 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
if (page_req != VM_ALLOC_SYSTEM)
return (NULL);
-mtx_lock_spin(&vm_page_queue_free_mtx);
+mtx_lock(&vm_page_queue_free_mtx);
if (cnt.v_free_count <= cnt.v_interrupt_free_min) {
-mtx_unlock_spin(&vm_page_queue_free_mtx);
+mtx_unlock(&vm_page_queue_free_mtx);
return (NULL);
}
m = vm_pageq_find(PQ_FREE, color, (req & VM_ALLOC_ZERO) != 0);
@@ -913,7 +913,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
/*
* Not allocatable from cache from interrupt, give up.
*/
-mtx_unlock_spin(&vm_page_queue_free_mtx);
+mtx_unlock(&vm_page_queue_free_mtx);
atomic_add_int(&vm_pageout_deficit, 1);
pagedaemon_wakeup();
return (NULL);
@@ -957,7 +957,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
m->busy = 0;
m->valid = 0;
KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m));
-mtx_unlock_spin(&vm_page_queue_free_mtx);
+mtx_unlock(&vm_page_queue_free_mtx);
if ((req & VM_ALLOC_NOOBJ) == 0)
vm_page_insert(m, object, pindex);
@@ -1151,7 +1151,7 @@ vm_page_free_toq(vm_page_t m)
} else
VM_PAGE_SETQUEUE1(m, PQ_FREE);
pq = &vm_page_queues[VM_PAGE_GETQUEUE(m)];
-mtx_lock_spin(&vm_page_queue_free_mtx);
+mtx_lock(&vm_page_queue_free_mtx);
pq->lcnt++;
++(*pq->cnt);
@@ -1165,7 +1165,7 @@ vm_page_free_toq(vm_page_t m)
} else {
TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
}
-mtx_unlock_spin(&vm_page_queue_free_mtx);
+mtx_unlock(&vm_page_queue_free_mtx);
vm_page_free_wakeup();
}

View File

@@ -198,9 +198,9 @@ vm_pageq_add_new_page(vm_paddr_t pa)
m->flags = 0;
m->pc = (pa >> PAGE_SHIFT) & PQ_COLORMASK;
pmap_page_init(m);
-mtx_lock_spin(&vm_page_queue_free_mtx);
+mtx_lock(&vm_page_queue_free_mtx);
vm_pageq_enqueue(m->pc + PQ_FREE, m);
-mtx_unlock_spin(&vm_page_queue_free_mtx);
+mtx_unlock(&vm_page_queue_free_mtx);
}
/*

View File

@@ -103,14 +103,14 @@ vm_page_zero_idle(void)
static int free_rover;
vm_page_t m;
-mtx_lock_spin(&vm_page_queue_free_mtx);
+mtx_lock(&vm_page_queue_free_mtx);
zero_state = 0;
m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
if (m != NULL && (m->flags & PG_ZERO) == 0) {
vm_pageq_remove_nowakeup(m);
-mtx_unlock_spin(&vm_page_queue_free_mtx);
+mtx_unlock(&vm_page_queue_free_mtx);
pmap_zero_page_idle(m);
-mtx_lock_spin(&vm_page_queue_free_mtx);
+mtx_lock(&vm_page_queue_free_mtx);
m->flags |= PG_ZERO;
vm_pageq_enqueue(PQ_FREE + m->pc, m);
++vm_page_zero_count;
@@ -119,7 +119,7 @@ vm_page_zero_idle(void)
zero_state = 1;
}
free_rover = (free_rover + PQ_PRIME2) & PQ_COLORMASK;
-mtx_unlock_spin(&vm_page_queue_free_mtx);
+mtx_unlock(&vm_page_queue_free_mtx);
}
/* Called by vm_page_free to hint that a new page is available. */