Move vm_page_requeue() to the only file that uses it.

MFC after:	3 weeks
Author:	Alan Cox
Date:	2012-10-13 20:19:43 +00:00
Commit:	7ecfabc7bb (parent f81d134e7e)

3 changed files with 32 additions and 34 deletions
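
The helper in question simply moves a page to the tail of its current paging queue using the sys/queue.h tail-queue macros, and the commit relocates it (renamed vm_pageout_requeue() and made static) from vm_page.c into vm_pageout.c, its only caller. As a rough, self-contained illustration of that requeue idiom (a userspace sketch with made-up types, not the kernel code itself), the two TAILQ operations behave like this:

/*
 * Userspace sketch of the requeue idiom used in the diff below: unlink an
 * element from its tail queue and reinsert it at the tail.  The struct and
 * function names here are hypothetical; only TAILQ_REMOVE() and
 * TAILQ_INSERT_TAIL() mirror the kernel code.
 */
#include <sys/queue.h>
#include <stdio.h>

struct page {
	int id;
	TAILQ_ENTRY(page) pageq;	/* linkage, like vm_page's pageq */
};

TAILQ_HEAD(pagelist, page);

static void
requeue(struct pagelist *q, struct page *m)
{

	/* Same two steps as vm_page_requeue()/vm_pageout_requeue(). */
	TAILQ_REMOVE(q, m, pageq);
	TAILQ_INSERT_TAIL(q, m, pageq);
}

int
main(void)
{
	struct pagelist q = TAILQ_HEAD_INITIALIZER(q);
	struct page pages[3], *p;
	int i;

	for (i = 0; i < 3; i++) {
		pages[i].id = i;
		TAILQ_INSERT_TAIL(&q, &pages[i], pageq);
	}
	requeue(&q, &pages[0]);		/* page 0 moves from head to tail */
	TAILQ_FOREACH(p, &q, pageq)
		printf("%d\n", p->id);	/* prints 1, 2, 0 */
	return (0);
}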

--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c

@@ -1867,28 +1867,6 @@ vm_waitpfault(void)
 	    "pfault", 0);
 }
 
-/*
- *	vm_page_requeue:
- *
- *	Move the given page to the tail of its present page queue.
- *
- *	The page queues must be locked.
- */
-void
-vm_page_requeue(vm_page_t m)
-{
-	struct vpgqueues *vpq;
-	int queue;
-
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	queue = m->queue;
-	KASSERT(queue != PQ_NONE,
-	    ("vm_page_requeue: page %p is not queued", m));
-	vpq = &vm_page_queues[queue];
-	TAILQ_REMOVE(&vpq->pl, m, pageq);
-	TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
-}
-
 /*
  *	vm_page_queue_remove:
  *

--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h

@@ -392,7 +392,6 @@ void vm_page_readahead_finish(vm_page_t m);
 void vm_page_reference(vm_page_t m);
 void vm_page_remove (vm_page_t);
 void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
-void vm_page_requeue(vm_page_t m);
 void vm_page_set_valid_range(vm_page_t m, int base, int size);
 void vm_page_sleep(vm_page_t m, const char *msg);
 vm_page_t vm_page_splay(vm_pindex_t, vm_page_t);

--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c

@@ -218,6 +218,7 @@ static void vm_req_vmdaemon(int req);
 #endif
 static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
 static void vm_pageout_page_stats(void);
+static void vm_pageout_requeue(vm_page_t m);
 
 /*
  * Initialize a dummy page for marking the caller's place in the specified
@@ -749,7 +750,7 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
 						vm_page_deactivate(p);
 					} else {
 						vm_page_lock_queues();
-						vm_page_requeue(p);
+						vm_pageout_requeue(p);
 						vm_page_unlock_queues();
 					}
 				} else {
@@ -758,7 +759,7 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
 					    ACT_ADVANCE)
 						p->act_count += ACT_ADVANCE;
 					vm_page_lock_queues();
-					vm_page_requeue(p);
+					vm_pageout_requeue(p);
 					vm_page_unlock_queues();
 				}
 			} else if (p->queue == PQ_INACTIVE)
@@ -854,6 +855,26 @@ vm_pageout_map_deactivate_pages(map, desired)
 }
 #endif /* !defined(NO_SWAPPING) */
 
+/*
+ *	vm_pageout_requeue:
+ *
+ *	Move the specified page to the tail of its present page queue.
+ *
+ *	The page queues must be locked.
+ */
+static void
+vm_pageout_requeue(vm_page_t m)
+{
+	struct vpgqueues *vpq;
+
+	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	KASSERT(m->queue != PQ_NONE,
+	    ("vm_pageout_requeue: page %p is not queued", m));
+	vpq = &vm_page_queues[m->queue];
+	TAILQ_REMOVE(&vpq->pl, m, pageq);
+	TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
+}
+
 /*
  * vm_pageout_scan does the dirty work for the pageout daemon.
  */
@@ -1097,7 +1118,7 @@ vm_pageout_scan(int pass)
 			m->flags |= PG_WINATCFLS;
 			vm_page_lock_queues();
 			queues_locked = TRUE;
-			vm_page_requeue(m);
+			vm_pageout_requeue(m);
 		} else if (maxlaunder > 0) {
 			/*
 			 * We always want to try to flush some dirty pages if
@@ -1128,7 +1149,7 @@
 				vm_page_unlock(m);
 				VM_OBJECT_UNLOCK(object);
 				queues_locked = TRUE;
-				vm_page_requeue(m);
+				vm_pageout_requeue(m);
 				goto relock_queues;
 			}
 
@@ -1216,7 +1237,7 @@
 				 */
 				if (m->hold_count) {
 					vm_page_unlock(m);
-					vm_page_requeue(m);
+					vm_pageout_requeue(m);
 					if (object->flags & OBJ_MIGHTBEDIRTY)
 						vnodes_skipped++;
 					goto unlock_and_continue;
@@ -1321,7 +1342,7 @@ relock_queues:
 		    (m->hold_count != 0)) {
 			vm_page_unlock(m);
 			VM_OBJECT_UNLOCK(object);
-			vm_page_requeue(m);
+			vm_pageout_requeue(m);
 			m = next;
 			continue;
 		}
@@ -1358,7 +1379,7 @@ relock_queues:
 		 * page activation count stats.
 		 */
 		if (actcount && (object->ref_count != 0)) {
-			vm_page_requeue(m);
+			vm_pageout_requeue(m);
 		} else {
 			m->act_count -= min(m->act_count, ACT_DECLINE);
 			if (vm_pageout_algorithm ||
@@ -1376,7 +1397,7 @@ relock_queues:
 					vm_page_deactivate(m);
 				}
 			} else {
-				vm_page_requeue(m);
+				vm_pageout_requeue(m);
 			}
 		}
 		vm_page_unlock(m);
@@ -1588,7 +1609,7 @@ vm_pageout_page_stats()
 		    (m->hold_count != 0)) {
 			vm_page_unlock(m);
 			VM_OBJECT_UNLOCK(object);
-			vm_page_requeue(m);
+			vm_pageout_requeue(m);
 			m = next;
 			continue;
 		}
@@ -1604,7 +1625,7 @@ vm_pageout_page_stats()
 			m->act_count += ACT_ADVANCE + actcount;
 			if (m->act_count > ACT_MAX)
 				m->act_count = ACT_MAX;
-			vm_page_requeue(m);
+			vm_pageout_requeue(m);
 		} else {
 			if (m->act_count == 0) {
 				/*
@@ -1620,7 +1641,7 @@
 				vm_page_deactivate(m);
 			} else {
 				m->act_count -= min(m->act_count, ACT_DECLINE);
-				vm_page_requeue(m);
+				vm_pageout_requeue(m);
 			}
 		}
 		vm_page_unlock(m);