Allow vm_page_free_prep() to dequeue pages without the page lock.
This is a step towards being able to free pages without the page lock
held.  The approach is simply to add an implementation of
vm_page_dequeue_deferred() which does not assert that the page lock is
held.  Formally, the page lock is required to set PGA_DEQUEUE, but in
the case of vm_page_free_prep() we get the same mutual exclusion for
free by virtue of the fact that no other references to the page may
exist.

No functional change intended.

Reviewed by:	kib (previous version)
MFC after:	2 weeks
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D19065
parent ae1284109d
commit 93cad7dba5
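
The heart of the change is the new vm_page_dequeue_deferred_free() shown in
the diff below, which tests the page's flags and queue state with no lock
held.  The following user-space C11 sketch illustrates that lockless pattern
in isolation; the structure, names, and constants are invented for
illustration and are not the kernel's vm_page.

/*
 * Minimal user-space C11 sketch of the lockless deferred-dequeue pattern
 * used by vm_page_dequeue_deferred_free().  All names here are invented.
 */
#include <stdatomic.h>
#include <stdint.h>

#define	DEQUEUE_FLAG	0x01	/* stands in for PGA_DEQUEUE */
#define	QUEUE_NONE	0xff	/* stands in for PQ_NONE */

struct toy_page {
	_Atomic uint8_t	aflags;	/* atomically updated flag word */
	_Atomic uint8_t	queue;	/* queue index, or QUEUE_NONE */
};

/*
 * Schedule a deferred dequeue of a page that is being freed.  The caller
 * holds the last reference, so no other thread can concurrently schedule
 * queue operations on the page and no lock is required.
 */
static void
toy_dequeue_deferred_free(struct toy_page *m)
{
	uint8_t queue;

	/* A dequeue is already pending; nothing to do. */
	if ((atomic_load_explicit(&m->aflags, memory_order_relaxed) &
	    DEQUEUE_FLAG) != 0)
		return;
	/*
	 * Order the flag test before the queue read, mirroring the
	 * atomic_thread_fence_acq() call in the kernel function.
	 */
	atomic_thread_fence(memory_order_acquire);
	if ((queue = atomic_load_explicit(&m->queue,
	    memory_order_relaxed)) == QUEUE_NONE)
		return;
	atomic_fetch_or_explicit(&m->aflags, DEQUEUE_FLAG,
	    memory_order_relaxed);
	/* Here the kernel would batch (m, queue) for deferred processing. */
	(void)queue;
}

In the kernel version below, the flag is set with vm_page_aflag_set() and the
page is handed to vm_pqbatch_submit_page() for batched processing.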

diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
@@ -3175,7 +3175,11 @@ vm_pqbatch_submit_page(vm_page_t m, uint8_t queue)
 	struct vm_pagequeue *pq;
 	int domain;
 
-	vm_page_assert_locked(m);
+	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+	    ("page %p is unmanaged", m));
+	KASSERT(mtx_owned(vm_page_lockptr(m)) ||
+	    (m->object == NULL && (m->aflags & PGA_DEQUEUE) != 0),
+	    ("missing synchronization for page %p", m));
 	KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));
 
 	domain = vm_phys_domain(m);

@@ -3197,8 +3201,9 @@ vm_pqbatch_submit_page(vm_page_t m, uint8_t queue)
 
 	/*
 	 * The page may have been logically dequeued before we acquired the
-	 * page queue lock.  In this case, the page lock prevents the page
-	 * from being logically enqueued elsewhere.
+	 * page queue lock.  In this case, since we either hold the page lock
+	 * or the page is being freed, a different thread cannot be concurrently
+	 * enqueuing the page.
 	 */
 	if (__predict_true(m->queue == queue))
 		vm_pqbatch_process_page(pq, m);

@@ -3289,6 +3294,30 @@ vm_page_dequeue_deferred(vm_page_t m)
 	vm_pqbatch_submit_page(m, queue);
 }
 
+/*
+ * A variant of vm_page_dequeue_deferred() that does not assert the page
+ * lock and is only to be called from vm_page_free_prep().  It is just an
+ * open-coded implementation of vm_page_dequeue_deferred().  Because the
+ * page is being freed, we can assume that nothing else is scheduling queue
+ * operations on this page, so we get for free the mutual exclusion that
+ * is otherwise provided by the page lock.
+ */
+static void
+vm_page_dequeue_deferred_free(vm_page_t m)
+{
+	uint8_t queue;
+
+	KASSERT(m->object == NULL, ("page %p has an object reference", m));
+
+	if ((m->aflags & PGA_DEQUEUE) != 0)
+		return;
+	atomic_thread_fence_acq();
+	if ((queue = m->queue) == PQ_NONE)
+		return;
+	vm_page_aflag_set(m, PGA_DEQUEUE);
+	vm_pqbatch_submit_page(m, queue);
+}
+
 /*
  * vm_page_dequeue:
  *

@@ -3474,7 +3503,7 @@ vm_page_free_prep(vm_page_t m)
 	 * dequeue.
 	 */
 	if ((m->oflags & VPO_UNMANAGED) == 0)
-		vm_page_dequeue_deferred(m);
+		vm_page_dequeue_deferred_free(m);
 
 	m->valid = 0;
 	vm_page_undirty(m);

diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
@@ -351,8 +351,10 @@ extern struct mtx_padalign pa_lock[];
  * queue, and cleared when the dequeue request is processed.  A page may
  * have PGA_DEQUEUE set and PGA_ENQUEUED cleared, for instance if a dequeue
  * is requested after the page is scheduled to be enqueued but before it is
- * actually inserted into the page queue.  The page lock must be held to set
- * this flag, and the queue lock for the page must be held to clear it.
+ * actually inserted into the page queue.  For allocated pages, the page lock
+ * must be held to set this flag, but it may be set by vm_page_free_prep()
+ * without the page lock held.  The page queue lock must be held to clear the
+ * PGA_DEQUEUE flag.
  *
  * PGA_REQUEUE is set when the page is scheduled to be enqueued or requeued
  * in its page queue.  The page lock must be held to set this flag, and the
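
The updated comment encodes a small locking protocol for PGA_DEQUEUE: the
page lock (or the page being freed) to set it, the page queue lock to clear
it.  As a rough summary in code, here is a kernel-style sketch of assertion
helpers capturing those rules; the helpers themselves are invented for
illustration and do not exist in the tree.

/*
 * Invented helpers (not present in the kernel) summarizing the
 * PGA_DEQUEUE rules documented in the comment above.
 */
static void
assert_may_set_pga_dequeue(vm_page_t m)
{
	/*
	 * Allocated pages require the page lock; a page with no object
	 * reference is being freed, so the flag may be set locklessly.
	 */
	KASSERT(mtx_owned(vm_page_lockptr(m)) || m->object == NULL,
	    ("PGA_DEQUEUE: missing synchronization for page %p", m));
}

static void
assert_may_clear_pga_dequeue(struct vm_pagequeue *pq)
{
	/* Only the holder of the page queue lock may clear PGA_DEQUEUE. */
	mtx_assert(&pq->pq_mutex, MA_OWNED);
}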