Simplify vm_page_dequeue() and fix an assertion.

- Add a vm_pagequeue_remove() function to physically remove a page
  from its queue and update the queue length.
- Remove vm_page_pagequeue_lockptr() and let vm_page_pagequeue()
  return NULL for dequeued pages.
- Avoid unnecessarily reloading the queue index if vm_page_dequeue()
  loses a race with a concurrent queue operation (the retry pattern is
  sketched below).
- Correct an always-true assertion: vm_page_dequeue() may be called
  from the page allocator with the page unlocked.  The assertion
  m->order == VM_NFREEORDER simply tests whether the page has been
  removed from the vm_phys free lists; instead, check whether the
  page belongs to an object.

Reviewed by:	kib
MFC after:	1 week
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D21341
commit 3eed5d3310
Author: markj
Date:   2019-08-21 16:11:12 +0000
Parent: b6626553da

3 files changed, 24 insertions(+), 30 deletions(-)
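The core of the change is the retry loop in vm_page_dequeue(): load the
page's queue, lock it, then re-check that the page did not migrate while
the lock was being acquired, reusing the pointer loaded by the failed
check instead of reloading it. Below is a minimal userspace analogue of
that pattern; every name in it (struct item, queue_of(), lock_item_queue(),
QNONE) is invented for illustration, it is not kernel code, and it omits
the kernel's spin-wait handshake with a concurrent
vm_page_dequeue_complete().

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stddef.h>

	#define	NQUEUES	4
	#define	QNONE	255			/* analogue of PQ_NONE */

	struct queue {
		pthread_mutex_t	lock;		/* assumed initialized elsewhere */
		int		cnt;
	};

	struct item {
		_Atomic unsigned char queue;	/* index into queues[], or QNONE */
	};

	static struct queue queues[NQUEUES];

	/* Analogue of the new vm_page_pagequeue(): NULL once dequeued. */
	static struct queue *
	queue_of(struct item *it)
	{
		unsigned char q;

		if ((q = atomic_load(&it->queue)) == QNONE)
			return (NULL);
		return (&queues[q]);
	}

	/*
	 * Lock the queue that the item currently belongs to.  If the
	 * item migrated while we slept on the lock, retry with the
	 * queue pointer loaded by the failed re-check rather than
	 * reloading it.
	 */
	static struct queue *
	lock_item_queue(struct item *it)
	{
		struct queue *q, *q1;

		for (q = queue_of(it);; q = q1) {
			if (q == NULL)
				return (NULL);	/* already dequeued */
			pthread_mutex_lock(&q->lock);
			if ((q1 = queue_of(it)) == q)
				return (q);	/* still on the same queue */
			pthread_mutex_unlock(&q->lock);
		}
	}

The pq1 variable in the diff below plays exactly the role of q1 here: a
successful re-check breaks out with the queue lock held, and a failed one
feeds the freshly observed queue into the next iteration.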

--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -3056,21 +3056,15 @@ vm_waitpfault(struct domainset *dset, int timo)
 	mtx_unlock(&vm_domainset_lock);
 }
 
-struct vm_pagequeue *
+static struct vm_pagequeue *
 vm_page_pagequeue(vm_page_t m)
 {
-
-	return (&vm_pagequeue_domain(m)->vmd_pagequeues[m->queue]);
-}
-
-static struct mtx *
-vm_page_pagequeue_lockptr(vm_page_t m)
-{
 	uint8_t queue;
 
 	if ((queue = atomic_load_8(&m->queue)) == PQ_NONE)
 		return (NULL);
-	return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue].pq_mutex);
+	return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]);
 }
 
 static inline void
@@ -3093,10 +3087,8 @@ vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m)
 	    m, pq, qflags));
 
 	if ((qflags & PGA_DEQUEUE) != 0) {
-		if (__predict_true((qflags & PGA_ENQUEUED) != 0)) {
-			TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
-			vm_pagequeue_cnt_dec(pq);
-		}
+		if (__predict_true((qflags & PGA_ENQUEUED) != 0))
+			vm_pagequeue_remove(pq, m);
 		vm_page_dequeue_complete(m);
 	} else if ((qflags & (PGA_REQUEUE | PGA_REQUEUE_HEAD)) != 0) {
 		if ((qflags & PGA_ENQUEUED) != 0)
@@ -3299,16 +3291,14 @@ vm_page_dequeue_deferred_free(vm_page_t m)
 void
 vm_page_dequeue(vm_page_t m)
 {
-	struct mtx *lock, *lock1;
-	struct vm_pagequeue *pq;
+	struct vm_pagequeue *pq, *pq1;
 	uint8_t aflags;
 
-	KASSERT(mtx_owned(vm_page_lockptr(m)) || m->order == VM_NFREEORDER,
+	KASSERT(mtx_owned(vm_page_lockptr(m)) || m->object == NULL,
 	    ("page %p is allocated and unlocked", m));
 
-	for (;;) {
-		lock = vm_page_pagequeue_lockptr(m);
-		if (lock == NULL) {
+	for (pq = vm_page_pagequeue(m);; pq = pq1) {
+		if (pq == NULL) {
 			/*
 			 * A thread may be concurrently executing
 			 * vm_page_dequeue_complete().  Ensure that all queue
@@ -3327,27 +3317,24 @@ vm_page_dequeue(vm_page_t m)
 			 * critical section.
 			 */
 			cpu_spinwait();
+			pq1 = vm_page_pagequeue(m);
 			continue;
 		}
-		mtx_lock(lock);
-		if ((lock1 = vm_page_pagequeue_lockptr(m)) == lock)
+		vm_pagequeue_lock(pq);
+		if ((pq1 = vm_page_pagequeue(m)) == pq)
 			break;
-		mtx_unlock(lock);
-		lock = lock1;
+		vm_pagequeue_unlock(pq);
 	}
-	KASSERT(lock == vm_page_pagequeue_lockptr(m),
+	KASSERT(pq == vm_page_pagequeue(m),
 	    ("%s: page %p migrated directly between queues", __func__, m));
 	KASSERT((m->aflags & PGA_DEQUEUE) != 0 ||
 	    mtx_owned(vm_page_lockptr(m)),
 	    ("%s: queued unlocked page %p", __func__, m));
 
-	if ((m->aflags & PGA_ENQUEUED) != 0) {
-		pq = vm_page_pagequeue(m);
-		TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
-		vm_pagequeue_cnt_dec(pq);
-	}
+	if ((m->aflags & PGA_ENQUEUED) != 0)
+		vm_pagequeue_remove(pq, m);
 	vm_page_dequeue_complete(m);
-	mtx_unlock(lock);
+	vm_pagequeue_unlock(pq);
 }
 
 /*

--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -552,7 +552,6 @@ void vm_page_launder(vm_page_t m);
 vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);
 int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
-struct vm_pagequeue *vm_page_pagequeue(vm_page_t m);
 vm_page_t vm_page_prev(vm_page_t m);
 bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
 void vm_page_putfake(vm_page_t m);

--- a/sys/vm/vm_pagequeue.h
+++ b/sys/vm/vm_pagequeue.h
@@ -198,6 +198,14 @@ vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
 #define	vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
 #define	vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)
 
+static inline void
+vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
+{
+
+	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
+	vm_pagequeue_cnt_dec(pq);
+}
+
 static inline void
 vm_batchqueue_init(struct vm_batchqueue *bq)
 {