Make vm_pqbatch_submit_page() externally visible.

It will be useful for the page daemon to be able to create a batch
queue entry for a page directly, without modifying the page
structure.  Rename vm_pqbatch_submit_page() to vm_page_pqbatch_submit()
to keep the namespace consistent.  No functional change intended.

Reviewed by:	alc, kib
MFC after:	1 week
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D21369
Author:	Mark Johnston
Date:	2019-08-23 19:49:29 +00:00
Parent:	e671edac06
Commit:	386eba08bd

3 changed files with 12 additions and 11 deletions
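For orientation, below is a minimal, self-contained sketch of the batching
pattern behind vm_page_pqbatch_submit(): deferred queue operations accumulate
in a small buffer and are applied to the page queue in one pass when the
buffer fills.  This is an illustrative model only, not kernel code; the names
batchqueue and BQ_SIZE merely echo the kernel's vm_batchqueue and
VM_BATCHQUEUE_SIZE, and the real implementation keeps one batch per CPU per
page queue and processes it under the page queue lock.

#include <stdio.h>

#define	BQ_SIZE	8			/* echoes VM_BATCHQUEUE_SIZE */

struct page {				/* stand-in for struct vm_page */
	int	id;
};

struct batchqueue {			/* stand-in for struct vm_batchqueue */
	struct page	*bq_pa[BQ_SIZE];
	int		 bq_cnt;
};

/*
 * Apply every deferred operation in one pass.  In the kernel this is
 * where the page queue lock is taken once for the whole batch.
 */
static void
batchqueue_process(struct batchqueue *bq)
{

	for (int i = 0; i < bq->bq_cnt; i++)
		printf("deferred queue op for page %d\n", bq->bq_pa[i]->id);
	bq->bq_cnt = 0;
}

/*
 * Defer an operation on a page, flushing only when the batch fills;
 * this amortizes one lock acquisition across BQ_SIZE submissions.
 */
static void
batchqueue_submit(struct batchqueue *bq, struct page *m)
{

	if (bq->bq_cnt == BQ_SIZE)
		batchqueue_process(bq);
	bq->bq_pa[bq->bq_cnt++] = m;
}

int
main(void)
{
	struct batchqueue bq = { .bq_cnt = 0 };
	struct page pages[20];

	for (int i = 0; i < 20; i++) {
		pages[i].id = i;
		batchqueue_submit(&bq, &pages[i]);
	}
	batchqueue_process(&bq);	/* final drain, as the daemon does */
	return (0);
}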

--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -3130,8 +3130,8 @@ vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
 	vm_batchqueue_init(bq);
 }
 
-static void
-vm_pqbatch_submit_page(vm_page_t m, uint8_t queue)
+void
+vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
 {
 	struct vm_batchqueue *bq;
 	struct vm_pagequeue *pq;
@@ -3181,14 +3181,14 @@ vm_pqbatch_submit_page(vm_page_t m, uint8_t queue)
 }
 
 /*
- * vm_page_drain_pqbatch:	[ internal use only ]
+ * vm_page_pqbatch_drain:	[ internal use only ]
  *
  *	Force all per-CPU page queue batch queues to be drained.  This is
  *	intended for use in severe memory shortages, to ensure that pages
  *	do not remain stuck in the batch queues.
  */
 void
-vm_page_drain_pqbatch(void)
+vm_page_pqbatch_drain(void)
 {
 	struct thread *td;
 	struct vm_domain *vmd;
@@ -3253,7 +3253,7 @@ vm_page_dequeue_deferred(vm_page_t m)
 	if ((queue = vm_page_queue(m)) == PQ_NONE)
 		return;
 	vm_page_aflag_set(m, PGA_DEQUEUE);
-	vm_pqbatch_submit_page(m, queue);
+	vm_page_pqbatch_submit(m, queue);
 }
 
 /*
@@ -3277,7 +3277,7 @@ vm_page_dequeue_deferred_free(vm_page_t m)
 	if ((queue = m->queue) == PQ_NONE)
 		return;
 	vm_page_aflag_set(m, PGA_DEQUEUE);
-	vm_pqbatch_submit_page(m, queue);
+	vm_page_pqbatch_submit(m, queue);
 }
 
 /*
@@ -3352,7 +3352,7 @@ vm_page_enqueue(vm_page_t m, uint8_t queue)
 	m->queue = queue;
 	if ((m->aflags & PGA_REQUEUE) == 0)
 		vm_page_aflag_set(m, PGA_REQUEUE);
-	vm_pqbatch_submit_page(m, queue);
+	vm_page_pqbatch_submit(m, queue);
 }
 
 /*
@@ -3372,7 +3372,7 @@ vm_page_requeue(vm_page_t m)
 
 	if ((m->aflags & PGA_REQUEUE) == 0)
 		vm_page_aflag_set(m, PGA_REQUEUE);
-	vm_pqbatch_submit_page(m, atomic_load_8(&m->queue));
+	vm_page_pqbatch_submit(m, atomic_load_8(&m->queue));
 }
 
 /*
@@ -3700,7 +3700,7 @@ vm_page_deactivate_noreuse(vm_page_t m)
 	}
 	if ((m->aflags & PGA_REQUEUE_HEAD) == 0)
 		vm_page_aflag_set(m, PGA_REQUEUE_HEAD);
-	vm_pqbatch_submit_page(m, PQ_INACTIVE);
+	vm_page_pqbatch_submit(m, PQ_INACTIVE);
 }
 
 /*
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -542,7 +542,6 @@ void vm_page_deactivate(vm_page_t);
 void vm_page_deactivate_noreuse(vm_page_t);
 void vm_page_dequeue(vm_page_t m);
 void vm_page_dequeue_deferred(vm_page_t m);
-void vm_page_drain_pqbatch(void);
 vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
 bool vm_page_free_prep(vm_page_t m);
 vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
@@ -552,6 +551,8 @@ void vm_page_launder(vm_page_t m);
 vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);
 int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
+void vm_page_pqbatch_drain(void);
+void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
 vm_page_t vm_page_prev(vm_page_t m);
 bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
 void vm_page_putfake(vm_page_t m);
--- a/sys/vm/vm_swapout.c
+++ b/sys/vm/vm_swapout.c
@@ -409,7 +409,7 @@ vm_daemon(void)
 		 * avoidance measure.
 		 */
 		if ((swapout_flags & VM_SWAP_NORMAL) != 0)
-			vm_page_drain_pqbatch();
+			vm_page_pqbatch_drain();
 
 		swapout_procs(swapout_flags);
 	}
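As a usage sketch, an external caller such as the page daemon could now defer
a queue operation while touching only the page's atomic flags.  The function
name and include set below are hypothetical; the two-step pattern (set a
request aflag, then submit the page to the per-CPU batch) is the same one the
vm_page.c hunks above use in vm_page_requeue():

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Hypothetical caller, for illustration only: request a deferred
 * requeue through the page's atomic flags, then hand the page to
 * the per-CPU batch for its current queue.
 */
static void
example_requeue_deferred(vm_page_t m)
{

	if ((m->aflags & PGA_REQUEUE) == 0)
		vm_page_aflag_set(m, PGA_REQUEUE);
	vm_page_pqbatch_submit(m, atomic_load_8(&m->queue));
}

The vm_swapout.c hunk shows the other half of the interface: because
submissions sit in per-CPU batches until a batch fills, vm_daemon() calls
vm_page_pqbatch_drain() during a memory shortage so that pages do not linger
there, exactly the case described in the comment on the renamed function.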