From 386eba08bd1b1678ef846c13b12c2075f55510ae Mon Sep 17 00:00:00 2001
From: Mark Johnston <markj@FreeBSD.org>
Date: Fri, 23 Aug 2019 19:49:29 +0000
Subject: [PATCH] Make vm_pqbatch_submit_page() externally visible.

It will become useful for the page daemon to be able to directly create
a batch queue entry for a page without modifying the page structure.

Rename vm_pqbatch_submit_page() to vm_page_pqbatch_submit() to keep the
namespace consistent.

No functional change intended.

Reviewed by:	alc, kib
MFC after:	1 week
Sponsored by:	Netflix
Differential Revision:	https://reviews.freebsd.org/D21369
---
 sys/vm/vm_page.c    | 18 +++++++++---------
 sys/vm/vm_page.h    |  3 ++-
 sys/vm/vm_swapout.c |  2 +-
 3 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 9e1e308be117..324b46b695ab 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -3130,8 +3130,8 @@ vm_pqbatch_process(struct vm_pagequeue *pq, struct vm_batchqueue *bq,
 	vm_batchqueue_init(bq);
 }
 
-static void
-vm_pqbatch_submit_page(vm_page_t m, uint8_t queue)
+void
+vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
 {
 	struct vm_batchqueue *bq;
 	struct vm_pagequeue *pq;
@@ -3181,14 +3181,14 @@ vm_pqbatch_submit_page(vm_page_t m, uint8_t queue)
 }
 
 /*
- *	vm_page_drain_pqbatch:		[ internal use only ]
+ *	vm_page_pqbatch_drain:		[ internal use only ]
  *
  *	Force all per-CPU page queue batch queues to be drained.  This is
  *	intended for use in severe memory shortages, to ensure that pages
  *	do not remain stuck in the batch queues.
  */
 void
-vm_page_drain_pqbatch(void)
+vm_page_pqbatch_drain(void)
 {
 	struct thread *td;
 	struct vm_domain *vmd;
@@ -3253,7 +3253,7 @@ vm_page_dequeue_deferred(vm_page_t m)
 	if ((queue = vm_page_queue(m)) == PQ_NONE)
 		return;
 	vm_page_aflag_set(m, PGA_DEQUEUE);
-	vm_pqbatch_submit_page(m, queue);
+	vm_page_pqbatch_submit(m, queue);
 }
 
 /*
@@ -3277,7 +3277,7 @@ vm_page_dequeue_deferred_free(vm_page_t m)
 	if ((queue = m->queue) == PQ_NONE)
 		return;
 	vm_page_aflag_set(m, PGA_DEQUEUE);
-	vm_pqbatch_submit_page(m, queue);
+	vm_page_pqbatch_submit(m, queue);
 }
 
 /*
@@ -3352,7 +3352,7 @@ vm_page_enqueue(vm_page_t m, uint8_t queue)
 	m->queue = queue;
 	if ((m->aflags & PGA_REQUEUE) == 0)
 		vm_page_aflag_set(m, PGA_REQUEUE);
-	vm_pqbatch_submit_page(m, queue);
+	vm_page_pqbatch_submit(m, queue);
 }
 
 /*
@@ -3372,7 +3372,7 @@ vm_page_requeue(vm_page_t m)
 
 	if ((m->aflags & PGA_REQUEUE) == 0)
 		vm_page_aflag_set(m, PGA_REQUEUE);
-	vm_pqbatch_submit_page(m, atomic_load_8(&m->queue));
+	vm_page_pqbatch_submit(m, atomic_load_8(&m->queue));
 }
 
 /*
@@ -3700,7 +3700,7 @@ vm_page_deactivate_noreuse(vm_page_t m)
 	}
 	if ((m->aflags & PGA_REQUEUE_HEAD) == 0)
 		vm_page_aflag_set(m, PGA_REQUEUE_HEAD);
-	vm_pqbatch_submit_page(m, PQ_INACTIVE);
+	vm_page_pqbatch_submit(m, PQ_INACTIVE);
 }
 
 /*
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 9154af8fc894..021df5be637f 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -542,7 +542,6 @@ void vm_page_deactivate(vm_page_t);
 void vm_page_deactivate_noreuse(vm_page_t);
 void vm_page_dequeue(vm_page_t m);
 void vm_page_dequeue_deferred(vm_page_t m);
-void vm_page_drain_pqbatch(void);
 vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
 bool vm_page_free_prep(vm_page_t m);
 vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
@@ -552,6 +551,8 @@ void vm_page_launder(vm_page_t m);
 vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);
 int vm_page_pa_tryrelock(pmap_t, vm_paddr_t, vm_paddr_t *);
+void vm_page_pqbatch_drain(void);
+void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
 vm_page_t vm_page_prev(vm_page_t m);
 bool vm_page_ps_test(vm_page_t m, int flags, vm_page_t skip_m);
 void vm_page_putfake(vm_page_t m);
diff --git a/sys/vm/vm_swapout.c b/sys/vm/vm_swapout.c
index 27e3346af3a1..42b320fb92d5 100644
--- a/sys/vm/vm_swapout.c
+++ b/sys/vm/vm_swapout.c
@@ -409,7 +409,7 @@ vm_daemon(void)
 		 * avoidance measure.
 		 */
 		if ((swapout_flags & VM_SWAP_NORMAL) != 0)
-			vm_page_drain_pqbatch();
+			vm_page_pqbatch_drain();
 		swapout_procs(swapout_flags);
 	}
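
Note, not part of the patch: a minimal sketch of how an external caller such
as the page daemon might use the newly exported interface. The helper name
below is hypothetical; its body mirrors the vm_page_deactivate_noreuse() hunk
above, and every call it makes appears in this patch.

#include <sys/param.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Hypothetical helper: lazily move a page to the head of the inactive
 * queue via the per-CPU batch queues, deferring acquisition of the
 * page queue lock.  Assumes the page lock is held, as in the existing
 * callers of vm_page_pqbatch_submit().
 */
static void
example_requeue_inactive_head(vm_page_t m)
{

	if ((m->aflags & PGA_REQUEUE_HEAD) == 0)
		vm_page_aflag_set(m, PGA_REQUEUE_HEAD);
	vm_page_pqbatch_submit(m, PQ_INACTIVE);
}

Because submissions are buffered per CPU, entries can linger in the batch
queues; under severe memory shortage a caller can flush them with
vm_page_pqbatch_drain(), as vm_daemon() does in the vm_swapout.c hunk.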