Initialize marker pages in vm_page_domain_init().

They were previously initialized by the corresponding page daemon
threads, but for vmd_inacthead this may be too late if
vm_page_deactivate_noreuse() is called during boot.

Reported and tested by:	cperciva
Reviewed by:	alc, kib
MFC after:	1 week
Mark Johnston 2018-04-19 14:09:44 +00:00
parent 3ee9c3c4eb
commit 64b3893010
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=332771
4 changed files with 44 additions and 42 deletions
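
For context, the change centralizes the usual marker-entry scan pattern: a dummy page flagged PG_MARKER is inserted into a page queue to hold a scan's place so that the queue lock can be dropped and the scan resumed afterwards. The snippet below is a minimal userspace sketch of that pattern using <sys/queue.h>; the type, field, and variable names (struct entry, is_marker, and so on) are illustrative only and are not part of the kernel sources, which use struct vm_page markers and the page queue lock as shown in the hunks that follow.

/*
 * Illustrative userspace analogue of the marker-entry scan pattern
 * (not kernel code): a dummy element remembers the scanner's position
 * so that the list may change while the scanner "drops the lock".
 * Build on FreeBSD with: cc -o markerdemo markerdemo.c
 */
#include <sys/queue.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	TAILQ_ENTRY(entry)	link;
	int			val;
	bool			is_marker;	/* analogue of PG_MARKER */
};

TAILQ_HEAD(entryq, entry);

int
main(void)
{
	struct entryq q = TAILQ_HEAD_INITIALIZER(q);
	struct entry nodes[4], marker = { .is_marker = true };
	struct entry *e, *next;

	for (int i = 0; i < 4; i++) {
		nodes[i].val = i;
		nodes[i].is_marker = false;
		TAILQ_INSERT_TAIL(&q, &nodes[i], link);
	}

	for (e = TAILQ_FIRST(&q); e != NULL; e = next) {
		if (e->is_marker) {
			/* Skip markers, as the pageout scans do. */
			next = TAILQ_NEXT(e, link);
			continue;
		}
		/*
		 * Park the marker after the current element before
		 * "unlocking"; other threads could then insert or remove
		 * entries without invalidating our position.
		 */
		TAILQ_INSERT_AFTER(&q, e, &marker, link);
		printf("processing %d\n", e->val);	/* work done "unlocked" */
		/* "Relock" and resume the scan from the marker. */
		next = TAILQ_NEXT(&marker, link);
		TAILQ_REMOVE(&q, &marker, link);
	}
	return (0);
}

In the kernel hunks below, the same steps appear as TAILQ_INSERT_AFTER(&pq->pq_pl, m, marker, plinks.q), the work done after vm_pagequeue_unlock(pq), and the resume via TAILQ_NEXT(marker, plinks.q) followed by TAILQ_REMOVE(), now driven by the per-queue vmd_markers[] initialized in vm_page_domain_init().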

@@ -437,6 +437,23 @@ sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
return (error);
}
/*
* Initialize a dummy page for use in scans of the specified paging queue.
* In principle, this function only needs to set the flag PG_MARKER.
* Nonetheless, it write busies and initializes the hold count to one as
* safety precautions.
*/
void
vm_page_init_marker(vm_page_t marker, int queue)
{
bzero(marker, sizeof(*marker));
marker->flags = PG_MARKER;
marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
marker->queue = queue;
marker->hold_count = 1;
}
static void
vm_page_domain_init(int domain)
{
@@ -464,9 +481,13 @@ vm_page_domain_init(int domain)
TAILQ_INIT(&pq->pq_pl);
mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
MTX_DEF | MTX_DUPOK);
vm_page_init_marker(&vmd->vmd_markers[i], i);
}
mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
vm_page_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE);
TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
&vmd->vmd_inacthead, plinks.q);
snprintf(vmd->vmd_name, sizeof(vmd->vmd_name), "%d", domain);
}

@@ -490,6 +490,7 @@ void vm_page_free_phys_pglist(struct pglist *tq);
bool vm_page_free_prep(vm_page_t m, bool pagequeue_locked);
vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
void vm_page_init_marker(vm_page_t m, int queue);
int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
void vm_page_launder(vm_page_t m);
vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);

@@ -207,23 +207,6 @@ static int vm_pageout_launder(struct vm_domain *vmd, int launder,
static void vm_pageout_laundry_worker(void *arg);
static boolean_t vm_pageout_page_lock(vm_page_t, vm_page_t *);
/*
* Initialize a dummy page for marking the caller's place in the specified
* paging queue. In principle, this function only needs to set the flag
* PG_MARKER. Nonetheless, it write busies and initializes the hold count
* to one as safety precautions.
*/
static void
vm_pageout_init_marker(vm_page_t marker, u_short queue)
{
bzero(marker, sizeof(*marker));
marker->flags = PG_MARKER;
marker->busy_lock = VPB_SINGLE_EXCLUSIVER;
marker->queue = queue;
marker->hold_count = 1;
}
/*
* vm_pageout_fallback_object_lock:
*
@@ -244,11 +227,11 @@ vm_pageout_fallback_object_lock(vm_page_t m, vm_page_t *next)
struct vm_page marker;
struct vm_pagequeue *pq;
boolean_t unchanged;
u_short queue;
vm_object_t object;
int queue;
queue = m->queue;
vm_pageout_init_marker(&marker, queue);
vm_page_init_marker(&marker, queue);
pq = vm_page_pagequeue(m);
object = m->object;
@@ -293,14 +276,14 @@ vm_pageout_page_lock(vm_page_t m, vm_page_t *next)
struct vm_page marker;
struct vm_pagequeue *pq;
boolean_t unchanged;
u_short queue;
int queue;
vm_page_lock_assert(m, MA_NOTOWNED);
if (vm_page_trylock(m))
return (TRUE);
queue = m->queue;
vm_pageout_init_marker(&marker, queue);
vm_page_init_marker(&marker, queue);
pq = vm_page_pagequeue(m);
TAILQ_INSERT_AFTER(&pq->pq_pl, m, &marker, plinks.q);
@@ -694,8 +677,8 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
{
struct vm_pagequeue *pq;
vm_object_t object;
vm_page_t m, next;
int act_delta, error, maxscan, numpagedout, starting_target;
vm_page_t m, marker, next;
int act_delta, error, maxscan, numpagedout, queue, starting_target;
int vnodes_skipped;
bool pageout_ok, queue_locked;
@@ -716,11 +699,14 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
* swap devices are configured.
*/
if (atomic_load_acq_int(&swapdev_enabled))
pq = &vmd->vmd_pagequeues[PQ_UNSWAPPABLE];
queue = PQ_UNSWAPPABLE;
else
pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
queue = PQ_LAUNDRY;
scan:
pq = &vmd->vmd_pagequeues[queue];
marker = &vmd->vmd_markers[queue];
vm_pagequeue_lock(pq);
maxscan = pq->pq_cnt;
queue_locked = true;
@@ -762,8 +748,7 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
* Unlock the laundry queue, invalidating the 'next' pointer.
* Use a marker to remember our place in the laundry queue.
*/
TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_laundry_marker,
plinks.q);
TAILQ_INSERT_AFTER(&pq->pq_pl, m, marker, plinks.q);
vm_pagequeue_unlock(pq);
queue_locked = false;
@@ -889,13 +874,13 @@ vm_pageout_launder(struct vm_domain *vmd, int launder, bool in_shortfall)
vm_pagequeue_lock(pq);
queue_locked = true;
}
next = TAILQ_NEXT(&vmd->vmd_laundry_marker, plinks.q);
TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_laundry_marker, plinks.q);
next = TAILQ_NEXT(marker, plinks.q);
TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
}
vm_pagequeue_unlock(pq);
if (launder > 0 && pq == &vmd->vmd_pagequeues[PQ_UNSWAPPABLE]) {
pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
if (launder > 0 && queue == PQ_UNSWAPPABLE) {
queue = PQ_LAUNDRY;
goto scan;
}
@@ -951,7 +936,6 @@ vm_pageout_laundry_worker(void *arg)
vmd = VM_DOMAIN(domain);
pq = &vmd->vmd_pagequeues[PQ_LAUNDRY];
KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
vm_pageout_init_marker(&vmd->vmd_laundry_marker, PQ_LAUNDRY);
shortfall = 0;
in_shortfall = false;
@@ -1105,7 +1089,7 @@ vm_pageout_laundry_worker(void *arg)
static bool
vm_pageout_scan(struct vm_domain *vmd, int pass, int shortage)
{
vm_page_t m, next;
vm_page_t m, marker, next;
struct vm_pagequeue *pq;
vm_object_t object;
long min_scan;
@@ -1159,6 +1143,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass, int shortage)
* decisions for the inactive queue, only for the active queue.)
*/
pq = &vmd->vmd_pagequeues[PQ_INACTIVE];
marker = &vmd->vmd_markers[PQ_INACTIVE];
maxscan = pq->pq_cnt;
vm_pagequeue_lock(pq);
queue_locked = TRUE;
@@ -1250,7 +1235,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass, int shortage)
* vm_page_free(), or vm_page_launder() is called. Use a
* marker to remember our place in the inactive queue.
*/
TAILQ_INSERT_AFTER(&pq->pq_pl, m, &vmd->vmd_marker, plinks.q);
TAILQ_INSERT_AFTER(&pq->pq_pl, m, marker, plinks.q);
vm_page_dequeue_locked(m);
vm_pagequeue_unlock(pq);
queue_locked = FALSE;
@@ -1336,8 +1321,8 @@ vm_pageout_scan(struct vm_domain *vmd, int pass, int shortage)
vm_pagequeue_lock(pq);
queue_locked = TRUE;
}
next = TAILQ_NEXT(&vmd->vmd_marker, plinks.q);
TAILQ_REMOVE(&pq->pq_pl, &vmd->vmd_marker, plinks.q);
next = TAILQ_NEXT(marker, plinks.q);
TAILQ_REMOVE(&pq->pq_pl, marker, plinks.q);
}
vm_pagequeue_unlock(pq);
@@ -1781,10 +1766,6 @@ vm_pageout_worker(void *arg)
KASSERT(vmd->vmd_segs != 0, ("domain without segments"));
vmd->vmd_last_active_scan = ticks;
vm_pageout_init_marker(&vmd->vmd_marker, PQ_INACTIVE);
vm_pageout_init_marker(&vmd->vmd_inacthead, PQ_INACTIVE);
TAILQ_INSERT_HEAD(&vmd->vmd_pagequeues[PQ_INACTIVE].pq_pl,
&vmd->vmd_inacthead, plinks.q);
/*
* The pageout daemon worker is never done, so loop forever.

@@ -107,8 +107,7 @@ struct vm_domain {
boolean_t vmd_oom;
int vmd_oom_seq;
int vmd_last_active_scan;
struct vm_page vmd_laundry_marker;
struct vm_page vmd_marker; /* marker for pagedaemon private use */
struct vm_page vmd_markers[PQ_COUNT]; /* markers for queue scans */
struct vm_page vmd_inacthead; /* marker for LRU-defeating insertions */
int vmd_pageout_wanted; /* (a, p) pageout daemon wait channel */