The vmem callback that reclaims kmem arena address space under low or
fragmented conditions currently just wakes up the pagedaemon.  The
kmem arena is significantly smaller than the total available physical
memory, so there are workloads where kmem arena space can be exhausted
while plenty of pages are still available.  The woken pagedaemon sees
vm_pages_needed != 0, checks vm_paging_needed(), which is false,
clears the pass, and goes back to sleep without calling either
uma_reclaim() or the lowmem handlers.
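
To illustrate the mismatch, here is a toy userspace model (not FreeBSD
kernel code; the sizes and the paging_needed() stand-in are invented
for illustration).  A kmem-heavy load can drain the arena completely
while the free page count stays far above the paging target, so the
shortage test that gates the pagedaemon's work never fires:

/* Toy model: kmem arena exhaustion without a page shortage. */
#include <stdbool.h>
#include <stdio.h>

#define	PHYS_PAGES	1048576UL	/* 4 GB of 4 KB pages */
#define	KMEM_PAGES	262144UL	/* 1 GB kmem arena: much smaller */
#define	FREE_TARGET	(PHYS_PAGES / 50)	/* simplified paging target */

static unsigned long vm_cnt_free = PHYS_PAGES / 2;	/* plenty free */
static unsigned long kmem_used;

/* Simplified stand-in for vm_paging_needed(): page shortage only. */
static bool
paging_needed(void)
{

	return (vm_cnt_free < FREE_TARGET);
}

int
main(void)
{

	kmem_used = KMEM_PAGES;		/* the arena is fully allocated */
	printf("kmem arena exhausted: %s\n",
	    kmem_used >= KMEM_PAGES ? "yes" : "no");
	printf("paging_needed(): %s -> pagedaemon goes back to sleep\n",
	    paging_needed() ? "true" : "false");
	return (0);
}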

To handle low kmem arena conditions, create an additional pagedaemon
thread which calls uma_reclaim() directly.  The thread sleeps on a
dedicated channel, and kmem_reclaim() wakes it in addition to the
pagedaemon.
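
The handshake added below (the uma_reclaim_needed flag plus
sx_sleep()/wakeup()) is the usual sleep-channel pattern: the worker
re-tests the flag after every sleep, so spurious or coalesced wakeups
are harmless.  As a rough userspace analogy (pthreads; names invented,
and stricter than the kernel version in that the waker takes the lock):

/* Userspace analogy of the uma reclaim worker handshake. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t reclaim_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reclaim_cv = PTHREAD_COND_INITIALIZER;
static int reclaim_needed;		/* mirrors uma_reclaim_needed */

/* Analogue of uma_reclaim_wakeup(): set the flag, wake the worker. */
static void
reclaim_wakeup(void)
{

	pthread_mutex_lock(&reclaim_lock);
	reclaim_needed = 1;
	pthread_cond_signal(&reclaim_cv);
	pthread_mutex_unlock(&reclaim_lock);
}

/* Analogue of uma_reclaim_worker(): sleep, re-test the flag, reclaim. */
static void *
reclaim_worker(void *arg)
{

	(void)arg;
	pthread_mutex_lock(&reclaim_lock);
	for (;;) {
		/* Like sx_sleep(): drops and retakes the lock around the wait. */
		pthread_cond_wait(&reclaim_cv, &reclaim_lock);
		if (reclaim_needed) {
			reclaim_needed = 0;
			printf("reclaim (uma_reclaim_locked(true))\n");
		}
	}
	return (NULL);
}

int
main(void)
{
	pthread_t td;

	pthread_create(&td, NULL, reclaim_worker, NULL);
	sleep(1);		/* let the worker reach its sleep */
	reclaim_wakeup();	/* what kmem_reclaim() does in the patch */
	sleep(1);		/* give the worker time to run */
	return (0);
}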

Reported and tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
Konstantin Belousov	2015-05-09 20:08:36 +00:00
commit 44ec2b63c5	parent 4bc8ff0802
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=282690
4 changed files with 51 additions and 7 deletions

sys/kern/kern_malloc.c

@@ -665,13 +665,15 @@ reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
 }
 
 /*
- * Wake the page daemon when we exhaust KVA. It will call the lowmem handler
- * and uma_reclaim() callbacks in a context that is safe.
+ * Wake the uma reclamation pagedaemon thread when we exhaust KVA. It
+ * will call the lowmem handler and uma_reclaim() callbacks in a
+ * context that is safe.
  */
 static void
 kmem_reclaim(vmem_t *vm, int flags)
 {
 
+	uma_reclaim_wakeup();
 	pagedaemon_wakeup();
 }

sys/vm/uma.h

@@ -690,4 +690,7 @@ struct uma_percpu_stat {
 	uint64_t	_ups_reserved[5];	/* Reserved. */
 };
 
+void uma_reclaim_wakeup(void);
+void uma_reclaim_worker(void *);
+
 #endif /* _VM_UMA_H_ */

sys/vm/uma_core.c

@@ -3222,16 +3222,17 @@ uma_find_refcnt(uma_zone_t zone, void *item)
 }
 
 /* See uma.h */
-void
-uma_reclaim(void)
+static void
+uma_reclaim_locked(bool kmem_danger)
 {
+
 #ifdef UMA_DEBUG
 	printf("UMA: vm asked us to release pages!\n");
 #endif
-	sx_xlock(&uma_drain_lock);
+	sx_assert(&uma_drain_lock, SA_XLOCKED);
 	bucket_enable();
 	zone_foreach(zone_drain);
-	if (vm_page_count_min()) {
+	if (vm_page_count_min() || kmem_danger) {
 		cache_drain_safe(NULL);
 		zone_foreach(zone_drain);
 	}
@@ -3243,9 +3244,42 @@ uma_reclaim(void)
 	zone_drain(slabzone);
 	zone_drain(slabrefzone);
 	bucket_zone_drain();
+}
+
+void
+uma_reclaim(void)
+{
+
+	sx_xlock(&uma_drain_lock);
+	uma_reclaim_locked(false);
 	sx_xunlock(&uma_drain_lock);
 }
+
+static int uma_reclaim_needed;
+
+void
+uma_reclaim_wakeup(void)
+{
+
+	uma_reclaim_needed = 1;
+	wakeup(&uma_reclaim_needed);
+}
+
+void
+uma_reclaim_worker(void *arg __unused)
+{
+
+	sx_xlock(&uma_drain_lock);
+	for (;;) {
+		sx_sleep(&uma_reclaim_needed, &uma_drain_lock, PVM,
+		    "umarcl", 0);
+		if (uma_reclaim_needed) {
+			uma_reclaim_needed = 0;
+			uma_reclaim_locked(true);
+		}
+	}
+}
 
 /* See uma.h */
 int
 uma_zone_exhausted(uma_zone_t zone)

sys/vm/vm_pageout.c

@@ -1724,8 +1724,9 @@ vm_pageout_init(void)
 static void
 vm_pageout(void)
 {
+	int error;
 #if MAXMEMDOM > 1
-	int error, i;
+	int i;
 #endif
 
 	swap_pager_swap_init();
@@ -1739,6 +1740,10 @@ vm_pageout(void)
 		}
 	}
 #endif
+	error = kthread_add(uma_reclaim_worker, NULL, curproc, NULL,
+	    0, 0, "uma");
+	if (error != 0)
+		panic("starting uma_reclaim helper, error %d\n", error);
 	vm_pageout_worker((void *)(uintptr_t)0);
 }
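
Note: vm_pageout() itself runs as the pagedaemon process, so passing
curproc to kthread_add() attaches the new "uma" worker thread to that
process, and the panic() on failure treats a missing reclamation path
as fatal, consistent with how the per-domain pageout workers are
started just above.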