Swap in WKILLED processes.

A swapped-out process that is WKILLED must be swapped in as soon as
possible.  The reason is that such a process can be killed by the OOM
handler, and its pages can only be freed if the process exits.  To
exit, the kernel stack of the process must be mapped.
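
Concretely, the new swapper_selector() in the vm_swapout.c hunk below
short-circuits on such a process instead of scoring it by sleep time
and priority; a condensed sketch of that check (names as in the diff):

	/*
	 * A killed, swapped-out process is chosen before any
	 * priority-based selection: its memory can only be released
	 * once it exits, and exiting needs the kernel stack resident.
	 */
	if (p->p_state == PRS_NORMAL && (p->p_flag & P_WKILLED) != 0)
		return (p);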

When allocating pages for the stack of a WKILLED process on swap-in,
use VM_ALLOC_SYSTEM requests to increase the chance that the
allocation succeeds.
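
Condensed sketch of how the allocation class is chosen and passed
down to the stack page allocation (as in the faultin() and
vm_thread_swapin() hunks below):

	/* faultin(): prefer the system reserve for a killed process. */
	oom_alloc = (p->p_flag & P_WKILLED) != 0 ? VM_ALLOC_SYSTEM :
	    VM_ALLOC_NORMAL;

	/* vm_thread_swapin(): the class reaches vm_page_grab_pages(). */
	(void)vm_page_grab_pages(ksobj, 0, oom_alloc | VM_ALLOC_WIRED, ma,
	    pages);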

Add a counter of swapped-out processes to avoid needless iteration
over the allproc list when there is no work to do, reducing the time
allproc_lock is held.
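
Condensed sketch of the bookkeeping (as in the vm_swapout.c hunks
below; swapped_cnt is protected by the allproc_lock):

	/* swapper_selector(): nothing swapped out means nothing to scan. */
	sx_assert(&allproc_lock, SA_SLOCKED);
	if (swapped_cnt == 0)
		return (NULL);

	/* swapout_procs(): one more process is now swapped out. */
	sx_xlock(&allproc_lock);
	swapped_cnt++;
	sx_downgrade(&allproc_lock);

	/* faultin(): the process is being brought back in. */
	sx_xlock(&allproc_lock);
	MPASS(swapped_cnt > 0);
	swapped_cnt--;
	sx_xunlock(&allproc_lock);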

Reviewed by:	alc, markj (previous version)
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
Differential revision:	https://reviews.freebsd.org/D16489
Konstantin Belousov 2018-08-04 20:45:43 +00:00
parent 9f21643182
commit a70e9a1388
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=337330
4 changed files with 102 additions and 49 deletions


@ -3064,6 +3064,23 @@ postsig(int sig)
return (1);
}
void
proc_wkilled(struct proc *p)
{
PROC_LOCK_ASSERT(p, MA_OWNED);
if ((p->p_flag & P_WKILLED) == 0) {
p->p_flag |= P_WKILLED;
/*
* Notify swapper that there is a process to swap in.
* The notification is racy, at worst it would take 10
* seconds for the swapper process to notice.
*/
if ((p->p_flag & (P_INMEM | P_SWAPPINGIN)) == 0)
wakeup(&proc0);
}
}
/*
* Kill the current process for stated reason.
*/
@ -3076,7 +3093,7 @@ killproc(struct proc *p, char *why)
p->p_comm);
log(LOG_ERR, "pid %d (%s), uid %d, was killed: %s\n", p->p_pid,
p->p_comm, p->p_ucred ? p->p_ucred->cr_uid : -1, why);
p->p_flag |= P_WKILLED;
proc_wkilled(p);
kern_psignal(p, SIGKILL);
}


@ -1170,7 +1170,7 @@ kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
* queue cannot accommodate any new signals.
*/
if (data == SIGKILL)
p->p_flag |= P_WKILLED;
proc_wkilled(p);
/*
* Unsuspend all threads. To leave a thread


@ -1050,6 +1050,7 @@ struct proc *proc_realparent(struct proc *child);
void proc_reap(struct thread *td, struct proc *p, int *status, int options);
void proc_reparent(struct proc *child, struct proc *newparent);
void proc_set_traced(struct proc *p, bool stop);
void proc_wkilled(struct proc *p);
struct pstats *pstats_alloc(void);
void pstats_fork(struct pstats *src, struct pstats *dst);
void pstats_free(struct pstats *ps);


@ -158,13 +158,14 @@ static struct mtx vm_daemon_mtx;
/* Allow for use by vm_pageout before vm_daemon is initialized. */
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF);
static int swapped_cnt;
static void swapclear(struct proc *);
static int swapout(struct proc *);
static void vm_swapout_map_deactivate_pages(vm_map_t, long);
static void vm_swapout_object_deactivate_pages(pmap_t, vm_object_t, long);
static void swapout_procs(int action);
static void vm_req_vmdaemon(int req);
static void vm_thread_swapin(struct thread *td);
static void vm_thread_swapout(struct thread *td);
/*
@ -563,7 +564,7 @@ vm_thread_swapout(struct thread *td)
* Bring the kernel stack for a specified thread back in.
*/
static void
vm_thread_swapin(struct thread *td)
vm_thread_swapin(struct thread *td, int oom_alloc)
{
vm_object_t ksobj;
vm_page_t ma[KSTACK_MAX_PAGES];
@ -572,7 +573,7 @@ vm_thread_swapin(struct thread *td)
pages = td->td_kstack_pages;
ksobj = td->td_kstack_obj;
VM_OBJECT_WLOCK(ksobj);
(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED, ma,
(void)vm_page_grab_pages(ksobj, 0, oom_alloc | VM_ALLOC_WIRED, ma,
pages);
for (i = 0; i < pages;) {
vm_page_assert_xbusied(ma[i]);
@ -605,8 +606,10 @@ void
faultin(struct proc *p)
{
struct thread *td;
int oom_alloc;
PROC_LOCK_ASSERT(p, MA_OWNED);
/*
* If another process is swapping in this process,
* just wait until it finishes.
@ -616,7 +619,11 @@ faultin(struct proc *p)
msleep(&p->p_flag, &p->p_mtx, PVM, "faultin", 0);
return;
}
if ((p->p_flag & P_INMEM) == 0) {
oom_alloc = (p->p_flag & P_WKILLED) != 0 ? VM_ALLOC_SYSTEM :
VM_ALLOC_NORMAL;
/*
* Don't let another thread swap process p out while we are
* busy swapping it in.
@ -624,6 +631,10 @@ faultin(struct proc *p)
++p->p_lock;
p->p_flag |= P_SWAPPINGIN;
PROC_UNLOCK(p);
sx_xlock(&allproc_lock);
MPASS(swapped_cnt > 0);
swapped_cnt--;
sx_xunlock(&allproc_lock);
/*
* We hold no lock here because the list of threads
@ -631,14 +642,14 @@ faultin(struct proc *p)
* swapped out.
*/
FOREACH_THREAD_IN_PROC(p, td)
vm_thread_swapin(td);
vm_thread_swapin(td, oom_alloc);
PROC_LOCK(p);
swapclear(p);
p->p_swtick = ticks;
wakeup(&p->p_flag);
/* Allow other threads to swap p out now. */
wakeup(&p->p_flag);
--p->p_lock;
}
}
@ -648,26 +659,38 @@ faultin(struct proc *p)
* is enough space for them. Of course, if a process waits for a long
* time, it will be swapped in anyway.
*/
void
swapper(void)
static struct proc *
swapper_selector(void)
{
struct proc *p, *pp;
struct proc *p, *res;
struct thread *td;
int ppri, pri, slptime, swtime;
int min_flag, ppri, pri, slptime, swtime;
loop:
if (vm_page_count_min()) {
vm_wait_min();
goto loop;
}
pp = NULL;
sx_assert(&allproc_lock, SA_SLOCKED);
if (swapped_cnt == 0)
return (NULL);
res = NULL;
ppri = INT_MIN;
sx_slock(&allproc_lock);
min_flag = vm_page_count_min();
FOREACH_PROC_IN_SYSTEM(p) {
PROC_LOCK(p);
if (p->p_state == PRS_NEW ||
p->p_flag & (P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) {
if (p->p_state == PRS_NEW || (p->p_flag & (P_SWAPPINGOUT |
P_SWAPPINGIN | P_INMEM)) != 0) {
PROC_UNLOCK(p);
continue;
}
if (p->p_state == PRS_NORMAL && (p->p_flag & P_WKILLED) != 0) {
/*
* A swapped-out process might have mapped a
* large portion of the system's pages as
* anonymous memory. There is no other way to
* release the memory other than to kill the
* process, for which we need to swap it in.
*/
return (p);
}
if (min_flag) {
PROC_UNLOCK(p);
continue;
}
@ -690,7 +713,7 @@ swapper(void)
* selection.
*/
if (pri > ppri) {
pp = p;
res = p;
ppri = pri;
}
}
@ -698,33 +721,40 @@ swapper(void)
}
PROC_UNLOCK(p);
}
sx_sunlock(&allproc_lock);
if (res != NULL)
PROC_LOCK(res);
return (res);
}
/*
* Nothing to do, back to sleep.
*/
if ((p = pp) == NULL) {
tsleep(&proc0, PVM, "swapin", MAXSLP * hz / 2);
goto loop;
void
swapper(void)
{
struct proc *p;
for (;;) {
sx_slock(&allproc_lock);
p = swapper_selector();
sx_sunlock(&allproc_lock);
if (p == NULL) {
tsleep(&proc0, PVM, "swapin", MAXSLP * hz / 2);
} else {
PROC_LOCK_ASSERT(p, MA_OWNED);
/*
* Another process may be bringing or may have
* already brought this process in while we
* traverse all threads. Or, this process may
* have exited or even being swapped out
* again.
*/
if (p->p_state == PRS_NORMAL && (p->p_flag & (P_INMEM |
P_SWAPPINGOUT | P_SWAPPINGIN)) == 0) {
faultin(p);
}
PROC_UNLOCK(p);
}
}
PROC_LOCK(p);
/*
* Another process may be bringing or may have already
* brought this process in while we traverse all threads.
* Or, this process may even be being swapped out again.
*/
if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) {
PROC_UNLOCK(p);
goto loop;
}
/*
* We would like to bring someone in.
*/
faultin(p);
PROC_UNLOCK(p);
goto loop;
}
/*
@ -803,7 +833,12 @@ swapout_procs(int action)
didswap = true;
PROC_UNLOCK(p);
sx_slock(&allproc_lock);
if (didswap) {
sx_xlock(&allproc_lock);
swapped_cnt++;
sx_downgrade(&allproc_lock);
} else
sx_slock(&allproc_lock);
PRELE(p);
}
sx_sunlock(&allproc_lock);