From db4060a9d08464d5a3c608b6aa66924c8b0ca83d Mon Sep 17 00:00:00 2001 From: mjg Date: Thu, 29 Nov 2018 02:52:08 +0000 Subject: [PATCH] proc: create a dedicated lock for zombproc to lighten the load on allproc_lock waitpid always takes proctree to evaluate the list, but only takes allproc if it can reap. With this patch allproc is no longer taken, which helps during poudriere -j 128. Discussed with: kib Sponsored by: The FreeBSD Foundation --- sys/kern/kern_exit.c | 6 ++++-- sys/kern/kern_fork.c | 8 ++++++++ sys/kern/kern_proc.c | 6 ++++-- sys/kern/kern_racct.c | 2 ++ sys/sys/proc.h | 1 + 5 files changed, 19 insertions(+), 4 deletions(-) diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c index f88c7cf28f59..11610de083ce 100644 --- a/sys/kern/kern_exit.c +++ b/sys/kern/kern_exit.c @@ -432,8 +432,10 @@ exit1(struct thread *td, int rval, int signo) * Move proc from allproc queue to zombproc. */ sx_xlock(&allproc_lock); + sx_xlock(&zombproc_lock); LIST_REMOVE(p, p_list); LIST_INSERT_HEAD(&zombproc, p, p_list); + sx_xunlock(&zombproc_lock); sx_xunlock(&allproc_lock); /* @@ -871,9 +873,9 @@ proc_reap(struct thread *td, struct proc *p, int *status, int options) * Remove other references to this process to ensure we have an * exclusive reference. 
*/ - sx_xlock(&allproc_lock); + sx_xlock(&zombproc_lock); LIST_REMOVE(p, p_list); /* off zombproc */ - sx_xunlock(&allproc_lock); + sx_xunlock(&zombproc_lock); sx_xlock(PIDHASHLOCK(p->p_pid)); LIST_REMOVE(p, p_hash); sx_xunlock(PIDHASHLOCK(p->p_pid)); diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index 050e4651d098..e0d1299ba78f 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -238,6 +238,7 @@ fork_findpid(int flags) struct proc *p; int trypid; static int pidchecked = 0; + bool locked_zomb = false; /* * Requires allproc_lock in order to iterate over the list @@ -318,6 +319,10 @@ again: } if (!doingzomb) { doingzomb = 1; + if (!locked_zomb) { + sx_slock(&zombproc_lock); + locked_zomb = true; + } p = LIST_FIRST(&zombproc); goto again; } @@ -331,6 +336,9 @@ again: else lastpid = trypid; + if (locked_zomb) + sx_sunlock(&zombproc_lock); + return (trypid); } diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c index 4dfa374ad3e6..68eb2a28616e 100644 --- a/sys/kern/kern_proc.c +++ b/sys/kern/kern_proc.c @@ -125,6 +125,7 @@ u_long pgrphash; struct proclist allproc; struct proclist zombproc; struct sx __exclusive_cache_line allproc_lock; +struct sx __exclusive_cache_line zombproc_lock; struct sx __exclusive_cache_line proctree_lock; struct mtx __exclusive_cache_line ppeers_lock; uma_zone_t proc_zone; @@ -177,6 +178,7 @@ procinit(void) u_long i; sx_init(&allproc_lock, "allproc"); + sx_init(&zombproc_lock, "zombproc"); sx_init(&proctree_lock, "proctree"); mtx_init(&ppeers_lock, "p_peers", NULL, MTX_DEF); LIST_INIT(&allproc); @@ -1194,14 +1196,14 @@ zpfind(pid_t pid) { struct proc *p; - sx_slock(&allproc_lock); + sx_slock(&zombproc_lock); LIST_FOREACH(p, &zombproc, p_list) { if (p->p_pid == pid) { PROC_LOCK(p); break; } } - sx_sunlock(&allproc_lock); + sx_sunlock(&zombproc_lock); return (p); } diff --git a/sys/kern/kern_racct.c b/sys/kern/kern_racct.c index 644d514bb08e..f80bb64fa029 100644 --- a/sys/kern/kern_racct.c +++ b/sys/kern/kern_racct.c 
@@ -1228,11 +1228,13 @@ racctd(void) sx_slock(&allproc_lock); + sx_slock(&zombproc_lock); LIST_FOREACH(p, &zombproc, p_list) { PROC_LOCK(p); racct_set(p, RACCT_PCTCPU, 0); PROC_UNLOCK(p); } + sx_sunlock(&zombproc_lock); FOREACH_PROC_IN_SYSTEM(p) { PROC_LOCK(p); diff --git a/sys/sys/proc.h b/sys/sys/proc.h index 82f2ab343ddc..c578f8ba147a 100644 --- a/sys/sys/proc.h +++ b/sys/sys/proc.h @@ -959,6 +959,7 @@ extern u_long pgrphash; extern struct sx allproc_lock; extern int allproc_gen; +extern struct sx zombproc_lock; extern struct sx proctree_lock; extern struct mtx ppeers_lock; extern struct proc proc0; /* Process slot for swapper. */