thread: lockless zombie list manipulation

This gets rid of the most contended spinlock seen when creating/destroying
threads in a loop. (modulo kstack)

Tested by:	alfredo (ppc64), bdragon (ppc64)

Author: Mateusz Guzik
Date:   2020-11-11 18:43:51 +00:00
commit c5315f5196, parent 54bf96fb4f
2 changed files with 29 additions and 24 deletions

sys/kern/kern_thread.c

@@ -128,9 +128,7 @@ SDT_PROBE_DEFINE(proc, , , lwp__exit);
  */
 static uma_zone_t thread_zone;
 
-TAILQ_HEAD(, thread) zombie_threads = TAILQ_HEAD_INITIALIZER(zombie_threads);
-static struct mtx zombie_lock;
-MTX_SYSINIT(zombie_lock, &zombie_lock, "zombie lock", MTX_SPIN);
+static __exclusive_cache_line struct thread *thread_zombies;
 
 static void thread_zombie(struct thread *);
 static int thread_unsuspend_one(struct thread *td, struct proc *p,
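
Aside (not part of the commit): __exclusive_cache_line is FreeBSD's annotation for giving a frequently written global such as thread_zombies its own cache line, so the CAS/swap traffic on it does not false-share with neighbouring data. A rough userspace approximation of the same idea, assuming a 64-byte line and a stand-in struct, could look like this:

#include <stdalign.h>

#define CACHE_LINE_SIZE 64      /* assumed; the kernel gets the real value from machine/param.h */

struct thread;                  /* stand-in for the real struct thread */

/* Align the list head to a cache-line boundary and pad out the rest of the
 * line so no unrelated hot variable ends up sharing it. */
static struct {
        alignas(CACHE_LINE_SIZE) struct thread *head;
        char pad[CACHE_LINE_SIZE - sizeof(struct thread *)];
} thread_zombies_line;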
@@ -409,14 +407,20 @@ threadinit(void)
 
 /*
  * Place an unused thread on the zombie list.
- * Use the slpq as that must be unused by now.
  */
 void
 thread_zombie(struct thread *td)
 {
-	mtx_lock_spin(&zombie_lock);
-	TAILQ_INSERT_HEAD(&zombie_threads, td, td_slpq);
-	mtx_unlock_spin(&zombie_lock);
+	struct thread *ztd;
+
+	ztd = atomic_load_ptr(&thread_zombies);
+	for (;;) {
+		td->td_zombie = ztd;
+		if (atomic_fcmpset_rel_ptr((uintptr_t *)&thread_zombies,
+		    (uintptr_t *)&ztd, (uintptr_t)td))
+			break;
+		continue;
+	}
 }
 
 /*
@@ -430,29 +434,27 @@ thread_stash(struct thread *td)
 }
 
 /*
- * Reap zombie resources.
+ * Reap zombie threads.
  */
 void
 thread_reap(void)
 {
-	struct thread *td_first, *td_next;
+	struct thread *itd, *ntd;
 
 	/*
-	 * Don't even bother to lock if none at this instant,
-	 * we really don't care about the next instant.
+	 * Reading upfront is pessimal if followed by concurrent atomic_swap,
+	 * but most of the time the list is empty.
 	 */
-	if (!TAILQ_EMPTY(&zombie_threads)) {
-		mtx_lock_spin(&zombie_lock);
-		td_first = TAILQ_FIRST(&zombie_threads);
-		if (td_first)
-			TAILQ_INIT(&zombie_threads);
-		mtx_unlock_spin(&zombie_lock);
-		while (td_first) {
-			td_next = TAILQ_NEXT(td_first, td_slpq);
-			thread_cow_free(td_first);
-			thread_free(td_first);
-			td_first = td_next;
-		}
-	}
+	if (thread_zombies == NULL)
+		return;
+
+	itd = (struct thread *)atomic_swap_ptr((uintptr_t *)&thread_zombies,
+	    (uintptr_t)NULL);
+	while (itd != NULL) {
+		ntd = itd->td_zombie;
+		thread_cow_free(itd);
+		thread_free(itd);
+		itd = ntd;
+	}
 }
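
Taken together, the two hunks above replace the mutex-protected TAILQ with what amounts to a Treiber-style LIFO: thread_zombie() pushes with a release compare-and-swap (atomic_fcmpset_rel_ptr refreshes ztd on failure, so the loop simply retries), and thread_reap() detaches the entire chain with one atomic swap and then walks it privately. A minimal userspace sketch of the same pattern using C11 atomics; the names zombie_push, zombie_reap and struct zombie are illustrative, not FreeBSD API:

#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

struct zombie {
        struct zombie *next;            /* plays the role of td_zombie */
        /* per-thread resources would live here */
};

static _Atomic(struct zombie *) zombies;        /* plays the role of thread_zombies */

/* Producer side: push one node, analogous to thread_zombie(). */
static void
zombie_push(struct zombie *z)
{
        struct zombie *head = atomic_load_explicit(&zombies, memory_order_relaxed);

        do {
                z->next = head;
                /* Release ordering publishes z->next before z becomes the head;
                 * on failure, head is refreshed and the loop retries. */
        } while (!atomic_compare_exchange_weak_explicit(&zombies, &head, z,
            memory_order_release, memory_order_relaxed));
}

/* Consumer side: detach the whole list at once, analogous to thread_reap(). */
static void
zombie_reap(void)
{
        struct zombie *z, *next;

        /* Cheap early exit; the common case is an empty list. */
        if (atomic_load_explicit(&zombies, memory_order_relaxed) == NULL)
                return;

        /* One swap takes ownership of every node pushed so far. */
        z = atomic_exchange_explicit(&zombies, NULL, memory_order_acquire);
        while (z != NULL) {
                next = z->next;
                free(z);
                z = next;
        }
}

Because the swap takes ownership of every node pushed so far, a producer that loses the race simply retries against the new (empty) head and its node is collected by a later reap; the chain the reaper walks is never modified by another CPU.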

sys/sys/proc.h

@@ -229,7 +229,10 @@ struct thread {
 	struct proc *td_proc;		/* (*) Associated process. */
 	TAILQ_ENTRY(thread) td_plist;	/* (*) All threads in this proc. */
 	TAILQ_ENTRY(thread) td_runq;	/* (t) Run queue. */
-	TAILQ_ENTRY(thread) td_slpq;	/* (t) Sleep queue. */
+	union {
+		TAILQ_ENTRY(thread) td_slpq;	/* (t) Sleep queue. */
+		struct thread *td_zombie;	/* Zombie list linkage */
+	};
 	TAILQ_ENTRY(thread) td_lockq;	/* (t) Lock queue. */
 	LIST_ENTRY(thread) td_hash;	/* (d) Hash chain. */
 	struct cpuset *td_cpuset;	/* (t) CPU affinity mask. */
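
The proc.h change avoids growing struct thread: a thread on the zombie list can no longer sit on a sleep queue (the removed comment "Use the slpq as that must be unused by now" relied on the same fact), so td_zombie can overlay td_slpq in an anonymous union. A stripped-down illustration of the trick, using a hypothetical struct rather than the real layout:

/* Hypothetical, for illustration only: the sleep-queue linkage and the zombie
 * linkage are never live at the same time, so they can share storage. */
struct xthread {
        union {
                struct { struct xthread *next, **prev; } slpq; /* while sleeping */
                struct xthread *zombie_next;                   /* after exit */
        };
};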