Modify the code path of the ifdef NOTYET part of _kse_single_thread():
o Don't reinitialise the atfork() handler list in the child. We are meant
  to call the child handler, and on subsequent fork()s should call all
  three functions as normal.
o Don't reinitialise the thread specific keyed data in the child after a
  fork. Applications may require this for context.
o Reinitialise curthread->tlflags after removing ourselves from (and
  reinitialising) the various internal thread lists.
o Reinitialise __malloc_lock in the child after fork() (to balance our
  explicitly taking the lock prior to the fork()).

With these changes, it is possible to enable the NOTYET code in
thr_kern.c to allow the use of non-async-safe functions after fork()ing
from a threaded program.

Reviewed by:	Daniel Eischen <deischen@freebsd.org>

[_malloc_lock reinitialisation has since been moved to avoid polluting
the !NOTYET code]
This commit is contained in:
parent f1b94d803f
commit a49729bb2d
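The atfork and keyed-data points above are plain pthreads semantics, so a small standalone program (written for this note, not part of the commit) can illustrate them: the child handler registered with pthread_atfork() runs in the forked child, all three handlers fire again on a later fork(), and keyed data created before the fork stays readable in the child. The printf() and pthread_getspecific() calls in the child are exactly the kind of non-async-safe use after fork()ing from a threaded program that the NOTYET code is meant to permit.

/*
 * Standalone sketch (not part of this commit): a threaded program
 * registers atfork handlers and per-thread keyed data, then forks.
 * Compile with -pthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

static pthread_key_t key;

static void prepare(void) { printf("prepare: in forking process\n"); }
static void parent(void)  { printf("parent: after fork, in parent\n"); }
static void child(void)   { printf("child: after fork, in child\n"); }

static void *
dummy(void *arg)
{
	return (arg);
}

int
main(void)
{
	pthread_t td;
	pid_t pid;

	/* Make the process threaded, the case the commit is about. */
	if (pthread_create(&td, NULL, dummy, NULL) != 0)
		abort();
	pthread_join(td, NULL);

	/* All three handlers stay registered across fork(). */
	if (pthread_atfork(prepare, parent, child) != 0)
		abort();

	/* Keyed data created before the fork... */
	if (pthread_key_create(&key, free) != 0)
		abort();
	pthread_setspecific(key, strdup("kept across fork"));

	pid = fork();
	if (pid == 0) {
		/* ...is still readable in the child. */
		printf("child sees: %s\n",
		    (const char *)pthread_getspecific(key));
		_exit(0);
	}
	waitpid(pid, NULL, 0);

	/* A second fork() runs prepare/parent/child again, as normal. */
	pid = fork();
	if (pid == 0)
		_exit(0);
	waitpid(pid, NULL, 0);
	return (0);
}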
@@ -437,6 +437,7 @@ init_private(void)
 		}
 		_pthread_attr_default.guardsize_attr = _thr_guard_default;
 		_pthread_attr_default.stacksize_attr = _thr_stack_default;
+		TAILQ_INIT(&_thr_atfork_list);
 		init_once = 1;	/* Don't do this again. */
 	} else {
 		/*
@@ -453,7 +454,6 @@ init_private(void)
 	/* Initialize everything else. */
 	TAILQ_INIT(&_thread_list);
 	TAILQ_INIT(&_thread_gc_list);
-	TAILQ_INIT(&_thr_atfork_list);
 	_pthread_mutex_init(&_thr_atfork_mutex, NULL);

 	/*
@@ -55,6 +55,9 @@ __FBSDID("$FreeBSD$");
 #include "atomic_ops.h"
 #include "thr_private.h"
 #include "libc_private.h"
+#ifdef NOTYET
+#include "spinlock.h"
+#endif

 /* #define DEBUG_THREAD_KERN */
 #ifdef DEBUG_THREAD_KERN
@@ -210,9 +213,9 @@ _kse_single_thread(struct pthread *curthread)
 	struct kse *kse;
 	struct kse_group *kseg;
 	struct pthread *thread;
 	kse_critical_t crit;
 	int i;

 	_thr_spinlock_init();
+	*__malloc_lock = (spinlock_t)_SPINLOCK_INITIALIZER;
 	if (__isthreaded) {
 		_thr_rtld_fini();
 		_thr_signal_deinit();
@@ -250,11 +253,8 @@ _kse_single_thread(struct pthread *curthread)
 	curthread->joiner = NULL;	/* no joining threads yet */
 	curthread->refcount = 0;
 	SIGEMPTYSET(curthread->sigpend);	/* clear pending signals */
-	if (curthread->specific != NULL) {
-		free(curthread->specific);
-		curthread->specific = NULL;
-		curthread->specific_data_count = 0;
-	}
+
+	/* Don't free thread-specific data as the caller may require it */

 	/* Free the free KSEs: */
 	while ((kse = TAILQ_FIRST(&free_kseq)) != NULL) {
@@ -317,6 +317,9 @@ _kse_single_thread(struct pthread *curthread)
 	curthread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
 	curthread->attr.flags |= PTHREAD_SCOPE_PROCESS;

+	/* We're no longer part of any lists */
+	curthread->tlflags = 0;
+
 	/*
 	 * After a fork, we are still operating on the thread's original
 	 * stack. Don't clear the THR_FLAGS_USER from the thread's
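For the __malloc_lock point, the sketch below shows the general lock-around-fork pattern using an ordinary pthread mutex; the names (my_malloc_lock, fork_prepare, fork_parent, fork_child) are invented for illustration and are not libpthread's.

/*
 * Illustrative sketch only -- not libpthread code.  All names here
 * (my_malloc_lock, fork_prepare, fork_parent, fork_child) are made up.
 */
#include <pthread.h>
#include <unistd.h>

static pthread_mutex_t my_malloc_lock = PTHREAD_MUTEX_INITIALIZER;

static void
fork_prepare(void)
{
	/* Quiesce the protected state: no other thread may hold the lock. */
	pthread_mutex_lock(&my_malloc_lock);
}

static void
fork_parent(void)
{
	/* The parent simply drops the lock taken in fork_prepare(). */
	pthread_mutex_unlock(&my_malloc_lock);
}

static void
fork_child(void)
{
	/*
	 * Only the forking thread survives in the child, so reinitialise
	 * the lock instead of unlocking it; this mirrors
	 * *__malloc_lock = (spinlock_t)_SPINLOCK_INITIALIZER; in
	 * _kse_single_thread().
	 */
	pthread_mutex_init(&my_malloc_lock, NULL);
}

int
main(void)
{
	pthread_atfork(fork_prepare, fork_parent, fork_child);
	if (fork() == 0)
		_exit(0);	/* child: the lock is freshly initialised */
	return (0);
}

In the commit itself the lock is taken explicitly before the fork() and reinitialised in _kse_single_thread(), rather than through pthread_atfork() handlers as in this sketch.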