Add an implementation for pthread_atfork().

Aside from the POSIX requirements for pthread_atfork(), when
fork()ing, take the malloc lock to keep malloc state consistent
in the child.

Reviewed by:	davidxu
Daniel Eischen 2003-11-04 20:04:45 +00:00
parent d6b826bac7
commit 4c1123c1c0
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=122075
11 changed files with 144 additions and 12 deletions
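
The handlers registered through pthread_atfork() exist for exactly the consistency problem the commit message describes: any lock that another thread may hold at the instant of fork() has to be acquired in a prepare handler and released again in both the parent and the child, so the child never inherits a lock frozen in the locked state. A minimal, self-contained usage sketch (application level, not part of this commit; app_lock is a hypothetical stand-in for libc's malloc lock):

#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Hypothetical application lock standing in for the malloc lock. */
static pthread_mutex_t app_lock = PTHREAD_MUTEX_INITIALIZER;

static void prepare(void) { pthread_mutex_lock(&app_lock); }	/* before fork() */
static void parent(void)  { pthread_mutex_unlock(&app_lock); }	/* after fork(), parent */
static void child(void)   { pthread_mutex_unlock(&app_lock); }	/* after fork(), child */

int
main(void)
{
	pid_t pid;

	if (pthread_atfork(prepare, parent, child) != 0)
		abort();

	pid = fork();
	if (pid == 0) {
		/* Safe even if some other thread held app_lock in the
		 * parent: the prepare handler owned it across fork(). */
		pthread_mutex_lock(&app_lock);
		pthread_mutex_unlock(&app_lock);
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	return (0);
}

The commit applies the same pattern inside the library itself by taking __malloc_lock around __sys_fork() in thr_fork.c below.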

View File

@@ -5,7 +5,7 @@
SRCS+= \
thr_aio_suspend.c \
thr_autoinit.c \
thr_atfork.c \
thr_attr_destroy.c \
thr_attr_init.c \
thr_attr_get_np.c \
@@ -28,6 +28,7 @@ SRCS+= \
thr_attr_setstack.c \
thr_attr_setstackaddr.c \
thr_attr_setstacksize.c \
thr_autoinit.c \
thr_barrier.c \
thr_barrierattr.c \
thr_cancel.c \
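
The new thr_atfork.c added to SRCS is not expanded in this view. Judging from the struct pthread_atfork, _thr_atfork_list and _thr_atfork_mutex declarations added to thr_private.h further down, a plausible sketch of the registration routine (an assumption about the unshown file, with initialization checks and error handling abbreviated) is:

#include <errno.h>
#include <stdlib.h>
#include <pthread.h>
#include "thr_private.h"

/* Sketch only: the actual thr_atfork.c in this commit may differ. */
__weak_reference(_pthread_atfork, pthread_atfork);

int
_pthread_atfork(void (*prepare)(void), void (*parent)(void),
    void (*child)(void))
{
	struct pthread_atfork *af;

	if ((af = malloc(sizeof(struct pthread_atfork))) == NULL)
		return (ENOMEM);
	af->prepare = prepare;
	af->parent = parent;
	af->child = child;

	/* Append under the new mutex so fork() sees a consistent list. */
	_pthread_mutex_lock(&_thr_atfork_mutex);
	TAILQ_INSERT_TAIL(&_thr_atfork_list, af, qe);
	_pthread_mutex_unlock(&_thr_atfork_mutex);
	return (0);
}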

View File

@@ -37,9 +37,13 @@
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <spinlock.h>
#include <sys/signalvar.h>
#include "thr_private.h"
extern spinlock_t *__malloc_lock;
#pragma weak __malloc_lock
__weak_reference(_fork, fork);
pid_t
@@ -47,6 +51,7 @@ _fork(void)
{
sigset_t sigset, oldset;
struct pthread *curthread;
struct pthread_atfork *af;
pid_t ret;
int errsave;
@@ -66,18 +71,48 @@ _fork(void)
SIGFILLSET(sigset);
__sys_sigprocmask(SIG_SETMASK, &sigset, &oldset);
}
_pthread_mutex_lock(&_thr_atfork_mutex);
/* Run down atfork prepare handlers. */
TAILQ_FOREACH_REVERSE(af, &_thr_atfork_list, atfork_head, qe) {
if (af->prepare != NULL)
af->prepare();
}
/* Fork a new process: */
if ((_kse_isthreaded() != 0) && (__malloc_lock != NULL)) {
_spinlock(__malloc_lock);
}
if ((ret = __sys_fork()) == 0) {
/* Child process */
_kse_single_thread(curthread);
errsave = errno;
/* Kernel signal mask is restored in _kse_single_thread */
_kse_single_thread(curthread);
/* Run down atfork child handlers. */
TAILQ_FOREACH(af, &_thr_atfork_list, qe) {
if (af->child != NULL)
af->child();
}
_thr_mutex_reinit(&_thr_atfork_mutex);
} else {
if ((_kse_isthreaded() != 0) && (__malloc_lock != NULL)) {
_spinunlock(__malloc_lock);
}
errsave = errno;
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
errsave = errno;
__sys_sigprocmask(SIG_SETMASK, &oldset, NULL);
errno = errsave;
}
}
/* Run down atfork parent handlers. */
TAILQ_FOREACH(af, &_thr_atfork_list, qe) {
if (af->parent != NULL)
af->parent();
}
_pthread_mutex_unlock(&_thr_atfork_mutex);
}
errno = errsave;
/* Return the process ID: */
return (ret);
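
With the +/- diff markers lost in this rendering, the new _fork() control flow is easier to follow as a simplified sketch. Signal-mask blocking, errno preservation and the curthread lookup from the surrounding code are omitted; every identifier below is one the commit introduces or already uses:

/* Simplified restatement of the committed _fork(); details omitted. */
pid_t
_fork(void)
{
	struct pthread_atfork *af;
	pid_t ret;

	_pthread_mutex_lock(&_thr_atfork_mutex);

	/* Prepare handlers run in reverse registration order (POSIX). */
	TAILQ_FOREACH_REVERSE(af, &_thr_atfork_list, atfork_head, qe) {
		if (af->prepare != NULL)
			af->prepare();
	}

	/* Freeze malloc so the child's copy of its state is consistent. */
	if ((_kse_isthreaded() != 0) && (__malloc_lock != NULL))
		_spinlock(__malloc_lock);

	if ((ret = __sys_fork()) == 0) {
		/*
		 * Child: collapse to a single thread, then run the child
		 * handlers.  __malloc_lock is not unlocked here; it is
		 * expected to be reset when _kse_single_thread()
		 * reinitializes the library spinlocks via
		 * _thr_spinlock_init() (added in the thr_kern.c hunk below).
		 */
		_kse_single_thread(_get_curthread());
		TAILQ_FOREACH(af, &_thr_atfork_list, qe) {
			if (af->child != NULL)
				af->child();
		}
		_thr_mutex_reinit(&_thr_atfork_mutex);
	} else {
		/* Parent: release malloc, run parent handlers, unlock. */
		if ((_kse_isthreaded() != 0) && (__malloc_lock != NULL))
			_spinunlock(__malloc_lock);
		TAILQ_FOREACH(af, &_thr_atfork_list, qe) {
			if (af->parent != NULL)
				af->parent();
		}
		_pthread_mutex_unlock(&_thr_atfork_mutex);
	}
	return (ret);
}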

View File

@@ -461,6 +461,8 @@ init_private(void)
/* Initialize everything else. */
TAILQ_INIT(&_thread_list);
TAILQ_INIT(&_thread_gc_list);
TAILQ_INIT(&_thr_atfork_list);
_pthread_mutex_init(&_thr_atfork_mutex, NULL);
/*
* Initialize the lock for temporary installation of signal

View File

@@ -329,6 +329,20 @@ _kse_single_thread(struct pthread *curthread)
_kse_initial = NULL;
_libpthread_init(curthread);
#else
int i;
/* Reset the current thread and KSE lock data. */
for (i = 0; i < curthread->locklevel; i++) {
_lockuser_reinit(&curthread->lockusers[i], (void *)curthread);
}
curthread->locklevel = 0;
for (i = 0; i < curthread->kse->k_locklevel; i++) {
_lockuser_reinit(&curthread->kse->k_lockusers[i],
(void *)curthread->kse);
_LCK_SET_PRIVATE2(&curthread->kse->k_lockusers[i], NULL);
}
curthread->kse->k_locklevel = 0;
_thr_spinlock_init();
if (__isthreaded) {
_thr_rtld_fini();
_thr_signal_deinit();
@@ -2015,7 +2029,7 @@ _thr_setrunnable(struct pthread *curthread, struct pthread *thread)
kmbx = _thr_setrunnable_unlocked(thread);
KSE_SCHED_UNLOCK(curthread->kse, thread->kseg);
_kse_critical_leave(crit);
if (kmbx != NULL)
if ((kmbx != NULL) && (__isthreaded != 0))
kse_wakeup(kmbx);
}
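
The extra work in _kse_single_thread() is needed because POSIX specifies that only the thread which called fork() exists in the child: lockusers and KSE locks held by the parent's other threads are simply gone and must be reinitialized rather than unlocked, and the _thr_setrunnable() change likewise avoids kse_wakeup() once __isthreaded has been cleared. A tiny self-contained illustration of the single-thread-after-fork rule (not part of the commit; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

static void *
spin(void *arg)
{
	/* Sleeps forever in the parent; never duplicated into the child. */
	for (;;)
		sleep(1);
	return (NULL);
}

int
main(void)
{
	pthread_t t;
	pid_t pid;

	pthread_create(&t, NULL, spin, NULL);
	pid = fork();
	if (pid == 0) {
		/*
		 * Only the forking thread runs here; the spin() thread was
		 * not copied, so the thread library must fall back to
		 * single-threaded state (what _kse_single_thread() does).
		 */
		printf("child %d is single-threaded again\n", (int)getpid());
		_exit(0);
	}
	waitpid(pid, NULL, 0);
	return (0);
}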

View File

@@ -442,6 +442,13 @@ struct pthread_cleanup {
void *routine_arg;
};
struct pthread_atfork {
TAILQ_ENTRY(pthread_atfork) qe;
void (*prepare)(void);
void (*parent)(void);
void (*child)(void);
};
struct pthread_attr {
int sched_policy;
int sched_inherit;
@@ -997,6 +1004,9 @@ SCLASS TAILQ_HEAD(, pthread) _thread_gc_list
SCLASS int _thr_active_threads SCLASS_PRESET(1);
SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _thr_atfork_list;
SCLASS pthread_mutex_t _thr_atfork_mutex;
/* Default thread attributes: */
SCLASS struct pthread_attr _pthread_attr_default
SCLASS_PRESET({
@@ -1109,8 +1119,11 @@ void _thr_exit(char *, int, char *);
void _thr_exit_cleanup(void);
void _thr_lock_wait(struct lock *lock, struct lockuser *lu);
void _thr_lock_wakeup(struct lock *lock, struct lockuser *lu);
void _thr_mutex_reinit(pthread_mutex_t *);
int _thr_ref_add(struct pthread *, struct pthread *, int);
void _thr_ref_delete(struct pthread *, struct pthread *);
void _thr_rtld_init(void);
void _thr_rtld_fini(void);
int _thr_schedule_add(struct pthread *, struct pthread *);
void _thr_schedule_remove(struct pthread *, struct pthread *);
void _thr_setrunnable(struct pthread *curthread, struct pthread *thread);
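
The list directions used in thr_fork.c give the ordering POSIX requires: prepare handlers run in the reverse of registration order (TAILQ_FOREACH_REVERSE), while parent and child handlers run in registration order (TAILQ_FOREACH). A self-contained program that makes the ordering visible (build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

static void prep1(void)   { printf("prepare 1\n"); }
static void prep2(void)   { printf("prepare 2\n"); }
static void parent1(void) { printf("parent 1\n"); }
static void parent2(void) { printf("parent 2\n"); }
static void child1(void)  { printf("child 1\n"); }
static void child2(void)  { printf("child 2\n"); }

int
main(void)
{
	pid_t pid;

	/* Unbuffered output so the child does not replay parent buffers. */
	setvbuf(stdout, NULL, _IONBF, 0);

	pthread_atfork(prep1, parent1, child1);	/* registered first */
	pthread_atfork(prep2, parent2, child2);	/* registered second */

	/*
	 * Expected: "prepare 2" then "prepare 1" before the fork, then
	 * "parent 1", "parent 2" in the parent and "child 1", "child 2"
	 * in the child.
	 */
	pid = fork();
	if (pid == 0)
		_exit(0);
	waitpid(pid, NULL, 0);
	return (0);
}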

View File

@@ -27,6 +27,7 @@ global:
_nanosleep;
_pause;
_pselect;
_pthread_atfork;
_pthread_barrier_destroy;
_pthread_barrier_init;
_pthread_barrier_wait;
@@ -178,6 +179,7 @@ global:
pause;
poll;
pselect;
pthread_atfork;
pthread_barrier_destroy;
pthread_barrier_init;
pthread_barrier_wait;

View File

@@ -5,7 +5,7 @@
SRCS+= \
thr_aio_suspend.c \
thr_autoinit.c \
thr_atfork.c \
thr_attr_destroy.c \
thr_attr_init.c \
thr_attr_get_np.c \
@@ -28,6 +28,7 @@ SRCS+= \
thr_attr_setstack.c \
thr_attr_setstackaddr.c \
thr_attr_setstacksize.c \
thr_autoinit.c \
thr_barrier.c \
thr_barrierattr.c \
thr_cancel.c \

View File

@@ -37,9 +37,13 @@
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <spinlock.h>
#include <sys/signalvar.h>
#include "thr_private.h"
extern spinlock_t *__malloc_lock;
#pragma weak __malloc_lock
__weak_reference(_fork, fork);
pid_t
@@ -47,6 +51,7 @@ _fork(void)
{
sigset_t sigset, oldset;
struct pthread *curthread;
struct pthread_atfork *af;
pid_t ret;
int errsave;
@@ -66,18 +71,48 @@ _fork(void)
SIGFILLSET(sigset);
__sys_sigprocmask(SIG_SETMASK, &sigset, &oldset);
}
_pthread_mutex_lock(&_thr_atfork_mutex);
/* Run down atfork prepare handlers. */
TAILQ_FOREACH_REVERSE(af, &_thr_atfork_list, atfork_head, qe) {
if (af->prepare != NULL)
af->prepare();
}
/* Fork a new process: */
if ((_kse_isthreaded() != 0) && (__malloc_lock != NULL)) {
_spinlock(__malloc_lock);
}
if ((ret = __sys_fork()) == 0) {
/* Child process */
_kse_single_thread(curthread);
errsave = errno;
/* Kernel signal mask is restored in _kse_single_thread */
_kse_single_thread(curthread);
/* Run down atfork child handlers. */
TAILQ_FOREACH(af, &_thr_atfork_list, qe) {
if (af->child != NULL)
af->child();
}
_thr_mutex_reinit(&_thr_atfork_mutex);
} else {
if ((_kse_isthreaded() != 0) && (__malloc_lock != NULL)) {
_spinunlock(__malloc_lock);
}
errsave = errno;
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
errsave = errno;
__sys_sigprocmask(SIG_SETMASK, &oldset, NULL);
errno = errsave;
}
}
/* Run down atfork parent handlers. */
TAILQ_FOREACH(af, &_thr_atfork_list, qe) {
if (af->parent != NULL)
af->parent();
}
_pthread_mutex_unlock(&_thr_atfork_mutex);
}
errno = errsave;
/* Return the process ID: */
return (ret);

View File

@@ -461,6 +461,8 @@ init_private(void)
/* Initialize everything else. */
TAILQ_INIT(&_thread_list);
TAILQ_INIT(&_thread_gc_list);
TAILQ_INIT(&_thr_atfork_list);
_pthread_mutex_init(&_thr_atfork_mutex, NULL);
/*
* Initialize the lock for temporary installation of signal

View File

@@ -329,6 +329,20 @@ _kse_single_thread(struct pthread *curthread)
_kse_initial = NULL;
_libpthread_init(curthread);
#else
int i;
/* Reset the current thread and KSE lock data. */
for (i = 0; i < curthread->locklevel; i++) {
_lockuser_reinit(&curthread->lockusers[i], (void *)curthread);
}
curthread->locklevel = 0;
for (i = 0; i < curthread->kse->k_locklevel; i++) {
_lockuser_reinit(&curthread->kse->k_lockusers[i],
(void *)curthread->kse);
_LCK_SET_PRIVATE2(&curthread->kse->k_lockusers[i], NULL);
}
curthread->kse->k_locklevel = 0;
_thr_spinlock_init();
if (__isthreaded) {
_thr_rtld_fini();
_thr_signal_deinit();
@@ -2015,7 +2029,7 @@ _thr_setrunnable(struct pthread *curthread, struct pthread *thread)
kmbx = _thr_setrunnable_unlocked(thread);
KSE_SCHED_UNLOCK(curthread->kse, thread->kseg);
_kse_critical_leave(crit);
if (kmbx != NULL)
if ((kmbx != NULL) && (__isthreaded != 0))
kse_wakeup(kmbx);
}

View File

@@ -442,6 +442,13 @@ struct pthread_cleanup {
void *routine_arg;
};
struct pthread_atfork {
TAILQ_ENTRY(pthread_atfork) qe;
void (*prepare)(void);
void (*parent)(void);
void (*child)(void);
};
struct pthread_attr {
int sched_policy;
int sched_inherit;
@@ -997,6 +1004,9 @@ SCLASS TAILQ_HEAD(, pthread) _thread_gc_list
SCLASS int _thr_active_threads SCLASS_PRESET(1);
SCLASS TAILQ_HEAD(atfork_head, pthread_atfork) _thr_atfork_list;
SCLASS pthread_mutex_t _thr_atfork_mutex;
/* Default thread attributes: */
SCLASS struct pthread_attr _pthread_attr_default
SCLASS_PRESET({
@@ -1109,8 +1119,11 @@ void _thr_exit(char *, int, char *);
void _thr_exit_cleanup(void);
void _thr_lock_wait(struct lock *lock, struct lockuser *lu);
void _thr_lock_wakeup(struct lock *lock, struct lockuser *lu);
void _thr_mutex_reinit(pthread_mutex_t *);
int _thr_ref_add(struct pthread *, struct pthread *, int);
void _thr_ref_delete(struct pthread *, struct pthread *);
void _thr_rtld_init(void);
void _thr_rtld_fini(void);
int _thr_schedule_add(struct pthread *, struct pthread *);
void _thr_schedule_remove(struct pthread *, struct pthread *);
void _thr_setrunnable(struct pthread *curthread, struct pthread *thread);