Refine thread suspension code: thread suspension is now a blocking
operation, and the caller is blocked until the target threads are
really suspended. Also avoid suspending a thread while it is holding
a critical lock.

Fix a bug in _thr_ref_delete which tests a never-set flag.
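
As a rough standalone sketch of the handshake described above (not code from this commit; every name in it, such as model_thread and model_suspend_check, is invented for illustration): the suspender raises a need-suspend flag and blocks until the target acknowledges it, and the target refuses to park itself while its critical count is non-zero, mirroring the THR_FLAGS_NEED_SUSPEND/THR_FLAGS_SUSPENDED flags, the critical_count field, and the cycle-counter wait/wake in the diff below. The model polls at safe points; the real code drives the check from the SIGCANCEL handler via _thr_ast().

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Standalone model only; not libthr code. */
struct model_thread {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int need_suspend;	/* models THR_FLAGS_NEED_SUSPEND */
	int suspended;		/* models THR_FLAGS_SUSPENDED */
	int critical_count;	/* models the new critical_count field */
};

static struct model_thread worker = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, 0
};

/* Caller side: request suspension and block until the target acks it. */
static void
model_suspend(struct model_thread *t)
{
	pthread_mutex_lock(&t->lock);
	t->need_suspend = 1;
	while (!t->suspended)
		pthread_cond_wait(&t->cond, &t->lock);
	pthread_mutex_unlock(&t->lock);
}

/* Caller side: let the target run again. */
static void
model_resume(struct model_thread *t)
{
	pthread_mutex_lock(&t->lock);
	t->need_suspend = 0;
	pthread_cond_broadcast(&t->cond);
	pthread_mutex_unlock(&t->lock);
}

/* Target side: called at safe points, like _thr_ast() in the patch. */
static void
model_suspend_check(struct model_thread *t)
{
	pthread_mutex_lock(&t->lock);
	/* A thread inside a critical region defers the suspension. */
	if (t->need_suspend && t->critical_count == 0) {
		t->suspended = 1;
		pthread_cond_broadcast(&t->cond);	/* wake the suspender */
		while (t->need_suspend)			/* park until resumed */
			pthread_cond_wait(&t->cond, &t->lock);
		t->suspended = 0;
	}
	pthread_mutex_unlock(&t->lock);
}

static void *
worker_main(void *arg)
{
	(void)arg;
	for (;;) {
		model_suspend_check(&worker);
		usleep(1000);		/* pretend to do some work */
	}
	return (NULL);
}

int
main(void)
{
	pthread_t td;

	pthread_create(&td, NULL, worker_main, NULL);
	model_suspend(&worker);		/* returns only once the worker is parked */
	printf("worker is really suspended\n");
	model_resume(&worker);
	return (0);
}

The real code uses the thread's umtx-based cycle counter and SIGCANCEL instead of a mutex/condvar pair, but the control flow is the same.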
David Xu 2006-01-05 13:51:22 +00:00
parent 98a95f61fa
commit bc414752d3
7 changed files with 177 additions and 68 deletions

View File

@@ -37,9 +37,7 @@
#include "thr_private.h"
static void free_thread(struct pthread *curthread, struct pthread *thread);
static int create_stack(struct pthread_attr *pattr);
static void free_stack(struct pthread *curthread, struct pthread_attr *pattr);
static void thread_start(struct pthread *curthread);
__weak_reference(_pthread_create, pthread_create);
@@ -50,7 +48,8 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
{
struct pthread *curthread, *new_thread;
struct thr_param param;
int ret = 0, locked;
int ret = 0, locked, create_suspended;
sigset_t set, oset;
_thr_check_init();
@@ -133,11 +132,20 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
TAILQ_INIT(&new_thread->pri_mutexq);
/* Initialise hooks in the thread structure: */
if (new_thread->attr.suspend == THR_CREATE_SUSPENDED)
if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
new_thread->flags = THR_FLAGS_NEED_SUSPEND;
create_suspended = 1;
} else {
create_suspended = 0;
}
new_thread->state = PS_RUNNING;
if (new_thread->attr.flags & PTHREAD_CREATE_DETACHED)
new_thread->tlflags |= TLFLAGS_DETACHED;
/* Add the new thread. */
new_thread->refcount = 1;
_thr_link(curthread, new_thread);
/* Return the thread pointer earlier so that the new thread can use it. */
(*thread) = new_thread;
@@ -157,13 +165,34 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
param.flags = 0;
if (new_thread->attr.flags & PTHREAD_SCOPE_SYSTEM)
param.flags |= THR_SYSTEM_SCOPE;
/* Schedule the new thread. */
if (create_suspended) {
SIGFILLSET(set);
SIGDELSET(set, SIGTRAP);
__sys_sigprocmask(SIG_SETMASK, &set, &oset);
new_thread->sigmask = oset;
}
ret = thr_new(&param, sizeof(param));
if (create_suspended)
__sys_sigprocmask(SIG_SETMASK, &oset, NULL);
if (ret != 0) {
if (locked)
THR_THREAD_UNLOCK(curthread, new_thread);
_thr_unlink(curthread, new_thread);
free_thread(curthread, new_thread);
if (!locked)
THR_THREAD_LOCK(curthread, new_thread);
new_thread->state = PS_DEAD;
new_thread->tid = TID_TERMINATED;
if (new_thread->flags & THR_FLAGS_NEED_SUSPEND) {
new_thread->cycle++;
_thr_umtx_wake(&new_thread->cycle, INT_MAX);
}
THR_THREAD_UNLOCK(curthread, new_thread);
THREAD_LIST_LOCK(curthread);
new_thread->tlflags |= TLFLAGS_DETACHED;
_thr_ref_delete_unlocked(curthread, new_thread);
THREAD_LIST_UNLOCK(curthread);
(*thread) = 0;
ret = EAGAIN;
} else if (locked) {
@@ -173,14 +202,6 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
return (ret);
}
static void
free_thread(struct pthread *curthread, struct pthread *thread)
{
free_stack(curthread, &thread->attr);
curthread->tid = TID_TERMINATED;
_thr_free(curthread, thread);
}
static int
create_stack(struct pthread_attr *pattr)
{
@@ -197,23 +218,26 @@ create_stack(struct pthread_attr *pattr)
return (ret);
}
static void
free_stack(struct pthread *curthread, struct pthread_attr *pattr)
{
if ((pattr->flags & THR_STACK_USER) == 0) {
THREAD_LIST_LOCK(curthread);
/* Stack routines don't use malloc/free. */
_thr_stack_free(pattr);
THREAD_LIST_UNLOCK(curthread);
}
}
static void
thread_start(struct pthread *curthread)
{
if (curthread->flags & THR_FLAGS_NEED_SUSPEND)
_thr_suspend_check(curthread);
if (curthread->attr.suspend == THR_CREATE_SUSPENDED) {
sigset_t set = curthread->sigmask;
_thr_ast(curthread);
/*
* The parent thread has stored the signal mask for us;
* we should restore it now.
*/
sigprocmask(SIG_SETMASK, &set, NULL);
}
/*
* This is used as a serialization point to allow the parent
* to report the 'new thread' event to the debugger before the
* thread does real work.
*/
THR_LOCK(curthread);
THR_UNLOCK(curthread);

View File

@@ -124,9 +124,16 @@ _pthread_exit(void *status)
exit(0);
/* Never reach! */
}
THR_LOCK(curthread);
curthread->state = PS_DEAD;
THR_UNLOCK(curthread);
/*
* The thread was created with an initial refcount of 1; drop the
* reference count to allow it to be garbage collected.
*/
curthread->refcount--;
if (curthread->tlflags & TLFLAGS_DETACHED)
THR_GCLIST_ADD(curthread);
curthread->state = PS_DEAD;
THREAD_LIST_UNLOCK(curthread);
if (SHOULD_REPORT_EVENT(curthread, TD_DEATH))
_thr_report_death(curthread);

View File

@@ -129,7 +129,6 @@ _thr_gc(struct pthread *curthread)
continue;
}
DBG_MSG("Freeing thread %p\n", td);
_thr_free(curthread, td);
}
}
@@ -224,8 +223,6 @@ _thr_link(struct pthread *curthread, struct pthread *thread)
{
THREAD_LIST_LOCK(curthread);
THR_LIST_ADD(thread);
if (thread->attr.flags & PTHREAD_DETACHED)
thread->tlflags |= TLFLAGS_DETACHED;
_thread_active_threads++;
THREAD_LIST_UNLOCK(curthread);
}
@@ -298,14 +295,20 @@ _thr_ref_add(struct pthread *curthread, struct pthread *thread,
void
_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
{
THREAD_LIST_LOCK(curthread);
_thr_ref_delete_unlocked(curthread, thread);
THREAD_LIST_UNLOCK(curthread);
}
void
_thr_ref_delete_unlocked(struct pthread *curthread, struct pthread *thread)
{
if (thread != NULL) {
THREAD_LIST_LOCK(curthread);
thread->refcount--;
if ((thread->refcount == 0) &&
(thread->tlflags & TLFLAGS_GC_SAFE) != 0)
if ((thread->refcount == 0) && thread->state == PS_DEAD &&
(thread->tlflags & TLFLAGS_DETACHED) != 0)
THR_GCLIST_ADD(thread);
THREAD_LIST_UNLOCK(curthread);
}
}

View File

@@ -349,6 +349,12 @@ struct pthread {
/* How many low level locks the thread held. */
int locklevel;
/*
* Set to non-zero when this thread has entered a critical
* region. We allow for recursive entries into critical regions.
*/
int critical_count;
/* Signal blocked counter. */
int sigblock;
@@ -494,6 +500,10 @@ struct pthread {
td_event_msg_t event_buf;
};
#define THR_IN_CRITICAL(thrd) \
(((thrd)->locklevel > 0) || \
((thrd)->critical_count > 0))
#define THR_UMTX_TRYLOCK(thrd, lck) \
_thr_umtx_trylock((lck), (thrd)->tid)
@@ -517,6 +527,7 @@ do { \
if ((thrd)->locklevel > 0) { \
_thr_umtx_unlock((lck), (thrd)->tid); \
(thrd)->locklevel--; \
_thr_ast(thrd); \
} else { \
_thr_assert_lock_level(); \
} \
@@ -673,6 +684,7 @@ void _thread_exit(char *, int, char *) __hidden __dead2;
void _thr_exit_cleanup(void) __hidden;
int _thr_ref_add(struct pthread *, struct pthread *, int) __hidden;
void _thr_ref_delete(struct pthread *, struct pthread *) __hidden;
void _thr_ref_delete_unlocked(struct pthread *, struct pthread *) __hidden;
int _thr_find_thread(struct pthread *, struct pthread *, int) __hidden;
void _thr_rtld_init(void) __hidden;
void _thr_rtld_fini(void) __hidden;
@@ -695,10 +707,11 @@ void _thr_list_init(void) __hidden;
void _thr_hash_add(struct pthread *) __hidden;
void _thr_hash_remove(struct pthread *) __hidden;
struct pthread *_thr_hash_find(struct pthread *) __hidden;
void _thr_link(struct pthread *curthread, struct pthread *thread) __hidden;
void _thr_unlink(struct pthread *curthread, struct pthread *thread) __hidden;
void _thr_suspend_check(struct pthread *curthread) __hidden;
void _thr_link(struct pthread *, struct pthread *) __hidden;
void _thr_unlink(struct pthread *, struct pthread *) __hidden;
void _thr_suspend_check(struct pthread *) __hidden;
void _thr_assert_lock_level(void) __hidden __dead2;
void _thr_ast(struct pthread *) __hidden;
void _thr_timer_init(void) __hidden;
void _thr_report_creation(struct pthread *curthread,
struct pthread *newthread) __hidden;

View File

@@ -88,5 +88,4 @@ resume_common(struct pthread *thread)
thread->flags &= ~THR_FLAGS_NEED_SUSPEND;
thread->cycle++;
_thr_umtx_wake(&thread->cycle, 1);
_thr_send_sig(thread, SIGCANCEL);
}

View File

@@ -52,34 +52,67 @@ sigcancel_handler(int sig, siginfo_t *info, ucontext_t *ucp)
if (curthread->cancelflags & THR_CANCEL_AT_POINT)
pthread_testcancel();
if (curthread->flags & THR_FLAGS_NEED_SUSPEND) {
__sys_sigprocmask(SIG_SETMASK, &ucp->uc_sigmask, NULL);
_thr_suspend_check(curthread);
_thr_ast(curthread);
}
void
_thr_ast(struct pthread *curthread)
{
if (!THR_IN_CRITICAL(curthread)) {
if (__predict_false((curthread->flags &
(THR_FLAGS_NEED_SUSPEND | THR_FLAGS_SUSPENDED))
== THR_FLAGS_NEED_SUSPEND))
_thr_suspend_check(curthread);
}
}
void
_thr_suspend_check(struct pthread *curthread)
{
long cycle;
umtx_t cycle;
/* Async suspend. */
/*
* Block SIGCANCEL, which other threads must send to suspend us.
*/
_thr_signal_block(curthread);
THR_LOCK(curthread);
if ((curthread->flags & (THR_FLAGS_NEED_SUSPEND | THR_FLAGS_SUSPENDED))
== THR_FLAGS_NEED_SUSPEND) {
/*
* Increase critical_count; we don't use THR_LOCK/UNLOCK here
* because this is leaf code and we don't want to call ourselves
* recursively.
*/
curthread->critical_count++;
THR_UMTX_LOCK(curthread, &(curthread)->lock);
while ((curthread->flags & (THR_FLAGS_NEED_SUSPEND |
THR_FLAGS_SUSPENDED)) == THR_FLAGS_NEED_SUSPEND) {
curthread->cycle++;
cycle = curthread->cycle;
/* Wake the thread suspending us. */
_thr_umtx_wake(&curthread->cycle, INT_MAX);
/*
* If we are coming from pthread_exit, we don't want to
* suspend; just go and die.
*/
if (curthread->state == PS_DEAD)
break;
curthread->flags |= THR_FLAGS_SUSPENDED;
while (curthread->flags & THR_FLAGS_NEED_SUSPEND) {
cycle = curthread->cycle;
THR_UNLOCK(curthread);
_thr_signal_unblock(curthread);
_thr_umtx_wait(&curthread->cycle, cycle, NULL);
_thr_signal_block(curthread);
THR_LOCK(curthread);
}
THR_UMTX_UNLOCK(curthread, &(curthread)->lock);
_thr_umtx_wait(&curthread->cycle, cycle, NULL);
THR_UMTX_LOCK(curthread, &(curthread)->lock);
curthread->flags &= ~THR_FLAGS_SUSPENDED;
}
THR_UNLOCK(curthread);
THR_UMTX_UNLOCK(curthread, &(curthread)->lock);
curthread->critical_count--;
/*
* Unblock SIGCANCEL. A new SIGCANCEL may already be pending and a
* new signal frame will nest on top of us; that looks like a problem
* because the stack could grow and overflow, but since the kernel
* automatically masks SIGCANCEL while delivering the signal, at most
* one signal frame can nest, so this should be fine.
*/
_thr_signal_unblock(curthread);
}

View File

@@ -37,7 +37,8 @@
#include "thr_private.h"
static void suspend_common(struct pthread *thread);
static int suspend_common(struct pthread *, struct pthread *,
int);
__weak_reference(_pthread_suspend_np, pthread_suspend_np);
__weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np);
@@ -58,7 +59,7 @@ _pthread_suspend_np(pthread_t thread)
== 0) {
/* Lock the thread's scheduling queue: */
THR_THREAD_LOCK(curthread, thread);
suspend_common(thread);
suspend_common(curthread, thread, 1);
/* Unlock the thread's scheduling queue: */
THR_THREAD_UNLOCK(curthread, thread);
@@ -71,29 +72,58 @@ _pthread_suspend_np(pthread_t thread)
void
_pthread_suspend_all_np(void)
{
struct pthread *curthread = _get_curthread();
struct pthread *thread;
struct pthread *curthread = _get_curthread();
struct pthread *thread;
int ret;
/* Take the thread list lock: */
restart:
THREAD_LIST_LOCK(curthread);
TAILQ_FOREACH(thread, &_thread_list, tle) {
if (thread != curthread) {
THR_THREAD_LOCK(curthread, thread);
suspend_common(thread);
/* First try to suspend the thread without waiting */
ret = suspend_common(curthread, thread, 0);
if (ret == 0) {
/* Could not suspend it without waiting; retry and wait. */
thread->refcount++;
THREAD_LIST_UNLOCK(curthread);
suspend_common(curthread, thread, 1);
THR_THREAD_UNLOCK(curthread, thread);
_thr_ref_delete(curthread, thread);
/*
* Because we were blocked, things may have
* changed; we have to restart the
* process.
*/
goto restart;
}
THR_THREAD_UNLOCK(curthread, thread);
}
}
/* Release the thread list lock: */
THREAD_LIST_UNLOCK(curthread);
}
static void
suspend_common(struct pthread *thread)
static int
suspend_common(struct pthread *curthread, struct pthread *thread,
int waitok)
{
if (thread->state != PS_DEAD) {
umtx_t tmp;
while (thread->state != PS_DEAD &&
!(thread->flags & THR_FLAGS_SUSPENDED)) {
thread->flags |= THR_FLAGS_NEED_SUSPEND;
_thr_send_sig(thread, SIGCANCEL);
if (waitok) {
tmp = thread->cycle;
THR_THREAD_UNLOCK(curthread, thread);
_thr_umtx_wait(&thread->cycle, tmp, NULL);
THR_THREAD_LOCK(curthread, thread);
} else {
return (0);
}
}
return (1);
}
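
From an application's point of view, the visible change is in FreeBSD's non-portable suspend interface. A minimal usage sketch (not part of this commit), assuming the post-commit behavior in which pthread_suspend_np() does not return until the target thread has really parked itself:

#include <pthread.h>
#include <pthread_np.h>	/* pthread_suspend_np(), pthread_resume_np() */
#include <stdio.h>
#include <unistd.h>

static void *
spin(void *arg)
{
	(void)arg;
	for (;;)
		usleep(1000);	/* worker just burns time */
	return (NULL);
}

int
main(void)
{
	pthread_t td;

	if (pthread_create(&td, NULL, spin, NULL) != 0)
		return (1);
	/*
	 * With this change the call below blocks until the worker has
	 * actually entered the suspended state, instead of merely
	 * posting the suspend request and returning.
	 */
	pthread_suspend_np(td);
	printf("worker is suspended\n");
	pthread_resume_np(td);
	return (0);
}

pthread_suspend_all_np() behaves similarly, except that it retries the whole thread list after it had to block on any one thread, as the restart loop above shows.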