Convert thread list lock from mutex to rwlock.

This commit is contained in:
David Xu 2010-09-13 07:03:01 +00:00
parent 336c4b45ea
commit a9b764e218
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=212536
15 changed files with 153 additions and 129 deletions

View File

@@ -50,8 +50,7 @@ _pthread_setaffinity_np(pthread_t td, size_t cpusetsize, const cpuset_t *cpusetp
-1, cpusetsize, cpusetp);
if (error == -1)
error = errno;
} else {
THR_THREAD_LOCK(curthread, td);
} else if ((error = _thr_find_thread(curthread, td, 0)) == 0) {
if (td->state == PS_DEAD) {
THR_THREAD_UNLOCK(curthread, td);
return (EINVAL);
@@ -73,10 +72,18 @@ _pthread_getaffinity_np(pthread_t td, size_t cpusetsize, cpuset_t *cpusetp)
lwpid_t tid;
int error;
tid = TID(td);
error = cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
(td == curthread) ? -1 : tid, cpusetsize, cpusetp);
if (error == -1)
error = errno;
if (td == curthread) {
error = cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
(td == curthread) ? -1 : tid, cpusetsize, cpusetp);
if (error == -1)
error = errno;
} else if ((error = _thr_find_thread(curthread, td, 0)) == 0) {
tid = TID(td);
error = cpuset_getaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
(td == curthread) ? -1 : tid, cpusetsize, cpusetp);
if (error == -1)
error = errno;
THR_THREAD_UNLOCK(curthread, td);
}
return (error);
}

View File

@@ -132,22 +132,23 @@ _pthread_attr_destroy(pthread_attr_t *attr)
__weak_reference(_pthread_attr_get_np, pthread_attr_get_np);
int
_pthread_attr_get_np(pthread_t pid, pthread_attr_t *dst)
_pthread_attr_get_np(pthread_t pthread, pthread_attr_t *dst)
{
struct pthread *curthread;
struct pthread_attr attr;
int ret;
if (pid == NULL || dst == NULL || *dst == NULL)
if (pthread == NULL || dst == NULL || *dst == NULL)
return (EINVAL);
curthread = _get_curthread();
if ((ret = _thr_ref_add(curthread, pid, /*include dead*/0)) != 0)
if ((ret = _thr_find_thread(curthread, pthread, /*include dead*/0)) != 0)
return (ret);
attr = pid->attr;
if (pid->tlflags & TLFLAGS_DETACHED)
attr = pthread->attr;
if (pthread->flags & THR_FLAGS_DETACHED)
attr.flags |= PTHREAD_DETACHED;
_thr_ref_delete(curthread, pid);
THR_THREAD_UNLOCK(curthread, pthread);
memcpy(*dst, &attr, sizeof(struct pthread_attr));
/* XXX */
(*dst)->cpuset = NULL;

View File

@@ -60,18 +60,16 @@ _pthread_cancel(pthread_t pthread)
/*
* POSIX says _pthread_cancel should be async cancellation safe.
* _thr_ref_add and _thr_ref_delete will enter and leave critical
* _thr_find_thread and THR_THREAD_UNLOCK will enter and leave critical
* region automatically.
*/
if ((ret = _thr_ref_add(curthread, pthread, 0)) == 0) {
THR_THREAD_LOCK(curthread, pthread);
if ((ret = _thr_find_thread(curthread, pthread, 0)) == 0) {
if (!pthread->cancel_pending) {
pthread->cancel_pending = 1;
if (pthread->state != PS_DEAD)
_thr_send_sig(pthread, SIGCANCEL);
}
THR_THREAD_UNLOCK(curthread, pthread);
_thr_ref_delete(curthread, pthread);
}
return (ret);
}

View File

@@ -125,7 +125,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
new_thread->state = PS_RUNNING;
if (new_thread->attr.flags & PTHREAD_CREATE_DETACHED)
new_thread->tlflags |= TLFLAGS_DETACHED;
new_thread->flags |= THR_FLAGS_DETACHED;
/* Add the new thread. */
new_thread->refcount = 1;
@@ -185,16 +185,14 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
THR_THREAD_LOCK(curthread, new_thread);
new_thread->state = PS_DEAD;
new_thread->tid = TID_TERMINATED;
new_thread->flags |= THR_FLAGS_DETACHED;
new_thread->refcount--;
if (new_thread->flags & THR_FLAGS_NEED_SUSPEND) {
new_thread->cycle++;
_thr_umtx_wake(&new_thread->cycle, INT_MAX, 0);
}
THR_THREAD_UNLOCK(curthread, new_thread);
THREAD_LIST_LOCK(curthread);
_thread_active_threads--;
new_thread->tlflags |= TLFLAGS_DETACHED;
_thr_ref_delete_unlocked(curthread, new_thread);
THREAD_LIST_UNLOCK(curthread);
_thr_try_gc(curthread, new_thread); /* thread lock released */
atomic_add_int(&_thread_active_threads, -1);
} else if (locked) {
if (cpusetp != NULL) {
if (cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID,
@@ -202,22 +200,17 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
ret = errno;
/* kill the new thread */
new_thread->force_exit = 1;
THR_THREAD_UNLOCK(curthread, new_thread);
new_thread->flags |= THR_FLAGS_DETACHED;
_thr_try_gc(curthread, new_thread);
/* thread lock released */
goto out;
}
}
_thr_report_creation(curthread, new_thread);
THR_THREAD_UNLOCK(curthread, new_thread);
out:
if (ret) {
THREAD_LIST_LOCK(curthread);
new_thread->tlflags |= TLFLAGS_DETACHED;
THR_GCLIST_ADD(new_thread);
THREAD_LIST_UNLOCK(curthread);
}
}
out:
if (ret)
(*thread) = 0;
return (ret);

View File

@@ -47,25 +47,21 @@ _pthread_detach(pthread_t pthread)
if (pthread == NULL)
return (EINVAL);
THREAD_LIST_LOCK(curthread);
if ((rval = _thr_find_thread(curthread, pthread,
/*include dead*/1)) != 0) {
THREAD_LIST_UNLOCK(curthread);
return (rval);
}
/* Check if the thread is already detached or has a joiner. */
if ((pthread->tlflags & TLFLAGS_DETACHED) != 0 ||
if ((pthread->flags & THR_FLAGS_DETACHED) != 0 ||
(pthread->joiner != NULL)) {
THREAD_LIST_UNLOCK(curthread);
THR_THREAD_UNLOCK(curthread, pthread);
return (EINVAL);
}
/* Flag the thread as detached. */
pthread->tlflags |= TLFLAGS_DETACHED;
if (pthread->state == PS_DEAD)
THR_GCLIST_ADD(pthread);
THREAD_LIST_UNLOCK(curthread);
pthread->flags |= THR_FLAGS_DETACHED;
_thr_try_gc(curthread, pthread); /* thread lock released */
return (0);
}

View File

@@ -108,37 +108,34 @@ _pthread_exit_mask(void *status, sigset_t *mask)
if (!_thr_isthreaded())
exit(0);
THREAD_LIST_LOCK(curthread);
_thread_active_threads--;
if (_thread_active_threads == 0) {
THREAD_LIST_UNLOCK(curthread);
if (atomic_fetchadd_int(&_thread_active_threads, -1) == 1) {
exit(0);
/* Never reach! */
}
THREAD_LIST_UNLOCK(curthread);
/* Tell malloc that the thread is exiting. */
_malloc_thread_cleanup();
THREAD_LIST_LOCK(curthread);
THR_LOCK(curthread);
curthread->state = PS_DEAD;
if (curthread->flags & THR_FLAGS_NEED_SUSPEND) {
curthread->cycle++;
_thr_umtx_wake(&curthread->cycle, INT_MAX, 0);
}
THR_UNLOCK(curthread);
/*
* Thread was created with initial refcount 1, we drop the
* reference count to allow it to be garbage collected.
*/
curthread->refcount--;
if (curthread->tlflags & TLFLAGS_DETACHED)
THR_GCLIST_ADD(curthread);
THREAD_LIST_UNLOCK(curthread);
_thr_try_gc(curthread, curthread); /* thread lock released */
if (!curthread->force_exit && SHOULD_REPORT_EVENT(curthread, TD_DEATH))
_thr_report_death(curthread);
#if defined(_PTHREADS_INVARIANTS)
if (THR_IN_CRITICAL(curthread))
PANIC("thread exits with resources held!");
#endif
/*
* Kernel will do wakeup at the address, so joiner thread
* will be resumed if it is sleeping at the address.

View File

@@ -178,13 +178,13 @@ _fork(void)
/* Child process */
errsave = errno;
curthread->cancel_pending = 0;
curthread->flags &= ~THR_FLAGS_NEED_SUSPEND;
curthread->flags &= ~(THR_FLAGS_NEED_SUSPEND|THR_FLAGS_DETACHED);
/*
* Thread list will be reinitialized, and later we call
* _libpthread_init(), it will add us back to list.
*/
curthread->tlflags &= ~(TLFLAGS_IN_TDLIST | TLFLAGS_DETACHED);
curthread->tlflags &= ~TLFLAGS_IN_TDLIST;
/* child is a new kernel thread. */
thr_self(&curthread->tid);

View File

@@ -111,7 +111,7 @@ struct umutex _mutex_static_lock = DEFAULT_UMUTEX;
struct umutex _cond_static_lock = DEFAULT_UMUTEX;
struct umutex _rwlock_static_lock = DEFAULT_UMUTEX;
struct umutex _keytable_lock = DEFAULT_UMUTEX;
struct umutex _thr_list_lock = DEFAULT_UMUTEX;
struct urwlock _thr_list_lock = DEFAULT_URWLOCK;
struct umutex _thr_event_lock = DEFAULT_UMUTEX;
int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);

View File

@@ -43,12 +43,12 @@ __weak_reference(_pthread_timedjoin_np, pthread_timedjoin_np);
static void backout_join(void *arg)
{
struct pthread *curthread = _get_curthread();
struct pthread *pthread = (struct pthread *)arg;
struct pthread *curthread = _get_curthread();
THREAD_LIST_LOCK(curthread);
THR_THREAD_LOCK(curthread, pthread);
pthread->joiner = NULL;
THREAD_LIST_UNLOCK(curthread);
THR_THREAD_LOCK(curthread, pthread);
}
int
@@ -88,23 +88,23 @@ join_common(pthread_t pthread, void **thread_return,
if (pthread == curthread)
return (EDEADLK);
THREAD_LIST_LOCK(curthread);
if ((ret = _thr_find_thread(curthread, pthread, 1)) != 0) {
ret = ESRCH;
} else if ((pthread->tlflags & TLFLAGS_DETACHED) != 0) {
if ((ret = _thr_find_thread(curthread, pthread, 1)) != 0)
return (ESRCH);
if ((pthread->flags & THR_FLAGS_DETACHED) != 0) {
ret = EINVAL;
} else if (pthread->joiner != NULL) {
/* Multiple joiners are not supported. */
ret = ENOTSUP;
}
if (ret) {
THREAD_LIST_UNLOCK(curthread);
THR_THREAD_UNLOCK(curthread, pthread);
return (ret);
}
/* Set the running thread to be the joiner: */
pthread->joiner = curthread;
THREAD_LIST_UNLOCK(curthread);
THR_THREAD_UNLOCK(curthread, pthread);
THR_CLEANUP_PUSH(curthread, backout_join, pthread);
_thr_cancel_enter(curthread);
@@ -131,17 +131,16 @@ join_common(pthread_t pthread, void **thread_return,
THR_CLEANUP_POP(curthread, 0);
if (ret == ETIMEDOUT) {
THREAD_LIST_LOCK(curthread);
THR_THREAD_LOCK(curthread, pthread);
pthread->joiner = NULL;
THREAD_LIST_UNLOCK(curthread);
THR_THREAD_UNLOCK(curthread, pthread);
} else {
ret = 0;
tmp = pthread->ret;
THREAD_LIST_LOCK(curthread);
pthread->tlflags |= TLFLAGS_DETACHED;
THR_THREAD_LOCK(curthread, pthread);
pthread->flags |= THR_FLAGS_DETACHED;
pthread->joiner = NULL;
THR_GCLIST_ADD(pthread);
THREAD_LIST_UNLOCK(curthread);
_thr_try_gc(curthread, pthread); /* thread lock released */
if (thread_return != NULL)
*thread_return = tmp;

View File

@@ -54,11 +54,15 @@ _pthread_kill(pthread_t pthread, int sig)
* signal is valid (signal 0 specifies error checking only) and
* not being ignored:
*/
else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
else if (curthread == pthread) {
if (sig > 0)
_thr_send_sig(pthread, sig);
ret = 0;
} if ((ret = _thr_find_thread(curthread, pthread, /*include dead*/0))
== 0) {
if (sig > 0)
_thr_send_sig(pthread, sig);
_thr_ref_delete(curthread, pthread);
THR_THREAD_UNLOCK(curthread, pthread);
}
/* Return the completion status: */

View File

@@ -79,7 +79,7 @@ _thr_list_init(void)
_gc_count = 0;
total_threads = 1;
_thr_umutex_init(&_thr_list_lock);
_thr_urwlock_init(&_thr_list_lock);
TAILQ_INIT(&_thread_list);
TAILQ_INIT(&free_threadq);
_thr_umutex_init(&free_thread_lock);
@@ -98,7 +98,7 @@ _thr_gc(struct pthread *curthread)
TAILQ_HEAD(, pthread) worklist;
TAILQ_INIT(&worklist);
THREAD_LIST_LOCK(curthread);
THREAD_LIST_WRLOCK(curthread);
/* Check the threads waiting for GC. */
TAILQ_FOREACH_SAFE(td, &_thread_gc_list, gcle, td_next) {
@@ -107,17 +107,8 @@ _thr_gc(struct pthread *curthread)
continue;
}
_thr_stack_free(&td->attr);
if (((td->tlflags & TLFLAGS_DETACHED) != 0) &&
(td->refcount == 0)) {
THR_GCLIST_REMOVE(td);
/*
* The thread has detached and is no longer
* referenced. It is safe to remove all
* remnants of the thread.
*/
THR_LIST_REMOVE(td);
TAILQ_INSERT_HEAD(&worklist, td, gcle);
}
THR_GCLIST_REMOVE(td);
TAILQ_INSERT_HEAD(&worklist, td, gcle);
}
THREAD_LIST_UNLOCK(curthread);
@@ -228,10 +219,10 @@ thr_destroy(struct pthread *curthread __unused, struct pthread *thread)
void
_thr_link(struct pthread *curthread, struct pthread *thread)
{
THREAD_LIST_LOCK(curthread);
THREAD_LIST_WRLOCK(curthread);
THR_LIST_ADD(thread);
_thread_active_threads++;
THREAD_LIST_UNLOCK(curthread);
atomic_add_int(&_thread_active_threads, 1);
}
/*
@@ -240,10 +231,10 @@ _thr_link(struct pthread *curthread, struct pthread *thread)
void
_thr_unlink(struct pthread *curthread, struct pthread *thread)
{
THREAD_LIST_LOCK(curthread);
THREAD_LIST_WRLOCK(curthread);
THR_LIST_REMOVE(thread);
_thread_active_threads--;
THREAD_LIST_UNLOCK(curthread);
atomic_add_int(&_thread_active_threads, -1);
}
void
@@ -290,12 +281,11 @@ _thr_ref_add(struct pthread *curthread, struct pthread *thread,
/* Invalid thread: */
return (EINVAL);
THREAD_LIST_LOCK(curthread);
if ((ret = _thr_find_thread(curthread, thread, include_dead)) == 0) {
thread->refcount++;
THR_CRITICAL_ENTER(curthread);
THR_THREAD_UNLOCK(curthread, thread);
}
THREAD_LIST_UNLOCK(curthread);
/* Return zero if the thread exists: */
return (ret);
@@ -304,41 +294,56 @@ _thr_ref_add(struct pthread *curthread, struct pthread *thread,
void
_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
{
THREAD_LIST_LOCK(curthread);
_thr_ref_delete_unlocked(curthread, thread);
THREAD_LIST_UNLOCK(curthread);
THR_THREAD_LOCK(curthread, thread);
thread->refcount--;
_thr_try_gc(curthread, thread);
THR_CRITICAL_LEAVE(curthread);
}
/* entered with thread lock held, exit with thread lock released */
void
_thr_ref_delete_unlocked(struct pthread *curthread,
struct pthread *thread)
_thr_try_gc(struct pthread *curthread, struct pthread *thread)
{
if (thread != NULL) {
thread->refcount--;
if ((thread->refcount == 0) && thread->state == PS_DEAD &&
(thread->tlflags & TLFLAGS_DETACHED) != 0)
if (THR_SHOULD_GC(thread)) {
THR_REF_ADD(curthread, thread);
THR_THREAD_UNLOCK(curthread, thread);
THREAD_LIST_WRLOCK(curthread);
THR_THREAD_LOCK(curthread, thread);
THR_REF_DEL(curthread, thread);
if (THR_SHOULD_GC(thread)) {
THR_LIST_REMOVE(thread);
THR_GCLIST_ADD(thread);
THR_CRITICAL_LEAVE(curthread);
}
THR_THREAD_UNLOCK(curthread, thread);
THREAD_LIST_UNLOCK(curthread);
} else {
THR_THREAD_UNLOCK(curthread, thread);
}
}
/* return with thread lock held if thread is found */
int
_thr_find_thread(struct pthread *curthread __unused, struct pthread *thread,
_thr_find_thread(struct pthread *curthread, struct pthread *thread,
int include_dead)
{
struct pthread *pthread;
int ret;
if (thread == NULL)
/* Invalid thread: */
return (EINVAL);
ret = 0;
THREAD_LIST_RDLOCK(curthread);
pthread = _thr_hash_find(thread);
if (pthread) {
THR_THREAD_LOCK(curthread, pthread);
if (include_dead == 0 && pthread->state == PS_DEAD) {
pthread = NULL;
}
THR_THREAD_UNLOCK(curthread, pthread);
ret = ESRCH;
}
} else {
ret = ESRCH;
}
/* Return zero if the thread exists: */
return ((pthread != NULL) ? 0 : ESRCH);
THREAD_LIST_UNLOCK(curthread);
return (ret);
}

View File

@@ -415,13 +415,13 @@ struct pthread {
#define THR_FLAGS_PRIVATE 0x0001
#define THR_FLAGS_NEED_SUSPEND 0x0002 /* thread should be suspended */
#define THR_FLAGS_SUSPENDED 0x0004 /* thread is suspended */
#define THR_FLAGS_IN_GCLIST 0x0004 /* thread in gc list */
#define THR_FLAGS_DETACHED 0x0008 /* thread is detached */
/* Thread list flags; only set with thread list lock held. */
int tlflags;
#define TLFLAGS_GC_SAFE 0x0001 /* thread safe for cleaning */
#define TLFLAGS_IN_TDLIST 0x0002 /* thread in all thread list */
#define TLFLAGS_IN_GCLIST 0x0004 /* thread in gc list */
#define TLFLAGS_DETACHED 0x0008 /* thread is detached */
/* Queue of currently owned NORMAL or PRIO_INHERIT type mutexes. */
struct mutex_queue mutexq;
@@ -463,6 +463,10 @@ struct pthread {
td_event_msg_t event_buf;
};
#define THR_SHOULD_GC(thrd) \
((thrd)->refcount == 0 && (thrd)->state == PS_DEAD && \
((thrd)->flags & THR_FLAGS_DETACHED) != 0)
#define THR_IN_CRITICAL(thrd) \
(((thrd)->locklevel > 0) || \
((thrd)->critical_count > 0))
@@ -517,14 +521,23 @@ do { \
#define THR_THREAD_LOCK(curthrd, thr) THR_LOCK_ACQUIRE(curthrd, &(thr)->lock)
#define THR_THREAD_UNLOCK(curthrd, thr) THR_LOCK_RELEASE(curthrd, &(thr)->lock)
#define THREAD_LIST_LOCK(curthrd) \
#define THREAD_LIST_RDLOCK(curthrd) \
do { \
THR_LOCK_ACQUIRE((curthrd), &_thr_list_lock); \
(curthrd)->locklevel++; \
_thr_rwl_rdlock(&_thr_list_lock); \
} while (0)
#define THREAD_LIST_WRLOCK(curthrd) \
do { \
(curthrd)->locklevel++; \
_thr_rwl_wrlock(&_thr_list_lock); \
} while (0)
#define THREAD_LIST_UNLOCK(curthrd) \
do { \
THR_LOCK_RELEASE((curthrd), &_thr_list_lock); \
_thr_rwl_unlock(&_thr_list_lock); \
(curthrd)->locklevel--; \
_thr_ast(curthrd); \
} while (0)
/*
@@ -546,20 +559,30 @@ do { \
} \
} while (0)
#define THR_GCLIST_ADD(thrd) do { \
if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) == 0) { \
if (((thrd)->flags & THR_FLAGS_IN_GCLIST) == 0) { \
TAILQ_INSERT_HEAD(&_thread_gc_list, thrd, gcle);\
(thrd)->tlflags |= TLFLAGS_IN_GCLIST; \
(thrd)->flags |= THR_FLAGS_IN_GCLIST; \
_gc_count++; \
} \
} while (0)
#define THR_GCLIST_REMOVE(thrd) do { \
if (((thrd)->tlflags & TLFLAGS_IN_GCLIST) != 0) { \
if (((thrd)->flags & THR_FLAGS_IN_GCLIST) != 0) { \
TAILQ_REMOVE(&_thread_gc_list, thrd, gcle); \
(thrd)->tlflags &= ~TLFLAGS_IN_GCLIST; \
(thrd)->flags &= ~THR_FLAGS_IN_GCLIST; \
_gc_count--; \
} \
} while (0)
#define THR_REF_ADD(curthread, pthread) { \
THR_CRITICAL_ENTER(curthread); \
pthread->refcount++; \
} while (0)
#define THR_REF_DEL(curthread, pthread) { \
pthread->refcount--; \
THR_CRITICAL_LEAVE(curthread); \
} while (0)
#define GC_NEEDED() (_gc_count >= 5)
#define SHOULD_REPORT_EVENT(curthr, e) \
@@ -618,7 +641,7 @@ extern struct umutex _mutex_static_lock __hidden;
extern struct umutex _cond_static_lock __hidden;
extern struct umutex _rwlock_static_lock __hidden;
extern struct umutex _keytable_lock __hidden;
extern struct umutex _thr_list_lock __hidden;
extern struct urwlock _thr_list_lock __hidden;
extern struct umutex _thr_event_lock __hidden;
/*
@@ -673,6 +696,7 @@ int _thr_setscheduler(lwpid_t, int, const struct sched_param *) __hidden;
void _thr_signal_prefork(void) __hidden;
void _thr_signal_postfork(void) __hidden;
void _thr_signal_postfork_child(void) __hidden;
void _thr_try_gc(struct pthread *, struct pthread *) __hidden;
int _rtp_to_schedparam(const struct rtprio *rtp, int *policy,
struct sched_param *param) __hidden;
int _schedparam_to_rtp(int policy, const struct sched_param *param,

View File

@@ -50,12 +50,10 @@ _pthread_resume_np(pthread_t thread)
int ret;
/* Add a reference to the thread: */
if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0)) == 0) {
if ((ret = _thr_find_thread(curthread, thread, /*include dead*/0)) == 0) {
/* Lock the threads scheduling queue: */
THR_THREAD_LOCK(curthread, thread);
resume_common(thread);
THR_THREAD_UNLOCK(curthread, thread);
_thr_ref_delete(curthread, thread);
}
return (ret);
}
@@ -67,7 +65,7 @@ _pthread_resume_all_np(void)
struct pthread *thread;
/* Take the thread list lock: */
THREAD_LIST_LOCK(curthread);
THREAD_LIST_RDLOCK(curthread);
TAILQ_FOREACH(thread, &_thread_list, tle) {
if (thread != curthread) {

View File

@@ -154,7 +154,7 @@ _thr_stack_alloc(struct pthread_attr *attr)
* Use the garbage collector lock for synchronization of the
* spare stack lists and allocations from usrstack.
*/
THREAD_LIST_LOCK(curthread);
THREAD_LIST_WRLOCK(curthread);
/*
* If the stack and guard sizes are default, try to allocate a stack
* from the default-size stack cache:

View File

@@ -76,7 +76,7 @@ _pthread_suspend_all_np(void)
struct pthread *thread;
int ret;
THREAD_LIST_LOCK(curthread);
THREAD_LIST_RDLOCK(curthread);
TAILQ_FOREACH(thread, &_thread_list, tle) {
if (thread != curthread) {
@@ -96,13 +96,15 @@ _pthread_suspend_all_np(void)
THR_THREAD_LOCK(curthread, thread);
ret = suspend_common(curthread, thread, 0);
if (ret == 0) {
/* Can not suspend, try to wait */
thread->refcount++;
THREAD_LIST_UNLOCK(curthread);
/* Can not suspend, try to wait */
THR_REF_ADD(curthread, thread);
suspend_common(curthread, thread, 1);
THR_THREAD_UNLOCK(curthread, thread);
THREAD_LIST_LOCK(curthread);
_thr_ref_delete_unlocked(curthread, thread);
THR_REF_DEL(curthread, thread);
_thr_try_gc(curthread, thread);
/* thread lock released */
THREAD_LIST_RDLOCK(curthread);
/*
* Because we were blocked, things may have
* been changed, we have to restart the
@@ -127,8 +129,8 @@ suspend_common(struct pthread *curthread, struct pthread *thread,
!(thread->flags & THR_FLAGS_SUSPENDED)) {
thread->flags |= THR_FLAGS_NEED_SUSPEND;
tmp = thread->cycle;
THR_THREAD_UNLOCK(curthread, thread);
_thr_send_sig(thread, SIGCANCEL);
THR_THREAD_UNLOCK(curthread, thread);
if (waitok) {
_thr_umtx_wait_uint(&thread->cycle, tmp, NULL, 0);
THR_THREAD_LOCK(curthread, thread);