1. Allocate and free lock-related resources in _thr_alloc and _thr_free

to avoid a potential memory leak. Also fix a bug in pthread_create: contention
   scope should be inherited when PTHREAD_INHERIT_SCHED is set, and check the
   right field for PTHREAD_INHERIT_SCHED — the scheduling inherit flag is in sched_inherit.
2. Execute hooks registered by atexit() on the thread stack, not on the
   scheduler stack.
3. Simplify some code in _kse_single_thread by calling the xxx_destroy functions.

Reviewed by: deischen
This commit is contained in:
David Xu 2003-09-14 22:52:16 +00:00
parent 06925cea04
commit a3a398b57d
8 changed files with 174 additions and 188 deletions

View File

@ -99,7 +99,6 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
struct kse *kse = NULL;
struct kse_group *kseg = NULL;
kse_critical_t crit;
int i;
int ret = 0;
if (_thr_initial == NULL)
@ -127,8 +126,20 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
if (attr == NULL || *attr == NULL)
/* Use the default thread attributes: */
new_thread->attr = _pthread_attr_default;
else
else {
new_thread->attr = *(*attr);
if ((*attr)->sched_inherit == PTHREAD_INHERIT_SCHED) {
/* inherit scheduling contention scop */
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
else
new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
/*
* scheduling policy and scheduling parameters will be
* inherited in following code.
*/
}
}
#ifdef SYSTEM_SCOPE_ONLY
new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
#endif
@ -199,7 +210,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
* Check if this thread is to inherit the scheduling
* attributes from its parent:
*/
if ((new_thread->attr.flags & PTHREAD_INHERIT_SCHED) != 0) {
if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
/*
* Copy the scheduling attributes.
* Lock the scheduling lock to get consistent
@ -230,26 +241,17 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Initialize the mutex queue: */
TAILQ_INIT(&new_thread->mutexq);
/*
* Initialize thread locking.
* Lock initializing needs malloc, so don't
* enter critical region before doing this!
*/
if (_lock_init(&new_thread->lock, LCK_ADAPTIVE,
_thr_lock_wait, _thr_lock_wakeup) != 0)
PANIC("Cannot initialize thread lock");
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_init(&new_thread->lockusers[i],
(void *)new_thread);
_LCK_SET_PRIVATE2(&new_thread->lockusers[i],
(void *)new_thread);
}
/* Initialise hooks in the thread structure: */
new_thread->specific = NULL;
new_thread->specific_data_count = 0;
new_thread->cleanup = NULL;
new_thread->flags = 0;
new_thread->continuation = NULL;
new_thread->wakeup_time.tv_sec = -1;
new_thread->lock_switch = 0;
sigemptyset(&new_thread->sigpend);
new_thread->check_pending = 0;
new_thread->locklevel = 0;
if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
new_thread->state = PS_SUSPENDED;

View File

@ -86,9 +86,9 @@ void
_pthread_exit(void *status)
{
struct pthread *curthread = _get_curthread();
kse_critical_t crit;
struct kse *curkse;
if (!_kse_isthreaded())
exit(0);
/* Check if this thread is already in the process of exiting: */
if ((curthread->flags & THR_FLAGS_EXITING) != 0) {
char msg[128];
@ -119,9 +119,27 @@ _pthread_exit(void *status)
/* Run the thread-specific data destructors: */
_thread_cleanupspecific();
}
if (!_kse_isthreaded())
exit(0);
crit = _kse_critical_enter();
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
/* Use thread_list_lock */
_thr_active_threads--;
#ifdef SYSTEM_SCOPE_ONLY
if (_thr_active_threads == 0) {
#else
if (_thr_active_threads == 1) {
#endif
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
exit(0);
/* Never reach! */
}
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
/* This thread will never be re-scheduled. */
THR_LOCK_SWITCH(curthread);
KSE_LOCK(curkse);
THR_SET_STATE(curthread, PS_DEAD);
_thr_sched_switch_unlocked(curthread);
/* Never reach! */

View File

@ -121,7 +121,6 @@ static TAILQ_HEAD(, pthread) free_threadq;
static struct lock thread_lock;
static int free_thread_count = 0;
static int inited = 0;
static int active_threads = 1;
static int active_kse_count = 0;
static int active_kseg_count = 0;
static u_int64_t next_uniqueid = 1;
@ -158,6 +157,7 @@ static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
struct pthread_sigframe *psf);
static int thr_timedout(struct pthread *thread, struct timespec *curtime);
static void thr_unlink(struct pthread *thread);
static void thr_destroy(struct pthread *thread);
static void thread_gc(struct pthread *thread);
static void kse_gc(struct pthread *thread);
static void kseg_gc(struct pthread *thread);
@ -213,7 +213,7 @@ _kse_single_thread(struct pthread *curthread)
* dump core.
*/
sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
active_threads = 1;
_thr_active_threads = 1;
/*
* Enter a loop to remove and free all threads other than
@ -232,11 +232,7 @@ _kse_single_thread(struct pthread *curthread)
_thr_stack_free(&thread->attr);
if (thread->specific != NULL)
free(thread->specific);
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_destroy(&thread->lockusers[i]);
}
_lock_destroy(&thread->lock);
free(thread);
thr_destroy(thread);
}
}
@ -253,69 +249,42 @@ _kse_single_thread(struct pthread *curthread)
/* Free the free KSEs: */
while ((kse = TAILQ_FIRST(&free_kseq)) != NULL) {
TAILQ_REMOVE(&free_kseq, kse, k_qe);
for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
_lockuser_destroy(&kse->k_lockusers[i]);
}
_lock_destroy(&kse->k_lock);
_kcb_dtor(kse->k_kcb);
if (kse->k_stack.ss_sp != NULL)
free(kse->k_stack.ss_sp);
free(kse);
kse_destroy(kse);
}
free_kse_count = 0;
/* Free the active KSEs: */
while ((kse = TAILQ_FIRST(&active_kseq)) != NULL) {
TAILQ_REMOVE(&active_kseq, kse, k_qe);
for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
_lockuser_destroy(&kse->k_lockusers[i]);
}
_lock_destroy(&kse->k_lock);
if (kse->k_stack.ss_sp != NULL)
free(kse->k_stack.ss_sp);
free(kse);
kse_destroy(kse);
}
active_kse_count = 0;
/* Free the free KSEGs: */
while ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) {
TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe);
_lock_destroy(&kseg->kg_lock);
_pq_free(&kseg->kg_schedq.sq_runq);
free(kseg);
kseg_destroy(kseg);
}
free_kseg_count = 0;
/* Free the active KSEGs: */
while ((kseg = TAILQ_FIRST(&active_kse_groupq)) != NULL) {
TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe);
_lock_destroy(&kseg->kg_lock);
_pq_free(&kseg->kg_schedq.sq_runq);
free(kseg);
kseg_destroy(kseg);
}
active_kseg_count = 0;
/* Free the free threads. */
while ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
TAILQ_REMOVE(&free_threadq, thread, tle);
if (thread->specific != NULL)
free(thread->specific);
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_destroy(&thread->lockusers[i]);
}
_lock_destroy(&thread->lock);
free(thread);
thr_destroy(thread);
}
free_thread_count = 0;
/* Free the to-be-gc'd threads. */
while ((thread = TAILQ_FIRST(&_thread_gc_list)) != NULL) {
TAILQ_REMOVE(&_thread_gc_list, thread, gcle);
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_destroy(&thread->lockusers[i]);
}
_lock_destroy(&thread->lock);
free(thread);
thr_destroy(thread);
}
TAILQ_INIT(&gc_ksegq);
_gc_count = 0;
@ -361,7 +330,7 @@ _kse_single_thread(struct pthread *curthread)
*/
sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
curthread->kse->k_kcb->kcb_kmbx.km_curthread = NULL;
active_threads = 1;
_thr_active_threads = 1;
#endif
}
@ -1247,19 +1216,6 @@ thr_cleanup(struct kse *curkse, struct pthread *thread)
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
DBG_MSG("Adding thread %p to GC list\n", thread);
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
/* Use thread_list_lock */
active_threads--;
#ifdef SYSTEM_SCOPE_ONLY
if (active_threads == 0) {
#else
if (active_threads == 1) {
#endif
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
/* Possible use a signalcontext wrapper to call exit ? */
curkse->k_curthread = thread;
_tcb_set(curkse->k_kcb, thread->tcb);
exit(0);
}
THR_GCLIST_ADD(thread);
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
if (sys_scope) {
@ -2347,8 +2303,9 @@ kse_destroy(struct kse *kse)
struct pthread *
_thr_alloc(struct pthread *curthread)
{
kse_critical_t crit;
struct pthread *thread = NULL;
kse_critical_t crit;
struct pthread *thread = NULL;
int i;
if (curthread != NULL) {
if (GC_NEEDED())
@ -2370,6 +2327,21 @@ _thr_alloc(struct pthread *curthread)
if ((thread->tcb = _tcb_ctor(thread)) == NULL) {
free(thread);
thread = NULL;
} else {
/*
* Initialize thread locking.
* Lock initializing needs malloc, so don't
* enter critical region before doing this!
*/
if (_lock_init(&thread->lock, LCK_ADAPTIVE,
_thr_lock_wait, _thr_lock_wakeup) != 0)
PANIC("Cannot initialize thread lock");
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_init(&thread->lockusers[i],
(void *)thread);
_LCK_SET_PRIVATE2(&thread->lockusers[i],
(void *)thread);
}
}
}
return (thread);
@ -2379,23 +2351,11 @@ void
_thr_free(struct pthread *curthread, struct pthread *thread)
{
kse_critical_t crit;
int i;
DBG_MSG("Freeing thread %p\n", thread);
if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) {
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_destroy(&thread->lockusers[i]);
}
_lock_destroy(&thread->lock);
_tcb_dtor(thread->tcb);
free(thread);
}
else {
/* Reinitialize any important fields here. */
thread->lock_switch = 0;
sigemptyset(&thread->sigpend);
thread->check_pending = 0;
thr_destroy(thread);
} else {
/* Add the thread to the free thread list. */
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock);
@ -2406,6 +2366,18 @@ _thr_free(struct pthread *curthread, struct pthread *thread)
}
}
/*
 * Release all resources owned by a thread structure and free it.
 * Destroys the per-level lockusers and the thread lock (both set up in
 * _thr_alloc), runs the TCB destructor, then frees the structure itself.
 * Caller is responsible for having unlinked the thread from all queues.
 */
static void
thr_destroy(struct pthread *thread)
{
int i;
/* Tear down the lockusers for every locking level. */
for (i = 0; i < MAX_THR_LOCKLEVEL; i++)
_lockuser_destroy(&thread->lockusers[i]);
_lock_destroy(&thread->lock);
/* Destroy the thread control block before freeing the structure. */
_tcb_dtor(thread->tcb);
free(thread);
}
/*
* Add an active thread:
*
@ -2424,7 +2396,6 @@ thr_link(struct pthread *thread)
crit = _kse_critical_enter();
curkse = _get_curkse();
curthread = _get_curthread();
thread->sigmask = curthread->sigmask;
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
/*
* Initialize the unique id (which GDB uses to track
@ -2433,7 +2404,7 @@ thr_link(struct pthread *thread)
*/
thread->uniqueid = next_uniqueid++;
THR_LIST_ADD(thread);
active_threads++;
_thr_active_threads++;
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}
@ -2451,7 +2422,7 @@ thr_unlink(struct pthread *thread)
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
THR_LIST_REMOVE(thread);
active_threads--;
_thr_active_threads--;
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}

View File

@ -994,6 +994,8 @@ SCLASS TAILQ_HEAD(, pthread) _thread_list
SCLASS TAILQ_HEAD(, pthread) _thread_gc_list
SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list));
SCLASS int _thr_active_threads SCLASS_PRESET(1);
/* Default thread attributes: */
SCLASS struct pthread_attr _pthread_attr_default
SCLASS_PRESET({

View File

@ -99,7 +99,6 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
struct kse *kse = NULL;
struct kse_group *kseg = NULL;
kse_critical_t crit;
int i;
int ret = 0;
if (_thr_initial == NULL)
@ -127,8 +126,20 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
if (attr == NULL || *attr == NULL)
/* Use the default thread attributes: */
new_thread->attr = _pthread_attr_default;
else
else {
new_thread->attr = *(*attr);
if ((*attr)->sched_inherit == PTHREAD_INHERIT_SCHED) {
/* inherit scheduling contention scop */
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
else
new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
/*
* scheduling policy and scheduling parameters will be
* inherited in following code.
*/
}
}
#ifdef SYSTEM_SCOPE_ONLY
new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
#endif
@ -199,7 +210,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
* Check if this thread is to inherit the scheduling
* attributes from its parent:
*/
if ((new_thread->attr.flags & PTHREAD_INHERIT_SCHED) != 0) {
if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
/*
* Copy the scheduling attributes.
* Lock the scheduling lock to get consistent
@ -230,26 +241,17 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
/* Initialize the mutex queue: */
TAILQ_INIT(&new_thread->mutexq);
/*
* Initialize thread locking.
* Lock initializing needs malloc, so don't
* enter critical region before doing this!
*/
if (_lock_init(&new_thread->lock, LCK_ADAPTIVE,
_thr_lock_wait, _thr_lock_wakeup) != 0)
PANIC("Cannot initialize thread lock");
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_init(&new_thread->lockusers[i],
(void *)new_thread);
_LCK_SET_PRIVATE2(&new_thread->lockusers[i],
(void *)new_thread);
}
/* Initialise hooks in the thread structure: */
new_thread->specific = NULL;
new_thread->specific_data_count = 0;
new_thread->cleanup = NULL;
new_thread->flags = 0;
new_thread->continuation = NULL;
new_thread->wakeup_time.tv_sec = -1;
new_thread->lock_switch = 0;
sigemptyset(&new_thread->sigpend);
new_thread->check_pending = 0;
new_thread->locklevel = 0;
if (new_thread->attr.suspend == THR_CREATE_SUSPENDED) {
new_thread->state = PS_SUSPENDED;

View File

@ -86,9 +86,9 @@ void
_pthread_exit(void *status)
{
struct pthread *curthread = _get_curthread();
kse_critical_t crit;
struct kse *curkse;
if (!_kse_isthreaded())
exit(0);
/* Check if this thread is already in the process of exiting: */
if ((curthread->flags & THR_FLAGS_EXITING) != 0) {
char msg[128];
@ -119,9 +119,27 @@ _pthread_exit(void *status)
/* Run the thread-specific data destructors: */
_thread_cleanupspecific();
}
if (!_kse_isthreaded())
exit(0);
crit = _kse_critical_enter();
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
/* Use thread_list_lock */
_thr_active_threads--;
#ifdef SYSTEM_SCOPE_ONLY
if (_thr_active_threads == 0) {
#else
if (_thr_active_threads == 1) {
#endif
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
exit(0);
/* Never reach! */
}
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
/* This thread will never be re-scheduled. */
THR_LOCK_SWITCH(curthread);
KSE_LOCK(curkse);
THR_SET_STATE(curthread, PS_DEAD);
_thr_sched_switch_unlocked(curthread);
/* Never reach! */

View File

@ -121,7 +121,6 @@ static TAILQ_HEAD(, pthread) free_threadq;
static struct lock thread_lock;
static int free_thread_count = 0;
static int inited = 0;
static int active_threads = 1;
static int active_kse_count = 0;
static int active_kseg_count = 0;
static u_int64_t next_uniqueid = 1;
@ -158,6 +157,7 @@ static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
struct pthread_sigframe *psf);
static int thr_timedout(struct pthread *thread, struct timespec *curtime);
static void thr_unlink(struct pthread *thread);
static void thr_destroy(struct pthread *thread);
static void thread_gc(struct pthread *thread);
static void kse_gc(struct pthread *thread);
static void kseg_gc(struct pthread *thread);
@ -213,7 +213,7 @@ _kse_single_thread(struct pthread *curthread)
* dump core.
*/
sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
active_threads = 1;
_thr_active_threads = 1;
/*
* Enter a loop to remove and free all threads other than
@ -232,11 +232,7 @@ _kse_single_thread(struct pthread *curthread)
_thr_stack_free(&thread->attr);
if (thread->specific != NULL)
free(thread->specific);
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_destroy(&thread->lockusers[i]);
}
_lock_destroy(&thread->lock);
free(thread);
thr_destroy(thread);
}
}
@ -253,69 +249,42 @@ _kse_single_thread(struct pthread *curthread)
/* Free the free KSEs: */
while ((kse = TAILQ_FIRST(&free_kseq)) != NULL) {
TAILQ_REMOVE(&free_kseq, kse, k_qe);
for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
_lockuser_destroy(&kse->k_lockusers[i]);
}
_lock_destroy(&kse->k_lock);
_kcb_dtor(kse->k_kcb);
if (kse->k_stack.ss_sp != NULL)
free(kse->k_stack.ss_sp);
free(kse);
kse_destroy(kse);
}
free_kse_count = 0;
/* Free the active KSEs: */
while ((kse = TAILQ_FIRST(&active_kseq)) != NULL) {
TAILQ_REMOVE(&active_kseq, kse, k_qe);
for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
_lockuser_destroy(&kse->k_lockusers[i]);
}
_lock_destroy(&kse->k_lock);
if (kse->k_stack.ss_sp != NULL)
free(kse->k_stack.ss_sp);
free(kse);
kse_destroy(kse);
}
active_kse_count = 0;
/* Free the free KSEGs: */
while ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) {
TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe);
_lock_destroy(&kseg->kg_lock);
_pq_free(&kseg->kg_schedq.sq_runq);
free(kseg);
kseg_destroy(kseg);
}
free_kseg_count = 0;
/* Free the active KSEGs: */
while ((kseg = TAILQ_FIRST(&active_kse_groupq)) != NULL) {
TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe);
_lock_destroy(&kseg->kg_lock);
_pq_free(&kseg->kg_schedq.sq_runq);
free(kseg);
kseg_destroy(kseg);
}
active_kseg_count = 0;
/* Free the free threads. */
while ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
TAILQ_REMOVE(&free_threadq, thread, tle);
if (thread->specific != NULL)
free(thread->specific);
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_destroy(&thread->lockusers[i]);
}
_lock_destroy(&thread->lock);
free(thread);
thr_destroy(thread);
}
free_thread_count = 0;
/* Free the to-be-gc'd threads. */
while ((thread = TAILQ_FIRST(&_thread_gc_list)) != NULL) {
TAILQ_REMOVE(&_thread_gc_list, thread, gcle);
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_destroy(&thread->lockusers[i]);
}
_lock_destroy(&thread->lock);
free(thread);
thr_destroy(thread);
}
TAILQ_INIT(&gc_ksegq);
_gc_count = 0;
@ -361,7 +330,7 @@ _kse_single_thread(struct pthread *curthread)
*/
sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
curthread->kse->k_kcb->kcb_kmbx.km_curthread = NULL;
active_threads = 1;
_thr_active_threads = 1;
#endif
}
@ -1247,19 +1216,6 @@ thr_cleanup(struct kse *curkse, struct pthread *thread)
KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
DBG_MSG("Adding thread %p to GC list\n", thread);
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
/* Use thread_list_lock */
active_threads--;
#ifdef SYSTEM_SCOPE_ONLY
if (active_threads == 0) {
#else
if (active_threads == 1) {
#endif
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
/* Possible use a signalcontext wrapper to call exit ? */
curkse->k_curthread = thread;
_tcb_set(curkse->k_kcb, thread->tcb);
exit(0);
}
THR_GCLIST_ADD(thread);
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
if (sys_scope) {
@ -2347,8 +2303,9 @@ kse_destroy(struct kse *kse)
struct pthread *
_thr_alloc(struct pthread *curthread)
{
kse_critical_t crit;
struct pthread *thread = NULL;
kse_critical_t crit;
struct pthread *thread = NULL;
int i;
if (curthread != NULL) {
if (GC_NEEDED())
@ -2370,6 +2327,21 @@ _thr_alloc(struct pthread *curthread)
if ((thread->tcb = _tcb_ctor(thread)) == NULL) {
free(thread);
thread = NULL;
} else {
/*
* Initialize thread locking.
* Lock initializing needs malloc, so don't
* enter critical region before doing this!
*/
if (_lock_init(&thread->lock, LCK_ADAPTIVE,
_thr_lock_wait, _thr_lock_wakeup) != 0)
PANIC("Cannot initialize thread lock");
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_init(&thread->lockusers[i],
(void *)thread);
_LCK_SET_PRIVATE2(&thread->lockusers[i],
(void *)thread);
}
}
}
return (thread);
@ -2379,23 +2351,11 @@ void
_thr_free(struct pthread *curthread, struct pthread *thread)
{
kse_critical_t crit;
int i;
DBG_MSG("Freeing thread %p\n", thread);
if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) {
for (i = 0; i < MAX_THR_LOCKLEVEL; i++) {
_lockuser_destroy(&thread->lockusers[i]);
}
_lock_destroy(&thread->lock);
_tcb_dtor(thread->tcb);
free(thread);
}
else {
/* Reinitialize any important fields here. */
thread->lock_switch = 0;
sigemptyset(&thread->sigpend);
thread->check_pending = 0;
thr_destroy(thread);
} else {
/* Add the thread to the free thread list. */
crit = _kse_critical_enter();
KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock);
@ -2406,6 +2366,18 @@ _thr_free(struct pthread *curthread, struct pthread *thread)
}
}
/*
 * Release all resources owned by a thread structure and free it.
 * Destroys the per-level lockusers and the thread lock (both set up in
 * _thr_alloc), runs the TCB destructor, then frees the structure itself.
 * Caller is responsible for having unlinked the thread from all queues.
 */
static void
thr_destroy(struct pthread *thread)
{
int i;
/* Tear down the lockusers for every locking level. */
for (i = 0; i < MAX_THR_LOCKLEVEL; i++)
_lockuser_destroy(&thread->lockusers[i]);
_lock_destroy(&thread->lock);
/* Destroy the thread control block before freeing the structure. */
_tcb_dtor(thread->tcb);
free(thread);
}
/*
* Add an active thread:
*
@ -2424,7 +2396,6 @@ thr_link(struct pthread *thread)
crit = _kse_critical_enter();
curkse = _get_curkse();
curthread = _get_curthread();
thread->sigmask = curthread->sigmask;
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
/*
* Initialize the unique id (which GDB uses to track
@ -2433,7 +2404,7 @@ thr_link(struct pthread *thread)
*/
thread->uniqueid = next_uniqueid++;
THR_LIST_ADD(thread);
active_threads++;
_thr_active_threads++;
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}
@ -2451,7 +2422,7 @@ thr_unlink(struct pthread *thread)
curkse = _get_curkse();
KSE_LOCK_ACQUIRE(curkse, &_thread_list_lock);
THR_LIST_REMOVE(thread);
active_threads--;
_thr_active_threads--;
KSE_LOCK_RELEASE(curkse, &_thread_list_lock);
_kse_critical_leave(crit);
}

View File

@ -994,6 +994,8 @@ SCLASS TAILQ_HEAD(, pthread) _thread_list
SCLASS TAILQ_HEAD(, pthread) _thread_gc_list
SCLASS_PRESET(TAILQ_HEAD_INITIALIZER(_thread_gc_list));
SCLASS int _thr_active_threads SCLASS_PRESET(1);
/* Default thread attributes: */
SCLASS struct pthread_attr _pthread_attr_default
SCLASS_PRESET({