Add a couple of asserts to the pthread_cond_*() functions to ensure the
(low-level) lock level is 0.  Thus far, the threads implementation
doesn't use mutexes or condition variables internally, so the lock level
should be 0.
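
As an illustration, a minimal stand-alone sketch of the assertion
pattern the diffs below add.  THR_ASSERT here is a hypothetical
stand-in for the real macro in thr_private.h, assumed to abort the
process when the check fails:

	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in for libpthread's THR_ASSERT macro (assumption: the
	 * real one panics the process when the condition is false). */
	#define THR_ASSERT(cond, msg) \
		do { if (!(cond)) { fprintf(stderr, "%s\n", msg); abort(); } } while (0)

	struct pthread_stub {
		int locklevel;		/* # of low-level locks held */
	};

	int
	main(void)
	{
		struct pthread_stub curthread = { 0 };

		/* The pattern added at the top of each cond-var entry
		 * point: no low-level locks may be held on entry. */
		THR_ASSERT(curthread.locklevel == 0,
		    "cv_timedwait: locklevel is not zero!");
		puts("locklevel is 0, as required");
		return (0);
	}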

Save the return value from _thr_schedule_add() when scheduling a new
thread, and use it to return an error from pthread_create().
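
The error path has this shape (a sketch with stand-in names; the real
code uses struct pthread, _thr_schedule_add(), and free_thread() as
shown in the diffs below):

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct thread_stub { int id; };	/* stand-in for struct pthread */

	/* Stand-in for _thr_schedule_add(); pretend the kernel refused
	 * to start a new KSE. */
	static int
	schedule_add_stub(struct thread_stub *t)
	{
		(void)t;
		return (EAGAIN);
	}

	/* The shape of the fix: capture the scheduler's return value,
	 * undo the allocation on failure, and hand the error back. */
	static int
	create_stub(struct thread_stub **out)
	{
		struct thread_stub *t;
		int ret;

		if ((t = malloc(sizeof(*t))) == NULL)
			return (EAGAIN);
		ret = schedule_add_stub(t);
		if (ret != 0) {
			free(t);	/* like free_thread() in the diff */
			return (ret);
		}
		*out = t;
		return (0);
	}

	int
	main(void)
	{
		struct thread_stub *t;

		printf("create_stub -> %d\n", create_stub(&t));
		return (0);
	}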

Change the maximum sleep time for an idle KSE from 2 minutes to 1 minute.
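
The cap is a simple clamp on the computed timespec; a sketch of the
idea (not the library's exact kse_wait() code):

	#include <stdio.h>
	#include <time.h>

	/* Clamp a relative sleep interval to max_sec seconds; a negative
	 * tv_sec (no wakeup scheduled) also gets the full cap. */
	static void
	clamp_sleep(struct timespec *ts, time_t max_sec)
	{
		if (ts->tv_sec < 0 || ts->tv_sec > max_sec) {
			ts->tv_sec = max_sec;
			ts->tv_nsec = 0;
		}
	}

	int
	main(void)
	{
		struct timespec ts = { 300, 0 };	/* wants 5 minutes */

		clamp_sleep(&ts, 60);			/* idle KSE cap */
		printf("sleep for %lld s\n", (long long)ts.tv_sec);
		return (0);
	}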

Maintain a count of the number of KSEs within a KSEG.
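
This is the usual pattern of keeping a counter in lockstep with a
TAILQ; a small stand-alone sketch (the struct names are illustrative
stand-ins for struct kse and struct kse_group, and callers are assumed
to hold the appropriate lock):

	#include <sys/queue.h>
	#include <stdio.h>

	struct kse_stub {
		TAILQ_ENTRY(kse_stub) k_kgqe;
	};

	struct kseg_stub {
		TAILQ_HEAD(, kse_stub) kg_kseq;
		int kg_ksecount;	/* # of assigned KSEs */
	};

	/* Update the counter at the same time as the queue, as the
	 * commit does for kg_ksecount. */
	static void
	kseg_add_kse(struct kseg_stub *kseg, struct kse_stub *kse)
	{
		TAILQ_INSERT_TAIL(&kseg->kg_kseq, kse, k_kgqe);
		kseg->kg_ksecount++;
	}

	static void
	kseg_remove_kse(struct kseg_stub *kseg, struct kse_stub *kse)
	{
		TAILQ_REMOVE(&kseg->kg_kseq, kse, k_kgqe);
		kseg->kg_ksecount--;
	}

	int
	main(void)
	{
		struct kseg_stub kseg;
		struct kse_stub kse;

		TAILQ_INIT(&kseg.kg_kseq);
		kseg.kg_ksecount = 0;
		kseg_add_kse(&kseg, &kse);
		printf("ksecount = %d\n", kseg.kg_ksecount);
		kseg_remove_kse(&kseg, &kse);
		printf("ksecount = %d\n", kseg.kg_ksecount);
		return (0);
	}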

With these changes, system scope threads seem to work, but heavy
use of them crashes the kernel (presumably VM bugs).
deischen 2003-04-22 20:28:33 +00:00
parent 18f0a39a3f
commit 23350dd1f5
10 changed files with 144 additions and 42 deletions

View File

@@ -373,6 +373,8 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
 	int	unlock_mutex = 1;
 	int	seqno;
 
+	THR_ASSERT(curthread->locklevel == 0,
+	    "cv_timedwait: locklevel is not zero!");
 	_thr_enter_cancellation_point(curthread);
 
 	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
@@ -575,6 +577,8 @@ _pthread_cond_signal(pthread_cond_t * cond)
 	struct pthread	*pthread;
 	int		rval = 0;
 
+	THR_ASSERT(curthread->locklevel == 0,
+	    "cv_timedwait: locklevel is not zero!");
 	if (cond == NULL)
 		rval = EINVAL;
 	/*
@@ -632,6 +636,8 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
 	struct pthread	*pthread;
 	int		rval = 0;
 
+	THR_ASSERT(curthread->locklevel == 0,
+	    "cv_timedwait: locklevel is not zero!");
 	if (cond == NULL)
 		rval = EINVAL;
 	/*

View File

@@ -57,6 +57,7 @@ int _thread_ctx_offset = OFF(tmbx.tm_context);
 int _thread_PS_RUNNING_value = PS_RUNNING;
 int _thread_PS_DEAD_value = PS_DEAD;
 
+static void	free_thread(struct pthread *curthread, struct pthread *thread);
 static int	create_stack(struct pthread_attr *pattr);
 static void	thread_start(struct pthread *curthread,
 		    void *(*start_routine) (void *), void *arg);
@@ -295,8 +296,10 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
 			 * Schedule the new thread starting a new KSEG/KSE
 			 * pair if necessary.
 			 */
-			_thr_schedule_add(curthread, new_thread);
+			ret = _thr_schedule_add(curthread, new_thread);
 			_kse_critical_leave(crit);
+			if (ret != 0)
+				free_thread(curthread, new_thread);
 
 			/* Return a pointer to the thread structure: */
 			(*thread) = new_thread;
@@ -307,6 +310,17 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
 	return (ret);
 }
 
+static void
+free_thread(struct pthread *curthread, struct pthread *thread)
+{
+	if ((thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
+		/* Free the KSE and KSEG. */
+		_kseg_free(thread->kseg);
+		_kse_free(curthread, thread->kse);
+	}
+	_thr_free(curthread, thread);
+}
+
 static int
 create_stack(struct pthread_attr *pattr)
 {

View File

@@ -264,6 +264,9 @@ _libpthread_init(struct pthread *curthread)
 		PANIC("Can't allocate initial kseg.");
 	_kse_initial->k_schedq = &_kse_initial->k_kseg->kg_schedq;
 	TAILQ_INSERT_TAIL(&_kse_initial->k_kseg->kg_kseq, _kse_initial, k_kgqe);
+	_kse_initial->k_kseg->kg_ksecount = 1;
 
 	/* Set the initial thread. */
 	if (curthread == NULL) {
 		/* Create and initialize the initial thread. */

View File

@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/atomic.h>
 #include <assert.h>
 #include <errno.h>
+#include <signal.h>
 #include <stdlib.h>
 #include <string.h>
@@ -134,7 +135,7 @@ static void kse_sched_single(struct kse *curkse);
 static void	kse_switchout_thread(struct kse *kse, struct pthread *thread);
 static void	kse_wait(struct kse *kse, struct pthread *td_wait);
 static void	kse_free_unlocked(struct kse *kse);
-static void	kseg_free(struct kse_group *kseg);
+static void	kseg_free_unlocked(struct kse_group *kseg);
 static void	kseg_init(struct kse_group *kseg);
 static void	kseg_reinit(struct kse_group *kseg);
 static void	kse_waitq_insert(struct pthread *thread);
@@ -404,7 +405,7 @@ _kse_lock_wait(struct lock *lock, struct lockuser *lu)
 	 * is granted.
 	 */
 	saved_flags = curkse->k_mbx.km_flags;
-	curkse->k_mbx.km_flags |= KMF_NOUPCALL;
+	curkse->k_mbx.km_flags |= KMF_NOUPCALL | KMF_NOCOMPLETED;
 	kse_release(&ts);
 	curkse->k_mbx.km_flags = saved_flags;
@@ -703,6 +704,9 @@ kse_sched_multi(struct kse *curkse)
 	struct pthread_sigframe *curframe;
 	int ret;
 
+	THR_ASSERT(curkse->k_mbx.km_curthread == NULL,
+	    "Mailbox not null in kse_sched_multi");
+
 	/* Check for first time initialization: */
 	if ((curkse->k_flags & KF_INITIALIZED) == 0) {
 		/* Setup this KSEs specific data. */
@@ -714,8 +718,10 @@ kse_sched_multi(struct kse *curkse)
 	}
 
 	/* This may have returned from a kse_release(). */
-	if (KSE_WAITING(curkse))
+	if (KSE_WAITING(curkse)) {
+		DBG_MSG("Entered upcall when KSE is waiting.");
 		KSE_CLEAR_WAIT(curkse);
+	}
 
 	/* Lock the scheduling lock. */
 	KSE_SCHED_LOCK(curkse, curkse->k_kseg);
@@ -1067,7 +1073,7 @@ _thr_gc(struct pthread *curthread)
 		crit = _kse_critical_enter();
 		KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
 		kse_free_unlocked(td->kse);
-		kseg_free(td->kseg);
+		kseg_free_unlocked(td->kseg);
 		KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
 		_kse_critical_leave(crit);
 	}
@@ -1080,12 +1086,13 @@ _thr_gc(struct pthread *curthread)
 /*
  * Only new threads that are running or suspended may be scheduled.
  */
-void
+int
 _thr_schedule_add(struct pthread *curthread, struct pthread *newthread)
 {
 	struct kse *curkse;
 	kse_critical_t crit;
 	int need_start;
+	int ret;
 
 	/*
 	 * If this is the first time creating a thread, make sure
@@ -1106,6 +1113,7 @@ _thr_schedule_add(struct pthread *curthread, struct pthread *newthread)
 		KSEG_THRQ_ADD(newthread->kseg, newthread);
 		TAILQ_INSERT_TAIL(&newthread->kseg->kg_kseq, newthread->kse,
 		    k_kgqe);
+		newthread->kseg->kg_ksecount = 1;
 		if (newthread->state == PS_RUNNING)
 			THR_RUNQ_INSERT_TAIL(newthread);
 		newthread->kse->k_curthread = NULL;
@@ -1119,7 +1127,9 @@ _thr_schedule_add(struct pthread *curthread, struct pthread *newthread)
 		curkse = _get_curkse();
 		_ksd_setprivate(&newthread->kse->k_ksd);
 		newthread->kse->k_flags |= KF_INITIALIZED;
-		kse_create(&newthread->kse->k_mbx, 1);
+		ret = kse_create(&newthread->kse->k_mbx, 1);
+		if (ret != 0)
+			ret = errno;
 		_ksd_setprivate(&curkse->k_ksd);
 		_kse_critical_leave(crit);
 	}
@@ -1156,7 +1166,9 @@ _thr_schedule_add(struct pthread *curthread, struct pthread *newthread)
 			 */
 			KSE_WAKEUP(newthread->kse);
 		}
+		ret = 0;
 	}
+	return (ret);
 }
 
 void
@@ -1420,13 +1432,13 @@ kse_wait(struct kse *kse, struct pthread *td_wait)
 	KSE_GET_TOD(kse, &ts);
 
 	if ((td_wait == NULL) || (td_wait->wakeup_time.tv_sec < 0)) {
-		/* Limit sleep to no more than 2 minutes. */
-		ts_sleep.tv_sec = 120;
+		/* Limit sleep to no more than 1 minute. */
+		ts_sleep.tv_sec = 60;
 		ts_sleep.tv_nsec = 0;
 	} else {
 		TIMESPEC_SUB(&ts_sleep, &td_wait->wakeup_time, &ts);
-		if (ts_sleep.tv_sec > 120) {
-			ts_sleep.tv_sec = 120;
+		if (ts_sleep.tv_sec > 60) {
+			ts_sleep.tv_sec = 60;
 			ts_sleep.tv_nsec = 0;
 		}
 	}
@@ -1462,6 +1474,7 @@ kse_fini(struct kse *kse)
 		/* Remove this KSE from the KSEG's list of KSEs. */
 		KSE_SCHED_LOCK(kse, kse->k_kseg);
 		TAILQ_REMOVE(&kse->k_kseg->kg_kseq, kse, k_kgqe);
+		kse->k_kseg->kg_ksecount--;
 		if (TAILQ_EMPTY(&kse->k_kseg->kg_kseq))
 			free_kseg = kse->k_kseg;
 		KSE_SCHED_UNLOCK(kse, kse->k_kseg);
@@ -1472,7 +1485,7 @@ kse_fini(struct kse *kse)
 		 */
 		KSE_LOCK_ACQUIRE(kse, &kse_lock);
 		if (free_kseg != NULL)
-			kseg_free(free_kseg);
+			kseg_free_unlocked(free_kseg);
 		kse_free_unlocked(kse);
 		KSE_LOCK_RELEASE(kse, &kse_lock);
 		kse_exit();
@@ -1491,14 +1504,11 @@ kse_fini(struct kse *kse)
 		if ((active_kse_count > 1) &&
 		    (kse->k_kseg->kg_threadcount == 0)) {
 			KSE_SCHED_UNLOCK(kse, kse->k_kseg);
-			/*
-			 * XXX - We need a way for the KSE to do a timed
-			 * wait.
-			 */
 			kse_release(&ts);
+			/* The above never returns. */
 		}
-		KSE_SCHED_UNLOCK(kse, kse->k_kseg);
+		else
+			KSE_SCHED_UNLOCK(kse, kse->k_kseg);
 
 		/* There are no more threads; exit this process: */
 		if (kse->k_kseg->kg_threadcount == 0) {
@@ -1708,7 +1718,7 @@ _kseg_alloc(struct pthread *curthread)
  * no more threads that reference it.
  */
 static void
-kseg_free(struct kse_group *kseg)
+kseg_free_unlocked(struct kse_group *kseg)
 {
 	TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe);
 	TAILQ_INSERT_HEAD(&free_kse_groupq, kseg, kg_qe);
@@ -1716,6 +1726,20 @@ kseg_free(struct kse_group *kseg)
 	active_kseg_count--;
 }
 
+void
+_kseg_free(struct kse_group *kseg)
+{
+	struct kse *curkse;
+	kse_critical_t crit;
+
+	crit = _kse_critical_enter();
+	curkse = _get_curkse();
+	KSE_LOCK_ACQUIRE(curkse, &kse_lock);
+	kseg_free_unlocked(kseg);
+	KSE_LOCK_RELEASE(curkse, &kse_lock);
+	_kse_critical_leave(crit);
+}
+
 /*
  * Allocate a new KSE.
  *
@@ -1747,8 +1771,8 @@ _kse_alloc(struct pthread *curthread)
 		if (kse != NULL) {
 			TAILQ_REMOVE(&free_kseq, kse, k_qe);
 			free_kse_count--;
-			active_kse_count++;
 			TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
+			active_kse_count++;
 		}
 		KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
 		_kse_critical_leave(crit);
@@ -1817,8 +1841,8 @@ _kse_alloc(struct pthread *curthread)
 		return (NULL);
 	}
 	kse->k_flags = 0;
-	active_kse_count++;
 	TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
+	active_kse_count++;
 	if (curthread != NULL) {
 		KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
 		_kse_critical_leave(crit);
@@ -1830,6 +1854,7 @@ _kse_alloc(struct pthread *curthread)
 void
 kse_free_unlocked(struct kse *kse)
 {
+	TAILQ_REMOVE(&active_kseq, kse, k_qe);
 	active_kse_count--;
 	kse->k_kseg = NULL;
 	kse->k_flags &= ~KF_INITIALIZED;
@@ -1868,6 +1893,7 @@ kseg_reinit(struct kse_group *kseg)
 	TAILQ_INIT(&kseg->kg_threadq);
 	TAILQ_INIT(&kseg->kg_schedq.sq_waitq);
 	kseg->kg_threadcount = 0;
+	kseg->kg_ksecount = 0;
 	kseg->kg_idle_kses = 0;
 	kseg->kg_flags = 0;
 }

View File

@@ -215,6 +215,7 @@ struct kse_group {
 	struct sched_queue	kg_schedq;	/* scheduling queue */
 	struct lock		kg_lock;
 	int			kg_threadcount;	/* # of assigned threads */
+	int			kg_ksecount;	/* # of assigned KSEs */
 	int			kg_idle_kses;
 	int			kg_flags;
 #define	KGF_SINGLE_THREAD	0x0001	/* scope system kse group */
@@ -1023,6 +1024,7 @@ void _kse_single_thread(struct pthread *);
 void	_kse_start(struct kse *);
 int	_kse_setthreaded(int);
 int	_kse_isthreaded(void);
+void	_kseg_free(struct kse_group *);
 int	_mutex_cv_lock(pthread_mutex_t *);
 int	_mutex_cv_unlock(pthread_mutex_t *);
 void	_mutex_lock_backout(struct pthread *);
@@ -1060,7 +1062,7 @@ void _thr_lock_wait(struct lock *lock, struct lockuser *lu);
 void	_thr_lock_wakeup(struct lock *lock, struct lockuser *lu);
 int	_thr_ref_add(struct pthread *, struct pthread *, int);
 void	_thr_ref_delete(struct pthread *, struct pthread *);
-void	_thr_schedule_add(struct pthread *, struct pthread *);
+int	_thr_schedule_add(struct pthread *, struct pthread *);
 void	_thr_schedule_remove(struct pthread *, struct pthread *);
 void	_thr_setrunnable(struct pthread *curthread, struct pthread *thread);
 void	_thr_setrunnable_unlocked(struct pthread *thread);

View File

@@ -373,6 +373,8 @@ _pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
 	int	unlock_mutex = 1;
 	int	seqno;
 
+	THR_ASSERT(curthread->locklevel == 0,
+	    "cv_timedwait: locklevel is not zero!");
 	_thr_enter_cancellation_point(curthread);
 
 	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
@@ -575,6 +577,8 @@ _pthread_cond_signal(pthread_cond_t * cond)
 	struct pthread	*pthread;
 	int		rval = 0;
 
+	THR_ASSERT(curthread->locklevel == 0,
+	    "cv_timedwait: locklevel is not zero!");
 	if (cond == NULL)
 		rval = EINVAL;
 	/*
@@ -632,6 +636,8 @@ _pthread_cond_broadcast(pthread_cond_t * cond)
 	struct pthread	*pthread;
 	int		rval = 0;
 
+	THR_ASSERT(curthread->locklevel == 0,
+	    "cv_timedwait: locklevel is not zero!");
 	if (cond == NULL)
 		rval = EINVAL;
 	/*

View File

@@ -57,6 +57,7 @@ int _thread_ctx_offset = OFF(tmbx.tm_context);
 int _thread_PS_RUNNING_value = PS_RUNNING;
 int _thread_PS_DEAD_value = PS_DEAD;
 
+static void	free_thread(struct pthread *curthread, struct pthread *thread);
 static int	create_stack(struct pthread_attr *pattr);
 static void	thread_start(struct pthread *curthread,
 		    void *(*start_routine) (void *), void *arg);
@@ -295,8 +296,10 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
 			 * Schedule the new thread starting a new KSEG/KSE
 			 * pair if necessary.
 			 */
-			_thr_schedule_add(curthread, new_thread);
+			ret = _thr_schedule_add(curthread, new_thread);
 			_kse_critical_leave(crit);
+			if (ret != 0)
+				free_thread(curthread, new_thread);
 
 			/* Return a pointer to the thread structure: */
 			(*thread) = new_thread;
@@ -307,6 +310,17 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
 	return (ret);
 }
 
+static void
+free_thread(struct pthread *curthread, struct pthread *thread)
+{
+	if ((thread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
+		/* Free the KSE and KSEG. */
+		_kseg_free(thread->kseg);
+		_kse_free(curthread, thread->kse);
+	}
+	_thr_free(curthread, thread);
+}
+
 static int
 create_stack(struct pthread_attr *pattr)
 {

View File

@@ -264,6 +264,9 @@ _libpthread_init(struct pthread *curthread)
 		PANIC("Can't allocate initial kseg.");
 	_kse_initial->k_schedq = &_kse_initial->k_kseg->kg_schedq;
 	TAILQ_INSERT_TAIL(&_kse_initial->k_kseg->kg_kseq, _kse_initial, k_kgqe);
+	_kse_initial->k_kseg->kg_ksecount = 1;
 
 	/* Set the initial thread. */
 	if (curthread == NULL) {
 		/* Create and initialize the initial thread. */

View File

@@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
 #include <machine/atomic.h>
 #include <assert.h>
 #include <errno.h>
+#include <signal.h>
 #include <stdlib.h>
 #include <string.h>
@@ -134,7 +135,7 @@ static void kse_sched_single(struct kse *curkse);
 static void	kse_switchout_thread(struct kse *kse, struct pthread *thread);
 static void	kse_wait(struct kse *kse, struct pthread *td_wait);
 static void	kse_free_unlocked(struct kse *kse);
-static void	kseg_free(struct kse_group *kseg);
+static void	kseg_free_unlocked(struct kse_group *kseg);
 static void	kseg_init(struct kse_group *kseg);
 static void	kseg_reinit(struct kse_group *kseg);
 static void	kse_waitq_insert(struct pthread *thread);
@@ -404,7 +405,7 @@ _kse_lock_wait(struct lock *lock, struct lockuser *lu)
 	 * is granted.
 	 */
 	saved_flags = curkse->k_mbx.km_flags;
-	curkse->k_mbx.km_flags |= KMF_NOUPCALL;
+	curkse->k_mbx.km_flags |= KMF_NOUPCALL | KMF_NOCOMPLETED;
 	kse_release(&ts);
 	curkse->k_mbx.km_flags = saved_flags;
@@ -703,6 +704,9 @@ kse_sched_multi(struct kse *curkse)
 	struct pthread_sigframe *curframe;
 	int ret;
 
+	THR_ASSERT(curkse->k_mbx.km_curthread == NULL,
+	    "Mailbox not null in kse_sched_multi");
+
 	/* Check for first time initialization: */
 	if ((curkse->k_flags & KF_INITIALIZED) == 0) {
 		/* Setup this KSEs specific data. */
@@ -714,8 +718,10 @@ kse_sched_multi(struct kse *curkse)
 	}
 
 	/* This may have returned from a kse_release(). */
-	if (KSE_WAITING(curkse))
+	if (KSE_WAITING(curkse)) {
+		DBG_MSG("Entered upcall when KSE is waiting.");
 		KSE_CLEAR_WAIT(curkse);
+	}
 
 	/* Lock the scheduling lock. */
 	KSE_SCHED_LOCK(curkse, curkse->k_kseg);
@@ -1067,7 +1073,7 @@ _thr_gc(struct pthread *curthread)
 		crit = _kse_critical_enter();
 		KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
 		kse_free_unlocked(td->kse);
-		kseg_free(td->kseg);
+		kseg_free_unlocked(td->kseg);
 		KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
 		_kse_critical_leave(crit);
 	}
@@ -1080,12 +1086,13 @@ _thr_gc(struct pthread *curthread)
 /*
  * Only new threads that are running or suspended may be scheduled.
  */
-void
+int
 _thr_schedule_add(struct pthread *curthread, struct pthread *newthread)
 {
 	struct kse *curkse;
 	kse_critical_t crit;
 	int need_start;
+	int ret;
 
 	/*
 	 * If this is the first time creating a thread, make sure
@@ -1106,6 +1113,7 @@ _thr_schedule_add(struct pthread *curthread, struct pthread *newthread)
 		KSEG_THRQ_ADD(newthread->kseg, newthread);
 		TAILQ_INSERT_TAIL(&newthread->kseg->kg_kseq, newthread->kse,
 		    k_kgqe);
+		newthread->kseg->kg_ksecount = 1;
 		if (newthread->state == PS_RUNNING)
 			THR_RUNQ_INSERT_TAIL(newthread);
 		newthread->kse->k_curthread = NULL;
@@ -1119,7 +1127,9 @@ _thr_schedule_add(struct pthread *curthread, struct pthread *newthread)
 		curkse = _get_curkse();
 		_ksd_setprivate(&newthread->kse->k_ksd);
 		newthread->kse->k_flags |= KF_INITIALIZED;
-		kse_create(&newthread->kse->k_mbx, 1);
+		ret = kse_create(&newthread->kse->k_mbx, 1);
+		if (ret != 0)
+			ret = errno;
 		_ksd_setprivate(&curkse->k_ksd);
 		_kse_critical_leave(crit);
 	}
@@ -1156,7 +1166,9 @@ _thr_schedule_add(struct pthread *curthread, struct pthread *newthread)
 			 */
 			KSE_WAKEUP(newthread->kse);
 		}
+		ret = 0;
 	}
+	return (ret);
 }
 
 void
@@ -1420,13 +1432,13 @@ kse_wait(struct kse *kse, struct pthread *td_wait)
 	KSE_GET_TOD(kse, &ts);
 
 	if ((td_wait == NULL) || (td_wait->wakeup_time.tv_sec < 0)) {
-		/* Limit sleep to no more than 2 minutes. */
-		ts_sleep.tv_sec = 120;
+		/* Limit sleep to no more than 1 minute. */
+		ts_sleep.tv_sec = 60;
 		ts_sleep.tv_nsec = 0;
 	} else {
 		TIMESPEC_SUB(&ts_sleep, &td_wait->wakeup_time, &ts);
-		if (ts_sleep.tv_sec > 120) {
-			ts_sleep.tv_sec = 120;
+		if (ts_sleep.tv_sec > 60) {
+			ts_sleep.tv_sec = 60;
 			ts_sleep.tv_nsec = 0;
 		}
 	}
@@ -1462,6 +1474,7 @@ kse_fini(struct kse *kse)
 		/* Remove this KSE from the KSEG's list of KSEs. */
 		KSE_SCHED_LOCK(kse, kse->k_kseg);
 		TAILQ_REMOVE(&kse->k_kseg->kg_kseq, kse, k_kgqe);
+		kse->k_kseg->kg_ksecount--;
 		if (TAILQ_EMPTY(&kse->k_kseg->kg_kseq))
 			free_kseg = kse->k_kseg;
 		KSE_SCHED_UNLOCK(kse, kse->k_kseg);
@@ -1472,7 +1485,7 @@ kse_fini(struct kse *kse)
 		 */
 		KSE_LOCK_ACQUIRE(kse, &kse_lock);
 		if (free_kseg != NULL)
-			kseg_free(free_kseg);
+			kseg_free_unlocked(free_kseg);
 		kse_free_unlocked(kse);
 		KSE_LOCK_RELEASE(kse, &kse_lock);
 		kse_exit();
@@ -1491,14 +1504,11 @@ kse_fini(struct kse *kse)
 		if ((active_kse_count > 1) &&
 		    (kse->k_kseg->kg_threadcount == 0)) {
 			KSE_SCHED_UNLOCK(kse, kse->k_kseg);
-			/*
-			 * XXX - We need a way for the KSE to do a timed
-			 * wait.
-			 */
 			kse_release(&ts);
+			/* The above never returns. */
 		}
-		KSE_SCHED_UNLOCK(kse, kse->k_kseg);
+		else
+			KSE_SCHED_UNLOCK(kse, kse->k_kseg);
 
 		/* There are no more threads; exit this process: */
 		if (kse->k_kseg->kg_threadcount == 0) {
@@ -1708,7 +1718,7 @@ _kseg_alloc(struct pthread *curthread)
  * no more threads that reference it.
  */
 static void
-kseg_free(struct kse_group *kseg)
+kseg_free_unlocked(struct kse_group *kseg)
 {
 	TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe);
 	TAILQ_INSERT_HEAD(&free_kse_groupq, kseg, kg_qe);
@@ -1716,6 +1726,20 @@ kseg_free(struct kse_group *kseg)
 	active_kseg_count--;
 }
 
+void
+_kseg_free(struct kse_group *kseg)
+{
+	struct kse *curkse;
+	kse_critical_t crit;
+
+	crit = _kse_critical_enter();
+	curkse = _get_curkse();
+	KSE_LOCK_ACQUIRE(curkse, &kse_lock);
+	kseg_free_unlocked(kseg);
+	KSE_LOCK_RELEASE(curkse, &kse_lock);
+	_kse_critical_leave(crit);
+}
+
 /*
  * Allocate a new KSE.
 *
@@ -1747,8 +1771,8 @@ _kse_alloc(struct pthread *curthread)
 		if (kse != NULL) {
 			TAILQ_REMOVE(&free_kseq, kse, k_qe);
 			free_kse_count--;
-			active_kse_count++;
 			TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
+			active_kse_count++;
 		}
 		KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
 		_kse_critical_leave(crit);
@@ -1817,8 +1841,8 @@ _kse_alloc(struct pthread *curthread)
 		return (NULL);
 	}
 	kse->k_flags = 0;
-	active_kse_count++;
 	TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
+	active_kse_count++;
 	if (curthread != NULL) {
 		KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
 		_kse_critical_leave(crit);
@@ -1830,6 +1854,7 @@ _kse_alloc(struct pthread *curthread)
 void
 kse_free_unlocked(struct kse *kse)
 {
+	TAILQ_REMOVE(&active_kseq, kse, k_qe);
 	active_kse_count--;
 	kse->k_kseg = NULL;
 	kse->k_flags &= ~KF_INITIALIZED;
@@ -1868,6 +1893,7 @@ kseg_reinit(struct kse_group *kseg)
 	TAILQ_INIT(&kseg->kg_threadq);
 	TAILQ_INIT(&kseg->kg_schedq.sq_waitq);
 	kseg->kg_threadcount = 0;
+	kseg->kg_ksecount = 0;
 	kseg->kg_idle_kses = 0;
 	kseg->kg_flags = 0;
 }

View File

@@ -215,6 +215,7 @@ struct kse_group {
 	struct sched_queue	kg_schedq;	/* scheduling queue */
 	struct lock		kg_lock;
 	int			kg_threadcount;	/* # of assigned threads */
+	int			kg_ksecount;	/* # of assigned KSEs */
 	int			kg_idle_kses;
 	int			kg_flags;
#define	KGF_SINGLE_THREAD	0x0001	/* scope system kse group */
@@ -1023,6 +1024,7 @@ void _kse_single_thread(struct pthread *);
 void	_kse_start(struct kse *);
 int	_kse_setthreaded(int);
 int	_kse_isthreaded(void);
+void	_kseg_free(struct kse_group *);
 int	_mutex_cv_lock(pthread_mutex_t *);
 int	_mutex_cv_unlock(pthread_mutex_t *);
 void	_mutex_lock_backout(struct pthread *);
@@ -1060,7 +1062,7 @@ void _thr_lock_wait(struct lock *lock, struct lockuser *lu);
 void	_thr_lock_wakeup(struct lock *lock, struct lockuser *lu);
 int	_thr_ref_add(struct pthread *, struct pthread *, int);
 void	_thr_ref_delete(struct pthread *, struct pthread *);
-void	_thr_schedule_add(struct pthread *, struct pthread *);
+int	_thr_schedule_add(struct pthread *, struct pthread *);
 void	_thr_schedule_remove(struct pthread *, struct pthread *);
 void	_thr_setrunnable(struct pthread *curthread, struct pthread *thread);
 void	_thr_setrunnable_unlocked(struct pthread *thread);