Use kernel-provided userspace condition variable to implement the pthread condition variable.
davidxu 2006-12-04 14:20:41 +00:00
parent 22a81fc246
commit cbb0fd8174
6 changed files with 104 additions and 95 deletions
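
For orientation: this change drops the userspace seqno/c_waiters/c_wakeups bookkeeping and backs each pthread condition variable with a kernel struct ucond (c_kerncv), guarded by the existing struct umutex c_lock. A waiter passes the locked umutex to the kernel, which unlocks it and puts the thread to sleep atomically; signal and broadcast simply ask the kernel to wake sleepers. A condensed sketch of the new wait path, using only names that appear in the thr_cond.c hunks below (error handling and the cancellation cleanup handler elided):

	THR_UMUTEX_LOCK(curthread, &cv->c_lock);     /* guards c_kerncv */
	ret = _mutex_cv_unlock(mutex, &info.count);  /* drop the pthread mutex */
	/*
	 * The kernel releases c_lock and blocks the thread atomically;
	 * on return the caller no longer owns c_lock.
	 */
	ret = _thr_ucond_wait(&cv->c_kerncv, &cv->c_lock, tsp, 0);
	if (ret == EINTR)
		ret = 0;
	_mutex_cv_lock(mutex, info.count);           /* retake the pthread mutex */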

View File

@@ -42,7 +42,7 @@ static inline void
testcancel(struct pthread *curthread)
{
if (__predict_false(SHOULD_CANCEL(curthread) &&
!THR_IN_CRITICAL(curthread)))
!THR_IN_CRITICAL(curthread) && curthread->cancel_defer == 0))
_pthread_exit(PTHREAD_CANCELED);
}
@@ -155,3 +155,24 @@ _thr_cancel_leave(struct pthread *curthread)
if (curthread->cancel_enable)
curthread->cancel_point--;
}
void
_thr_cancel_enter_defer(struct pthread *curthread)
{
if (curthread->cancel_enable) {
curthread->cancel_point++;
testcancel(curthread);
curthread->cancel_defer++;
}
}
void
_thr_cancel_leave_defer(struct pthread *curthread, int check)
{
if (curthread->cancel_enable) {
curthread->cancel_defer--;
if (check)
testcancel(curthread);
curthread->cancel_point--;
}
}
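
These _defer variants are intended to bracket the kernel condition-variable sleep: _thr_cancel_enter_defer() tests for a pending cancel before blocking and then raises cancel_defer, so the cancellation signal handler (thr_sig.c hunk below) only wakes the sleeping thread with thr_wake() rather than unwinding it mid-wait; _thr_cancel_leave_defer() re-tests when its check argument is nonzero. As used in cond_wait_common() in the thr_cond.c hunk below, check is the wait's return value, so a thread that was woken normally is not cancelled on the spot:

	THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &info);
	_thr_cancel_enter_defer(curthread);
	ret = _thr_ucond_wait(&cv->c_kerncv, &cv->c_lock, tsp, 1);
	_thr_cancel_leave_defer(curthread, ret);   /* re-test only if the wait returned an error */
	THR_CLEANUP_POP(curthread, 0);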

View File

@@ -67,16 +67,12 @@ cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
int rval = 0;
if ((pcond = (pthread_cond_t)
malloc(sizeof(struct pthread_cond))) == NULL) {
calloc(1, sizeof(struct pthread_cond))) == NULL) {
rval = ENOMEM;
} else {
/*
* Initialise the condition variable structure:
*/
_thr_umutex_init(&pcond->c_lock);
pcond->c_seqno = 0;
pcond->c_waiters = 0;
pcond->c_wakeups = 0;
if (cond_attr == NULL || *cond_attr == NULL) {
pcond->c_pshared = 0;
pcond->c_clockid = CLOCK_REALTIME;
@@ -84,6 +80,7 @@ cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
pcond->c_pshared = (*cond_attr)->c_pshared;
pcond->c_clockid = (*cond_attr)->c_clockid;
}
_thr_umutex_init(&pcond->c_lock);
*cond = pcond;
}
/* Return the completion status: */
@@ -118,31 +115,26 @@ _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
int
_pthread_cond_destroy(pthread_cond_t *cond)
{
struct pthread_cond *cv;
struct pthread *curthread = _get_curthread();
struct pthread_cond *cv;
int rval = 0;
if (*cond == NULL)
rval = EINVAL;
else {
cv = *cond;
THR_UMUTEX_LOCK(curthread, &cv->c_lock);
/* Lock the condition variable structure: */
THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
if ((*cond)->c_waiters + (*cond)->c_wakeups != 0) {
THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
if (cv->c_kerncv.c_has_waiters) {
THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
return (EBUSY);
}
/*
* NULL the caller's pointer now that the condition
* variable has been destroyed:
*/
cv = *cond;
*cond = NULL;
/* Unlock the condition variable structure: */
THR_LOCK_RELEASE(curthread, &cv->c_lock);
/* Free the cond lock structure: */
THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
/*
* Free the memory allocated for the condition
@@ -159,7 +151,6 @@ struct cond_cancel_info
{
pthread_mutex_t *mutex;
pthread_cond_t *cond;
long seqno;
int count;
};
@@ -168,21 +159,11 @@ cond_cancel_handler(void *arg)
{
struct pthread *curthread = _get_curthread();
struct cond_cancel_info *info = (struct cond_cancel_info *)arg;
pthread_cond_t cv;
pthread_cond_t cv;
cv = *(info->cond);
THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
if (cv->c_seqno != info->seqno && cv->c_wakeups != 0) {
if (cv->c_waiters > 0) {
cv->c_seqno++;
_thr_umtx_wake(&cv->c_seqno, 1);
} else
cv->c_wakeups--;
} else {
cv->c_waiters--;
}
THR_LOCK_RELEASE(curthread, &cv->c_lock);
if ((cv->c_lock.m_owner & ~UMUTEX_CONTESTED) == TID(curthread))
THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
_mutex_cv_lock(info->mutex, info->count);
}
@@ -194,7 +175,6 @@ cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
struct timespec ts, ts2, *tsp;
struct cond_cancel_info info;
pthread_cond_t cv;
long seq, oldseq;
int ret = 0;
/*
@@ -206,56 +186,34 @@ cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
return (ret);
cv = *cond;
THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
THR_UMUTEX_LOCK(curthread, &cv->c_lock);
ret = _mutex_cv_unlock(mutex, &info.count);
if (ret) {
THR_LOCK_RELEASE(curthread, &cv->c_lock);
THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
return (ret);
}
oldseq = seq = cv->c_seqno;
info.mutex = mutex;
info.cond = cond;
info.seqno = oldseq;
cv->c_waiters++;
do {
THR_LOCK_RELEASE(curthread, &cv->c_lock);
if (abstime != NULL) {
clock_gettime(cv->c_clockid, &ts);
TIMESPEC_SUB(&ts2, abstime, &ts);
tsp = &ts2;
} else
tsp = NULL;
if (abstime != NULL) {
clock_gettime(cv->c_clockid, &ts);
TIMESPEC_SUB(&ts2, abstime, &ts);
tsp = &ts2;
} else
tsp = NULL;
if (cancel) {
THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &info);
_thr_cancel_enter(curthread);
ret = _thr_umtx_wait(&cv->c_seqno, seq, tsp);
_thr_cancel_leave(curthread);
THR_CLEANUP_POP(curthread, 0);
} else {
ret = _thr_umtx_wait(&cv->c_seqno, seq, tsp);
}
THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
seq = cv->c_seqno;
if (abstime != NULL && ret == ETIMEDOUT)
break;
/*
* loop if we have never been told to wake up
* or we lost a race.
*/
} while (seq == oldseq || cv->c_wakeups == 0);
if (seq != oldseq && cv->c_wakeups != 0) {
cv->c_wakeups--;
ret = 0;
if (cancel) {
THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &info);
_thr_cancel_enter_defer(curthread);
ret = _thr_ucond_wait(&cv->c_kerncv, &cv->c_lock, tsp, 1);
_thr_cancel_leave_defer(curthread, ret);
THR_CLEANUP_POP(curthread, 0);
} else {
cv->c_waiters--;
ret = _thr_ucond_wait(&cv->c_kerncv, &cv->c_lock, tsp, 0);
}
THR_LOCK_RELEASE(curthread, &cv->c_lock);
if (ret == EINTR)
ret = 0;
_mutex_cv_lock(mutex, info.count);
return (ret);
}
@@ -303,7 +261,7 @@ cond_signal_common(pthread_cond_t *cond, int broadcast)
{
struct pthread *curthread = _get_curthread();
pthread_cond_t cv;
int ret = 0, oldwaiters;
int ret = 0;
/*
* If the condition variable is statically initialized, perform dynamic
@@ -314,23 +272,14 @@ cond_signal_common(pthread_cond_t *cond, int broadcast)
return (ret);
cv = *cond;
/* Lock the condition variable structure. */
THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
if (cv->c_waiters) {
if (!broadcast) {
cv->c_wakeups++;
cv->c_waiters--;
cv->c_seqno++;
_thr_umtx_wake(&cv->c_seqno, 1);
} else {
oldwaiters = cv->c_waiters;
cv->c_wakeups += cv->c_waiters;
cv->c_waiters = 0;
cv->c_seqno++;
_thr_umtx_wake(&cv->c_seqno, oldwaiters);
}
THR_UMUTEX_LOCK(curthread, &cv->c_lock);
if (cv->c_kerncv.c_has_waiters) {
if (!broadcast)
ret = _thr_ucond_signal(&cv->c_kerncv);
else
ret = _thr_ucond_broadcast(&cv->c_kerncv);
}
THR_LOCK_RELEASE(curthread, &cv->c_lock);
THR_UMUTEX_UNLOCK(curthread, &cv->c_lock);
return (ret);
}

View File

@@ -143,13 +143,8 @@ struct pthread_mutex_attr {
{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }
struct pthread_cond {
/*
* Lock for accesses to this structure.
*/
struct umutex c_lock;
volatile umtx_t c_seqno;
volatile int c_waiters;
volatile int c_wakeups;
struct ucond c_kerncv;
int c_pshared;
int c_clockid;
};
@@ -364,6 +359,9 @@ struct pthread {
/* Thread is at cancellation point */
int cancel_point;
/* Cancellation should be synchronized */
int cancel_defer;
/* Asynchronous cancellation is enabled */
int cancel_async;
@@ -625,6 +623,8 @@ void _thread_printf(int, const char *, ...) __hidden;
void _thr_spinlock_init(void) __hidden;
void _thr_cancel_enter(struct pthread *) __hidden;
void _thr_cancel_leave(struct pthread *) __hidden;
void _thr_cancel_enter_defer(struct pthread *) __hidden;
void _thr_cancel_leave_defer(struct pthread *, int) __hidden;
void _thr_testcancel(struct pthread *) __hidden;
void _thr_signal_block(struct pthread *) __hidden;
void _thr_signal_unblock(struct pthread *) __hidden;

View File

@@ -61,6 +61,8 @@ sigcancel_handler(int sig __unused,
{
struct pthread *curthread = _get_curthread();
if (curthread->cancel_defer)
thr_wake(curthread->tid);
_thr_ast(curthread);
}

View File

@@ -104,3 +104,36 @@ _thr_umtx_wake(volatile umtx_t *mtx, int nr_wakeup)
return (0);
return (errno);
}
int
_thr_ucond_wait(struct ucond *cv, struct umutex *m,
const struct timespec *timeout, int check_unparking)
{
if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
timeout->tv_nsec <= 0))) {
__thr_umutex_unlock(m);
return (ETIMEDOUT);
}
if (_umtx_op(cv, UMTX_OP_CV_WAIT,
check_unparking ? UMTX_CHECK_UNPARKING : 0,
m, __DECONST(void*, timeout)) == 0) {
return (0);
}
return (errno);
}
int
_thr_ucond_signal(struct ucond *cv)
{
if (_umtx_op(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL) == 0)
return (0);
return (errno);
}
int
_thr_ucond_broadcast(struct ucond *cv)
{
if (_umtx_op(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL) == 0)
return (0);
return (errno);
}
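
One caller-side detail worth noting (visible in the cond_wait_common() hunk above): the absolute deadline is converted to a relative timeout before the call, so an already-expired abstime reaches _thr_ucond_wait() as a non-positive timespec and is reported as ETIMEDOUT; the umutex is still unlocked first, preserving the rule that a condition wait always releases the lock. Sketch, names taken from the diff:

	if (abstime != NULL) {
		clock_gettime(cv->c_clockid, &ts);
		TIMESPEC_SUB(&ts2, abstime, &ts);   /* may already be <= 0 */
		tsp = &ts2;
	} else
		tsp = NULL;
	ret = _thr_ucond_wait(&cv->c_kerncv, &cv->c_lock, tsp, 0);
	/* ETIMEDOUT covers both a kernel timeout and a pre-expired deadline. */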

View File

@@ -48,6 +48,10 @@ void _thr_umutex_init(struct umutex *mtx) __hidden;
int _thr_umtx_wait(volatile umtx_t *mtx, umtx_t exp,
const struct timespec *timeout) __hidden;
int _thr_umtx_wake(volatile umtx_t *mtx, int count) __hidden;
int _thr_ucond_wait(struct ucond *cv, struct umutex *m,
const struct timespec *timeout, int check_unparking);
int _thr_ucond_signal(struct ucond *cv);
int _thr_ucond_broadcast(struct ucond *cv);
static inline int
_thr_umutex_trylock(struct umutex *mtx, uint32_t id)