Avoid adaptive spinning for priority-protected mutexes; the current
implementation always performs the lock in the kernel.
This commit is contained in:
David Xu 2007-10-31 01:50:48 +00:00
parent 55f18e070f
commit 56b45d9067

View File

@ -366,6 +366,9 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
 * the lock is likely to be released quickly and it is
 * faster than entering the kernel
 */
if (m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)
goto sleep_in_kernel;
		if (!_thr_is_smp)
			goto yield_loop;
@ -381,8 +384,7 @@ mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
			if (ret == 0)
				goto done;
		} else {
if (_thr_spinloops != 0 && if (_thr_spinloops != 0) {
!(m->m_lock.m_flags & UMUTEX_PRIO_PROTECT)) {
				count = _thr_spinloops;
				while (count) {
					if (m->m_lock.m_owner == UMUTEX_UNOWNED) {
@ -407,6 +409,7 @@ yield_loop:
		}
	}
sleep_in_kernel:
	if (abstime == NULL) {
		ret = __thr_umutex_lock(&m->m_lock);
	} else if (__predict_false(