In sem_post, the _has_waiters field is no longer checked before entering
the kernel, because some applications destroy the semaphore as soon as
sem_wait returns. Instead, always enter the kernel to wake up sleeping
threads, and only update _has_waiters when it is safe to do so. While
here, check whether the value would exceed SEM_VALUE_MAX and return
EOVERFLOW if it does.
This commit is contained in:
David Xu 2012-04-05 03:05:02 +00:00
parent 17ce606321
commit 8931e524bf
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=233913
2 changed files with 22 additions and 25 deletions

View File

@ -332,9 +332,6 @@ _sem_getvalue(sem_t * __restrict sem, int * __restrict sval)
static __inline int static __inline int
usem_wake(struct _usem *sem) usem_wake(struct _usem *sem)
{ {
rmb();
if (!sem->_has_waiters)
return (0);
return _umtx_op(sem, UMTX_OP_SEM_WAKE, 0, NULL, NULL); return _umtx_op(sem, UMTX_OP_SEM_WAKE, 0, NULL, NULL);
} }
@ -374,17 +371,6 @@ _sem_trywait(sem_t *sem)
return (-1); return (-1);
} }
#define TIMESPEC_SUB(dst, src, val) \
do { \
(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec; \
(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec; \
if ((dst)->tv_nsec < 0) { \
(dst)->tv_sec--; \
(dst)->tv_nsec += 1000000000; \
} \
} while (0)
int int
_sem_timedwait(sem_t * __restrict sem, _sem_timedwait(sem_t * __restrict sem,
const struct timespec * __restrict abstime) const struct timespec * __restrict abstime)
@ -438,10 +424,16 @@ _sem_wait(sem_t *sem)
int int
_sem_post(sem_t *sem) _sem_post(sem_t *sem)
{ {
unsigned int count;
if (sem_check_validity(sem) != 0) if (sem_check_validity(sem) != 0)
return (-1); return (-1);
atomic_add_rel_int(&sem->_kern._count, 1); do {
return usem_wake(&sem->_kern); count = sem->_kern._count;
if (count + 1 > SEM_VALUE_MAX)
return (EOVERFLOW);
} while(!atomic_cmpset_rel_int(&sem->_kern._count, count, count+1));
(void)usem_wake(&sem->_kern);
return (0);
} }

View File

@ -2840,9 +2840,7 @@ do_sem_wait(struct thread *td, struct _usem *sem, struct _umtx_time *timeout)
umtxq_busy(&uq->uq_key); umtxq_busy(&uq->uq_key);
umtxq_insert(uq); umtxq_insert(uq);
umtxq_unlock(&uq->uq_key); umtxq_unlock(&uq->uq_key);
casuword32(__DEVOLATILE(uint32_t *, &sem->_has_waiters), 0, 1); casuword32(__DEVOLATILE(uint32_t *, &sem->_has_waiters), 0, 1);
rmb();
count = fuword32(__DEVOLATILE(uint32_t *, &sem->_count)); count = fuword32(__DEVOLATILE(uint32_t *, &sem->_count));
if (count != 0) { if (count != 0) {
umtxq_lock(&uq->uq_key); umtxq_lock(&uq->uq_key);
@ -2876,7 +2874,7 @@ static int
do_sem_wake(struct thread *td, struct _usem *sem) do_sem_wake(struct thread *td, struct _usem *sem)
{ {
struct umtx_key key; struct umtx_key key;
int error, cnt, nwake; int error, cnt;
uint32_t flags; uint32_t flags;
flags = fuword32(&sem->_flags); flags = fuword32(&sem->_flags);
@ -2885,12 +2883,19 @@ do_sem_wake(struct thread *td, struct _usem *sem)
umtxq_lock(&key); umtxq_lock(&key);
umtxq_busy(&key); umtxq_busy(&key);
cnt = umtxq_count(&key); cnt = umtxq_count(&key);
nwake = umtxq_signal(&key, 1); if (cnt > 0) {
if (cnt <= nwake) { umtxq_signal(&key, 1);
umtxq_unlock(&key); /*
error = suword32( * Check if count is greater than 0, this means the memory is
__DEVOLATILE(uint32_t *, &sem->_has_waiters), 0); * still being referenced by user code, so we can safely
umtxq_lock(&key); * update _has_waiters flag.
*/
if (cnt == 1) {
umtxq_unlock(&key);
error = suword32(
__DEVOLATILE(uint32_t *, &sem->_has_waiters), 0);
umtxq_lock(&key);
}
} }
umtxq_unbusy(&key); umtxq_unbusy(&key);
umtxq_unlock(&key); umtxq_unlock(&key);