Use version 2 of the semaphore provided by the kernel umtx code. Now, if there
are no waiters, we still increase and decrease the count in user mode without
entering the kernel; once there is a waiter, sem_post will enter the kernel to
increase the count and wake the thread up. This is atomic and allows us to
gracefully destroy a semaphore after sem_wait has returned.
This commit is contained in:
David Xu 2012-03-21 07:12:52 +00:00
parent 7f06c60810
commit cc583082f4

View File

@ -61,7 +61,8 @@ __weak_reference(_sem_unlink, sem_unlink);
__weak_reference(_sem_wait, sem_wait);
#define SEM_PREFIX	"/tmp/SEMD"
/* Version-1 magic kept so old-format semaphores are still recognized. */
#define SEM_MAGIC1	((u_int32_t)0x73656d31)
/* Version-2 magic ("sem2"); new semaphores are created with this. */
#define SEM_MAGIC	((u_int32_t)0x73656d32)
struct sem_nameinfo {
int open_count;
@ -109,7 +110,7 @@ static inline int
sem_check_validity(sem_t *sem)
{
if (sem->_magic == SEM_MAGIC)
if (sem->_magic == SEM_MAGIC || sem->_magic == SEM_MAGIC1)
return (0);
else {
errno = EINVAL;
@ -130,7 +131,7 @@ _sem_init(sem_t *sem, int pshared, unsigned int value)
sem->_magic = SEM_MAGIC;
sem->_kern._count = (u_int32_t)value;
sem->_kern._has_waiters = 0;
sem->_kern._flags = pshared ? USYNC_PROCESS_SHARED : 0;
sem->_kern._flags = (pshared ? USYNC_PROCESS_SHARED : 0) | SEM_VER2;
return (0);
}
@ -207,7 +208,7 @@ _sem_open(const char *name, int flags, ...)
tmp._magic = SEM_MAGIC;
tmp._kern._has_waiters = 0;
tmp._kern._count = value;
tmp._kern._flags = USYNC_PROCESS_SHARED | SEM_NAMED;
tmp._kern._flags = USYNC_PROCESS_SHARED | SEM_NAMED | SEM_VER2;
if (_write(fd, &tmp, sizeof(tmp)) != sizeof(tmp)) {
flock(fd, LOCK_UN);
goto error;
@ -325,19 +326,10 @@ _sem_getvalue(sem_t * __restrict sem, int * __restrict sval)
if (sem_check_validity(sem) != 0)
return (-1);
*sval = (int)sem->_kern._count;
*sval = (int)sem->_kern._count & ~SEM_WAITERS;
return (0);
}
/*
 * Wake a thread sleeping on the kernel semaphore, if any.
 *
 * rmb() orders the load of _has_waiters after the caller's preceding
 * release of the count (NOTE(review): presumably pairing with the
 * atomic_cmpset_rel_int in sem_post — confirm against the umtx code).
 * Returns 0 when no waiter is present; otherwise the result of the
 * UMTX_OP_SEM_WAKE operation.
 */
static __inline int
usem_wake(struct _usem *sem)
{
	rmb();
	/* Fast path: nobody is sleeping, skip the system call entirely. */
	if (!sem->_has_waiters)
		return (0);
	return _umtx_op(sem, UMTX_OP_SEM_WAKE, 0, NULL, NULL);
}
static __inline int
usem_wait(struct _usem *sem, const struct timespec *abstime)
{
@ -358,48 +350,51 @@ usem_wait(struct _usem *sem, const struct timespec *abstime)
(void *)tm_size, __DECONST(void*, tm_p));
}
/*
 * Attempt to decrement the semaphore count without blocking.
 *
 * For version-2 semaphores the SEM_WAITERS bit lives inside _count, so the
 * value test masks it off while the CAS operates on the whole word, leaving
 * the flag bit undisturbed.  Version-1 semaphores use the plain count.
 *
 * Returns 0 on success, EAGAIN when the available count is zero.
 */
static inline int
_trywait(sem_t *sem)
{
	int count;

	if ((sem->_kern._flags & SEM_VER2) != 0) {
		/* Retry the CAS until it succeeds or the masked count hits 0. */
		while (((count = sem->_kern._count) & ~SEM_WAITERS) > 0) {
			if (atomic_cmpset_acq_int(&sem->_kern._count, count, count - 1))
				return (0);
		}
	} else {
		while ((count = sem->_kern._count) > 0) {
			if (atomic_cmpset_acq_int(&sem->_kern._count, count, count - 1))
				return (0);
		}
	}
	return (EAGAIN);
}
/*
 * sem_trywait(3): decrement the semaphore if its value is positive,
 * without blocking.
 *
 * The merged diff text left the pre-commit inline CAS loop (the `val`
 * declaration, the while loop and `errno = EAGAIN;`) alongside the new
 * _trywait() call, yielding duplicate logic and a potential double
 * decrement; only the post-commit body is kept here.
 *
 * Returns 0 on success; -1 on failure with errno set to EINVAL (bad
 * semaphore) or EAGAIN (count was zero).
 */
int
_sem_trywait(sem_t *sem)
{
	int status;

	if (sem_check_validity(sem) != 0)
		return (-1);

	if ((status = _trywait(sem)) == 0)
		return (0);
	errno = status;
	return (-1);
}
/*
 * TIMESPEC_SUB(dst, src, val): store src - val into *dst, normalizing
 * tv_nsec into the range [0, 1000000000).  Classic multi-statement
 * macro wrapped in do/while(0) so it behaves as one statement.
 */
#define TIMESPEC_SUB(dst, src, val)					\
	do {								\
		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;		\
		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec;	\
		if ((dst)->tv_nsec < 0) {				\
			(dst)->tv_nsec += 1000000000;			\
			(dst)->tv_sec -= 1;				\
		}							\
	} while (0)
int
_sem_timedwait(sem_t * __restrict sem,
const struct timespec * __restrict abstime)
{
int val, retval;
int retval;
if (sem_check_validity(sem) != 0)
return (-1);
retval = 0;
for (;;) {
while ((val = sem->_kern._count) > 0) {
if (atomic_cmpset_acq_int(&sem->_kern._count, val, val - 1))
return (0);
}
if (_trywait(sem) == 0)
return (0);
if (retval) {
_pthread_testcancel();
@ -438,10 +433,36 @@ _sem_wait(sem_t *sem)
/*
 * sem_post(3): increment the semaphore and wake a waiter if necessary.
 *
 * The merged diff text retained the pre-commit lines
 * (`atomic_add_rel_int(...); return usem_wake(...);`) ahead of the new
 * version-2 implementation, which both posted twice and made the new
 * code unreachable; only the post-commit body is kept here.
 *
 * For SEM_VER2 semaphores: if SEM_WAITERS is set in _count, the kernel
 * performs the increment-and-wake atomically via UMTX_OP_SEM_WAKE;
 * otherwise the count is bumped entirely in user mode with a
 * release-ordered CAS.  For version-1 semaphores the count is always
 * incremented in user mode, then _has_waiters is checked (after rmb())
 * to decide whether a wake syscall is needed.
 *
 * Returns 0 on success; -1 with errno set to EINVAL (bad semaphore) or
 * ERANGE (count already at SEM_VALUE_MAX).
 */
int
_sem_post(sem_t *sem)
{
	int count;

	if (sem_check_validity(sem) != 0)
		return (-1);

	if ((sem->_kern._flags & SEM_VER2) != 0) {
		for (;;) {
			count = sem->_kern._count;
			if ((count & SEM_WAITERS) == 0) {
				if (__predict_false(count == SEM_VALUE_MAX)) {
					errno = ERANGE;
					return (-1);
				}
				/* No waiters: bump the count in user mode. */
				if (atomic_cmpset_rel_int(&sem->_kern._count, count, count+1))
					return (0);
			} else {
				/* Waiters present: let the kernel increment and wake atomically. */
				return _umtx_op(&sem->_kern, UMTX_OP_SEM_WAKE, 0, NULL, NULL);
			}
		}
	} else {
		do {
			count = sem->_kern._count;
			if (__predict_false(count == SEM_VALUE_MAX)) {
				errno = ERANGE;
				return (-1);
			}
		} while (!atomic_cmpset_rel_int(&sem->_kern._count, count, count+1));
		rmb();
		if (!sem->_kern._has_waiters)
			return (0);
		return _umtx_op(&sem->_kern, UMTX_OP_SEM_WAKE, 0, NULL, NULL);
	}
}