Block in cv_destroy() on all waiters

Previously we would ASSERT in cv_destroy() if it was ever called
with active waiters.  However, I've now seen several instances in
OpenSolaris code where they do the following:

  cv_broadcast();
  cv_destroy();

This leaves no time for active waiters to be woken up and scheduled
and we trip the ASSERT.  This has not been observed to be an issue
on OpenSolaris because their cv_destroy() basically does nothing.
They still run the risk of the memory being freed after the
cv_destroy() and hitting a bad paging request.  But in practice
the race window is so small that either it never happens, or it
happens so rarely that the root cause has never been identified.
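
For illustration, the race looks roughly like this (the thread
names and the structure 'p' embedding the cv are hypothetical):

  waiting thread                       destroying thread
  --------------                       -----------------
  cv_wait(&p->p_cv, &p->p_lock);
    (asleep on p_cv)
                                       cv_broadcast(&p->p_cv);
                                       cv_destroy(&p->p_cv);
                                       kmem_free(p, sizeof (*p));
  (wakes, reacquires p_lock, and
   touches the now freed p_cv)         <-- bad paging request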

Rather than risk the same issue in our code, this change updates
cv_destroy() to block until all waiters have been woken and
scheduled.  This may take some time because each waiter must
acquire the mutex.

This change may have a performance impact on condition variables
which are frequently created and destroyed.  That, however, is a
price worth paying to avoid crashing your system.  If performance
issues are observed they can be addressed by the caller.
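
As a concrete (hypothetical) example of what this means for callers,
a teardown path that previously raced now simply blocks for a moment
in cv_destroy(); the foo_t structure and its fields below are made up
for illustration:

  mutex_enter(&foo->foo_lock);
  foo->foo_shutdown = 1;
  cv_broadcast(&foo->foo_cv);        /* wake all waiters */
  mutex_exit(&foo->foo_lock);

  cv_destroy(&foo->foo_cv);          /* now blocks until every waiter has
                                      * reacquired foo_lock and dropped
                                      * off the cv's wait queue */
  mutex_destroy(&foo->foo_lock);
  kmem_free(foo, sizeof (*foo));
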
Author: Brian Behlendorf
Date:   2011-02-04 14:09:08 -08:00
Parent: 0aff071d18
Commit: d599e4fa79
2 changed files with 24 additions and 3 deletions

@@ -42,6 +42,7 @@ typedef struct {
         char *cv_name;
         int cv_name_size;
         wait_queue_head_t cv_event;
+        wait_queue_head_t cv_destroy;
         atomic_t cv_waiters;
         kmutex_t *cv_mutex;
 } kcondvar_t;

@@ -46,6 +46,7 @@ __cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
         cvp->cv_magic = CV_MAGIC;
         init_waitqueue_head(&cvp->cv_event);
+        init_waitqueue_head(&cvp->cv_destroy);
         atomic_set(&cvp->cv_waiters, 0);
         cvp->cv_mutex = NULL;
         cvp->cv_name = NULL;
@@ -65,12 +66,27 @@ __cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
 }
 EXPORT_SYMBOL(__cv_init);
 
+static int
+cv_destroy_wakeup(kcondvar_t *cvp)
+{
+        if ((waitqueue_active(&cvp->cv_event)) ||
+            (atomic_read(&cvp->cv_waiters) > 0))
+                return 0;
+
+        return 1;
+}
+
 void
 __cv_destroy(kcondvar_t *cvp)
 {
         SENTRY;
         ASSERT(cvp);
         ASSERT(cvp->cv_magic == CV_MAGIC);
+
+        /* Block until all waiters have woken */
+        while (cv_destroy_wakeup(cvp) == 0)
+                wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);
+
         ASSERT(cvp->cv_mutex == NULL);
         ASSERT(atomic_read(&cvp->cv_waiters) == 0);
         ASSERT(!waitqueue_active(&cvp->cv_event));
@@ -78,7 +94,6 @@ __cv_destroy(kcondvar_t *cvp)
         if (cvp->cv_name)
                 kmem_free(cvp->cv_name, cvp->cv_name_size);
 
         ASSERT3P(memset(cvp, CV_POISON, sizeof(*cvp)), ==, cvp);
         SEXIT;
 }
 EXPORT_SYMBOL(__cv_destroy);
@@ -111,10 +126,13 @@ cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state)
         mutex_enter(mp);
 
         /* No more waiters a different mutex could be used */
-        if (atomic_dec_and_test(&cvp->cv_waiters))
+        if (atomic_dec_and_test(&cvp->cv_waiters)) {
                 cvp->cv_mutex = NULL;
+                wake_up(&cvp->cv_destroy);
+        }
 
         finish_wait(&cvp->cv_event, &wait);
 
         SEXIT;
 }
@@ -170,8 +188,10 @@ __cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp,
         mutex_enter(mp);
 
         /* No more waiters a different mutex could be used */
-        if (atomic_dec_and_test(&cvp->cv_waiters))
+        if (atomic_dec_and_test(&cvp->cv_waiters)) {
                 cvp->cv_mutex = NULL;
+                wake_up(&cvp->cv_destroy);
+        }
 
         finish_wait(&cvp->cv_event, &wait);
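
For context, the waiter path that the new cv_destroy() has to wait out
looks something like the following (a sketch; foo_t and its fields are
hypothetical).  cv_wait() reacquires foo_lock before it returns, and it
is only at that point, inside cv_wait_common() above, that cv_waiters
is decremented and cv_destroy is woken, which is why destroying a cv
with active waiters can now take a short while:

  mutex_enter(&foo->foo_lock);
  while (!foo->foo_shutdown)
          cv_wait(&foo->foo_cv, &foo->foo_lock);
  mutex_exit(&foo->foo_lock);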