freebsd-dev/lib/libkse/thread/thr_spinlock.c
Daniel Eischen 843d4004b3 Use a generic way to back threads out of wait queues when handling
signals, instead of embedding intricate knowledge of thread state
in the signal-handling code.

Simplify the signal code as a result of the above (by David Xu).

Use macros for libpthread usage of pthread_cleanup_push() and
pthread_cleanup_pop().  This removes some instances of malloc()
and free() from the semaphore and pthread_once() implementations.
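
A minimal sketch of the stack-based technique (the names here are
illustrative, not the actual libpthread macros): the push macro opens
a block and places the cleanup record in an automatic variable, and
the pop macro closes that block, so no heap allocation is needed.

    struct sketch_cleanup {
        void    (*routine)(void *);
        void    *arg;
    };

    /* Opens a block; the record lives on the caller's stack.  A real
     * implementation would also link it onto the thread's handler list. */
    #define SKETCH_CLEANUP_PUSH(func, val)                      \
    {                                                           \
        struct sketch_cleanup _c = { (func), (val) };

    /* Closes the block opened by SKETCH_CLEANUP_PUSH. */
    #define SKETCH_CLEANUP_POP(execute)                         \
        if (execute)                                            \
            _c.routine(_c.arg);                                 \
    }

As with the real pthread_cleanup_push()/pthread_cleanup_pop(), each
push must be paired with a pop in the same lexical scope.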

When single threaded and fork()ing, make sure that the current
thread's signal mask is inherited by the thread in the forked process.
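
The invariant can be checked with a small hedged test sketch (not part
of this change) that blocks a signal, forks, and verifies the mask in
the child:

    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int
    main(void)
    {
        sigset_t set, cur;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        pthread_sigmask(SIG_BLOCK, &set, NULL);
        if (fork() == 0) {
            /* Child: SIGUSR1 must still be blocked here. */
            pthread_sigmask(SIG_SETMASK, NULL, &cur);
            printf("child inherited mask: %s\n",
                sigismember(&cur, SIGUSR1) ? "yes" : "no");
            exit(0);
        }
        wait(NULL);
        return (0);
    }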

Use private mutexes for libc and libpthread.  Signals are
deferred while threads hold private mutexes.  This fix also
breaks www/linuxpluginwrapper; a patch that fixes it is at
http://people.freebsd.org/~deischen/kse/linuxpluginwrapper.diff
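
The deferral happens inside the threading library; as a rough,
user-level approximation of the observable behavior (hypothetical
helper names, with kernel-level masking standing in for libkse's
userland deferral):

    #include <pthread.h>
    #include <signal.h>

    static pthread_mutex_t private_lock = PTHREAD_MUTEX_INITIALIZER;

    static void
    private_lock_enter(sigset_t *saved)
    {
        sigset_t all;

        sigfillset(&all);
        /* Defer asynchronous signals for the critical section. */
        pthread_sigmask(SIG_BLOCK, &all, saved);
        pthread_mutex_lock(&private_lock);
    }

    static void
    private_lock_leave(const sigset_t *saved)
    {
        pthread_mutex_unlock(&private_lock);
        /* Restore the mask; deferred signals are delivered now. */
        pthread_sigmask(SIG_SETMASK, saved, NULL);
    }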

Fix a race condition in condition variables where a thread handling
a signal (pthread_kill() or kill()) could fail to see a wakeup
(pthread_cond_signal() or pthread_cond_broadcast()).
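
The guarantee being restored is the standard one: a waiter that
re-checks its predicate under the mutex cannot miss a wakeup, even if
a signal handler interrupts the wait.  A conventional predicate-loop
wait, for reference:

    #include <pthread.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
    static int ready = 0;

    /* Waiter: even if pthread_cond_wait() returns early (e.g. after a
     * signal is handled), the wakeup is observed via the flag, not lost. */
    static void
    wait_for_ready(void)
    {
        pthread_mutex_lock(&m);
        while (!ready)
            pthread_cond_wait(&cv, &m);
        pthread_mutex_unlock(&m);
    }

    static void
    make_ready(void)
    {
        pthread_mutex_lock(&m);
        ready = 1;
        pthread_cond_signal(&cv);
        pthread_mutex_unlock(&m);
    }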

In collaboration with:	davidxu
2004-12-18 18:07:37 +00:00

/*
 * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include <sys/types.h>
#include <machine/atomic.h>
#include <libc_private.h>
#include "spinlock.h"
#include "thr_private.h"

#define	MAX_SPINLOCKS	5

/*
 * Each spinlock is bound, on first use, to one entry of a small
 * statically allocated pool; the entry's mutex provides the locking.
 */
struct spinlock_extra {
	spinlock_t	*owner;
	pthread_mutex_t	lock;
};

static void	init_spinlock(spinlock_t *lck);

static struct pthread_mutex_attr static_mutex_attr =
    PTHREAD_MUTEXATTR_STATIC_INITIALIZER;
static pthread_mutexattr_t static_mattr = &static_mutex_attr;

static pthread_mutex_t		spinlock_static_lock;
static struct spinlock_extra	extra[MAX_SPINLOCKS];
static int			spinlock_count = 0;
static int			initialized = 0;
/*
 * These are for compatibility only.  Spinlocks of this type
 * are deprecated; each one is backed by a pthread mutex from
 * the static pool above.
 */

void
_spinunlock(spinlock_t *lck)
{
	struct spinlock_extra *extra;

	extra = (struct spinlock_extra *)lck->fname;
	_pthread_mutex_unlock(&extra->lock);
}
/*
 * Lock a location for the running thread.  The lock is implemented
 * as a pthread mutex, so the thread may block until the lock becomes
 * available; the old busy-wait semantics no longer apply.
 */
void
_spinlock(spinlock_t *lck)
{
	struct spinlock_extra *extra;

	if (!__isthreaded)
		PANIC("Spinlock called when not threaded.");
	if (!initialized)
		PANIC("Spinlocks not initialized.");
	/*
	 * Bind the spinlock to a mutex from the static pool on
	 * first use, then acquire that mutex.
	 */
	if (lck->fname == NULL)
		init_spinlock(lck);
	extra = (struct spinlock_extra *)lck->fname;
	_pthread_mutex_lock(&extra->lock);
}
/*
 * Debug version of the lock function.  In this implementation the
 * file name and line number arguments are unused; the function
 * simply forwards to _spinlock().
 */
void
_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
	_spinlock(lck);
}
static void
init_spinlock(spinlock_t *lck)
{
	_pthread_mutex_lock(&spinlock_static_lock);
	if ((lck->fname == NULL) && (spinlock_count < MAX_SPINLOCKS)) {
		lck->fname = (char *)&extra[spinlock_count];
		extra[spinlock_count].owner = lck;
		spinlock_count++;
	}
	_pthread_mutex_unlock(&spinlock_static_lock);
	if (lck->fname == NULL)
		PANIC("Exceeded max spinlocks");
}
void
_thr_spinlock_init(void)
{
	int i;

	if (initialized != 0) {
		/* Reinitialize the locks, e.g. in the child after fork(). */
		_thr_mutex_reinit(&spinlock_static_lock);
		for (i = 0; i < spinlock_count; i++)
			_thr_mutex_reinit(&extra[i].lock);
	} else {
		if (_pthread_mutex_init(&spinlock_static_lock, &static_mattr))
			PANIC("Cannot initialize spinlock_static_lock");
		for (i = 0; i < MAX_SPINLOCKS; i++) {
			if (_pthread_mutex_init(&extra[i].lock, &static_mattr))
				PANIC("Cannot initialize spinlock extra");
		}
		initialized = 1;
	}
}