/*
 * Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "namespace.h"
#include <errno.h>
#ifdef _PTHREAD_FORCED_UNWIND
#include <dlfcn.h>
#endif
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/signalvar.h>
#include "un-namespace.h"

#include "libc_private.h"
|
2003-04-01 03:46:29 +00:00
|
|
|
#include "thr_private.h"
|
|
|
|
|
2005-04-02 01:20:00 +00:00
|
|
|
void _pthread_exit(void *status);

static void exit_thread(void) __dead2;

__weak_reference(_pthread_exit, pthread_exit);

#ifdef _PTHREAD_FORCED_UNWIND
static int message_printed;

static void thread_unwind(void) __dead2;
#ifdef PIC
static void thread_uw_init(void);
static _Unwind_Reason_Code thread_unwind_stop(int version,
	_Unwind_Action actions,
	int64_t exc_class,
	struct _Unwind_Exception *exc_obj,
	struct _Unwind_Context *context, void *stop_parameter);
/* unwind library pointers */
static _Unwind_Reason_Code (*uwl_forcedunwind)(struct _Unwind_Exception *,
	_Unwind_Stop_Fn, void *);
static unsigned long (*uwl_getcfa)(struct _Unwind_Context *);

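/*
 * Resolve _Unwind_ForcedUnwind() and _Unwind_GetCFA() at run time.  In the
 * PIC case the unwinder is not linked into libthr directly; the symbols are
 * looked up with dlsym(), and the library that provides them (typically
 * libgcc_s or an equivalent unwinder) is dlopen()ed so the resolved
 * addresses stay valid for the lifetime of the process.
 */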
static void
thread_uw_init(void)
{
	static int inited = 0;
	Dl_info dlinfo;
	void *handle;
	void *forcedunwind, *getcfa;

	if (inited)
		return;
	handle = RTLD_DEFAULT;
	if ((forcedunwind = dlsym(handle, "_Unwind_ForcedUnwind")) != NULL) {
		if (dladdr(forcedunwind, &dlinfo)) {
			/*
			 * Hold a reference on the library containing the
			 * unwinder so the resolved addresses remain valid;
			 * assume both functions live in the same library.
			 */
			if ((handle = dlopen(dlinfo.dli_fname, RTLD_LAZY)) != NULL) {
				forcedunwind = dlsym(handle, "_Unwind_ForcedUnwind");
				getcfa = dlsym(handle, "_Unwind_GetCFA");
				if (forcedunwind != NULL && getcfa != NULL) {
					uwl_getcfa = getcfa;
					atomic_store_rel_ptr((volatile void *)&uwl_forcedunwind,
					    (uintptr_t)forcedunwind);
				} else {
					dlclose(handle);
				}
			}
		}
	}
	inited = 1;
}

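/*
 * Local definitions of _Unwind_ForcedUnwind() and _Unwind_GetCFA() that
 * simply forward to the function pointers filled in by thread_uw_init(),
 * so the rest of this file can call the unwinder by name whether or not
 * it was available at link time.
 */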
_Unwind_Reason_Code
_Unwind_ForcedUnwind(struct _Unwind_Exception *ex, _Unwind_Stop_Fn stop_func,
	void *stop_arg)
{
	return (*uwl_forcedunwind)(ex, stop_func, stop_arg);
}

unsigned long
_Unwind_GetCFA(struct _Unwind_Context *context)
{
	return (*uwl_getcfa)(context);
}
#else
#pragma weak _Unwind_GetCFA
#pragma weak _Unwind_ForcedUnwind
#endif /* PIC */

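/*
 * Cleanup callback attached to the forced-unwind exception object.  It is
 * reached only if a frame catches the unwind and destroys the exception
 * instead of rethrowing it.  An illustrative (C++) handler that cooperates
 * with thread exit would look like:
 *
 *	try {
 *		work();
 *	} catch (...) {
 *		release_local_resources();
 *		throw;		// rethrow so the unwind can continue
 *	}
 *
 * Swallowing the exception aborts the unwind, so we panic.
 */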
static void
thread_unwind_cleanup(_Unwind_Reason_Code code, struct _Unwind_Exception *e)
{
	/*
	 * The specification says _Unwind_Resume() must not be used here;
	 * the handler that caught the unwind has to rethrow the exception
	 * instead.  In C++ that means a bare "throw;" inside the
	 * catch(...) block.
	 */
	PANIC("exception should be rethrown");
}

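/*
 * Stop function passed to _Unwind_ForcedUnwind().  The unwinder calls it
 * once per frame; each call pops and runs any cleanup handlers whose stack
 * records lie at or below the current CFA, i.e. in frames that have already
 * been unwound.  Once the end of the stack, or the recorded top of the
 * thread's stack, is reached, the thread exits for good.
 */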
static _Unwind_Reason_Code
thread_unwind_stop(int version, _Unwind_Action actions,
	int64_t exc_class,
	struct _Unwind_Exception *exc_obj,
	struct _Unwind_Context *context, void *stop_parameter)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_cleanup *cur;
	uintptr_t cfa;
	int done = 0;

	/* XXX assume the stack grows down, towards lower addresses */

	cfa = _Unwind_GetCFA(context);
	if (actions & _UA_END_OF_STACK ||
	    cfa >= (uintptr_t)curthread->unwind_stackend) {
		done = 1;
	}

	while ((cur = curthread->cleanup) != NULL &&
	    (done || (uintptr_t)cur <= cfa)) {
		__pthread_cleanup_pop_imp(1);
	}

	if (done)
		exit_thread(); /* Does not return. */

	return (_URC_NO_REASON);
}

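/*
 * Start a forced unwind of the current thread's stack.  The exception
 * object embedded in the thread structure is initialized with a zero
 * exception class (treated as foreign by language runtimes), and
 * thread_unwind_stop() drives the cleanup handlers frame by frame.
 */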
static void
thread_unwind(void)
{
	struct pthread *curthread = _get_curthread();

	curthread->ex.exception_class = 0;
	curthread->ex.exception_cleanup = thread_unwind_cleanup;
	_Unwind_ForcedUnwind(&curthread->ex, thread_unwind_stop, NULL);
	PANIC("_Unwind_ForcedUnwind returned");
}

#endif /* _PTHREAD_FORCED_UNWIND */

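/*
 * Report a fatal internal error and abort the process.  The PANIC() macro
 * in thr_private.h resolves to a call to this function, passing the file,
 * line and a printf-style message.
 */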
void
_thread_exitf(const char *fname, int lineno, const char *fmt, ...)
{
	va_list ap;

	/* Write an error message to the standard error file descriptor: */
	_thread_printf(STDERR_FILENO, "Fatal error '");

	va_start(ap, fmt);
	_thread_vprintf(STDERR_FILENO, fmt, ap);
	va_end(ap);

	_thread_printf(STDERR_FILENO, "' at line %d in file %s (errno = %d)\n",
	    lineno, fname, errno);

	abort();
}

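/* Convenience wrapper: report a fixed message string through _thread_exitf(). */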
void
_thread_exit(const char *fname, int lineno, const char *msg)
{

	_thread_exitf(fname, lineno, "%s", msg);
}

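/*
 * pthread_exit(3): terminate the calling thread and hand 'status' to any
 * joiner.  Illustrative usage only:
 *
 *	void *worker(void *arg)
 *	{
 *		static int result = 42;
 *
 *		pthread_exit(&result);	// equivalent to returning &result
 *	}
 *
 *	// A thread calling pthread_join(tid, &p) then receives &result in p.
 */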
void
_pthread_exit(void *status)
{
	_pthread_exit_mask(status, NULL);
}

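/*
 * Internal variant of pthread_exit() that optionally installs a final
 * signal mask before the thread tears itself down.  The sequence below is:
 * mark the thread as cancelling (so a recursive exit panics), disable
 * cancellation, adjust the signal mask, record the exit status, run the
 * pthread_cleanup_push() handlers (by stack unwinding when an unwinder is
 * available, otherwise by popping them directly), and call exit_thread().
 */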
|
void
|
|
|
|
_pthread_exit_mask(void *status, sigset_t *mask)
|
2003-04-01 03:46:29 +00:00
|
|
|
{
|
2005-04-02 01:20:00 +00:00
|
|
|
struct pthread *curthread = _get_curthread();
|
	/* Check if this thread is already in the process of exiting: */
	if (curthread->cancelling)
		PANIC("Thread %p has called "
		    "pthread_exit() from a destructor. POSIX 1003.1 "
		    "1996 s16.2.5.2 does not allow this!", curthread);

	/* Flag this thread as exiting. */
	curthread->cancelling = 1;
	curthread->no_cancel = 1;
	curthread->cancel_async = 0;
	curthread->cancel_point = 0;
	if (mask != NULL)
		__sys_sigprocmask(SIG_SETMASK, mask, NULL);
	if (curthread->unblock_sigcancel) {
		sigset_t set;

		curthread->unblock_sigcancel = 0;
		SIGEMPTYSET(set);
		SIGADDSET(set, SIGCANCEL);
		__sys_sigprocmask(SIG_UNBLOCK, &set, NULL);
	}

	/* Save the return value: */
	curthread->ret = status;
#ifdef _PTHREAD_FORCED_UNWIND
#ifdef PIC
	thread_uw_init();
#endif /* PIC */

#ifdef PIC
	if (uwl_forcedunwind != NULL) {
#else
	if (_Unwind_ForcedUnwind != NULL) {
#endif
		if (curthread->unwind_disabled) {
			if (message_printed == 0) {
				message_printed = 1;
				_thread_printf(2, "Warning: old _pthread_cleanup_push was called, "
				    "stack unwinding is disabled.\n");
			}
			goto cleanup;
		}
		thread_unwind();
	} else {
cleanup:
		while (curthread->cleanup != NULL) {
			__pthread_cleanup_pop_imp(1);
		}
		exit_thread();
	}
#else
	while (curthread->cleanup != NULL) {
		__pthread_cleanup_pop_imp(1);
	}

	exit_thread();
#endif /* _PTHREAD_FORCED_UNWIND */
}

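/*
 * Final, common exit path.  Runs the thread-specific data destructors, lets
 * the process exit if this was the last thread, tells malloc the thread is
 * going away, marks the thread PS_DEAD, reports the death to a debugger if
 * requested, drops the creation reference, and finally calls
 * thr_exit(&curthread->tid) so the kernel wakes up any joiner sleeping on
 * the tid.
 */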
static void
exit_thread(void)
{
	struct pthread *curthread = _get_curthread();

	/* Check if there is thread specific data: */
	if (curthread->specific != NULL) {
		/* Run the thread-specific data destructors: */
		_thread_cleanupspecific();
	}

	if (!_thr_isthreaded())
		exit(0);

	if (atomic_fetchadd_int(&_thread_active_threads, -1) == 1) {
		exit(0);
		/* Never reached. */
	}

	/* Tell malloc that the thread is exiting. */
	_malloc_thread_cleanup();

	THR_LOCK(curthread);
	curthread->state = PS_DEAD;
	if (curthread->flags & THR_FLAGS_NEED_SUSPEND) {
		curthread->cycle++;
		_thr_umtx_wake(&curthread->cycle, INT_MAX, 0);
	}
	if (!curthread->force_exit && SHOULD_REPORT_EVENT(curthread, TD_DEATH))
		_thr_report_death(curthread);
	/*
	 * The thread was created with an initial reference count of 1;
	 * drop that reference so the thread can be garbage collected.
	 */
	curthread->refcount--;
	_thr_try_gc(curthread, curthread); /* thread lock released */

#if defined(_PTHREADS_INVARIANTS)
	if (THR_IN_CRITICAL(curthread))
		PANIC("thread %p exits with resources held!", curthread);
#endif
	/*
	 * The kernel wakes up any waiter sleeping on this address, so a
	 * thread blocked in a join on this thread is resumed.
	 */
	thr_exit(&curthread->tid);
	PANIC("thr_exit() returned");
	/* Never reached. */
}