Import my recent 1:1 threading work. Some of the improved features include:

1. Fast, simple-type mutexes.
 2. __thread TLS works.
 3. Asynchronous cancellation works (using signals).
 4. thread synchronization is fully based on umtx, mainly, condition
    variable and other synchronization objects were rewritten by using
    umtx directly. those objects can be shared between processes via
    shared memory, it has to change ABI which does not happen yet.
 5. default stack size is increased to 1M on 32 bits platform, 2M for
    64 bits platform.
As a result, some MySQL super-smack benchmarks show performance is
improved massively.

Okayed by: jeff, mtm, rwatson, scottl
This commit is contained in:
David Xu 2005-04-02 01:20:00 +00:00
parent 824a5e96dc
commit a091d823ad
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=144518
77 changed files with 7421 additions and 4862 deletions

View File

@ -10,20 +10,26 @@
LIB=thr
SHLIB_MAJOR= 1
DEBUG_FLAGS=-g
CFLAGS+=-DPTHREAD_KERNEL -D_THREAD_SAFE
CFLAGS+=-DPTHREAD_KERNEL
CFLAGS+=-I${.CURDIR}/../libc/include -I${.CURDIR}/thread \
-I${.CURDIR}/../../include
CFLAGS+=-I${.CURDIR}/arch/${MACHINE_ARCH}/include
CFLAGS+=-I${.CURDIR}/sys
CFLAGS+=-I${.CURDIR}/../../libexec/rtld-elf
CFLAGS+=-I${.CURDIR}/../../libexec/rtld-elf/${MACHINE_ARCH}
CFLAGS+=-Winline
# CFLAGS+=-DSYSTEM_SCOPE_ONLY
LDFLAGS= -Wl,--version-script=${.CURDIR}/pthread.map
# enable extra internal consistency checks
CFLAGS+=-D_PTHREADS_INVARIANTS
CFLAGS+=-D_PTHREADS_INVARIANTS -Wall
PRECIOUSLIB=
WARNS?= 2
.include "${.CURDIR}/thread/Makefile.inc"
.include "${.CURDIR}/sys/Makefile.inc"
.include "${.CURDIR}/arch/${MACHINE_ARCH}/Makefile.inc"
.include "${.CURDIR}/sys/Makefile.inc"
.include "${.CURDIR}/thread/Makefile.inc"
.include <bsd.lib.mk>

View File

@ -1,5 +1,5 @@
# $FreeBSD$
.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
SRCS+= _curthread.c
SRCS+= pthread_md.c

View File

@ -0,0 +1,53 @@
/*
* Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <stdlib.h>
#include <string.h>
#include "pthread_md.h"
/*
* The constructors.
*/
/*
 * Allocate and initialize a thread control block (TCB).
 *
 * thread:  the pthread this TCB will belong to.
 * initial: nonzero for the initial (main) thread; unused on this port
 *          because every TCB is allocated the same way here.
 *
 * Returns the new zero-filled TCB with tcb_thread set, or NULL on
 * allocation failure (callers must check).
 */
struct tcb *
_tcb_ctor(struct pthread *thread, int initial)
{
	struct tcb *tcb;

	/* calloc() zeroes the block, replacing the malloc()+memset() pair. */
	if ((tcb = calloc(1, sizeof(struct tcb))) != NULL)
		tcb->tcb_thread = thread;
	return (tcb);
}
/*
 * Release a TCB allocated by _tcb_ctor().  free(NULL) is a no-op, so
 * passing a failed constructor result is safe.
 */
void
_tcb_dtor(struct tcb *tcb)
{
free(tcb);
}

View File

@ -22,38 +22,54 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef _PTHREAD_MD_H_
#define _PTHREAD_MD_H_
#include <stddef.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <pthread.h>
#include "thr_private.h"
#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
register struct pthread *_tp __asm("%r13");
/*
* Variant I tcb. The structure layout is fixed, don't blindly
* change it!
*/
struct tcb {
void *tcb_dtv;
struct pthread *tcb_thread;
};
struct pthread *
#define _tp __builtin_thread_pointer()
#define _tcb ((struct tcb *)_tp)
struct tcb *_tcb_ctor(struct pthread *, int);
void _tcb_dtor(struct tcb *);
/* Called from the thread to set its private data. */
static __inline void
_tcb_set(struct tcb *tcb)
{
__builtin_set_thread_pointer(tcb);
}
static __inline struct tcb *
_tcb_get(void)
{
return (_tcb);
}
extern struct pthread *_thr_initial;
static __inline struct pthread *
_get_curthread(void)
{
return (_tp);
}
void
_retire_thread(void *v)
{
}
void *
_set_curthread(ucontext_t *uc, struct pthread *thread, int *err)
{
*err = 0;
if (uc != NULL)
uc->uc_mcontext.mc_special.tp = (uint64_t)thread;
else
_tp = thread;
if (_thr_initial)
return (_tcb->tcb_thread);
return (NULL);
}
#endif /* _PTHREAD_MD_H_ */

View File

@ -1,5 +1,5 @@
#$FreeBSD$
.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
SRCS+= _setcurthread.c
SRCS+= pthread_md.c

View File

@ -1,101 +0,0 @@
/*
* Copyright (c) 2004, David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/types.h>
#include <sys/ucontext.h>
#include <pthread.h>
#include <machine/sysarch.h>
#include "thr_private.h"
#include "rtld_tls.h"
struct tcb {
struct tcb *tcb_self; /* required by rtld */
void *tcb_dtv; /* required by rtld */
struct pthread *tcb_thread;
};
void
_retire_thread(void *entry)
{
struct tcb *tcb = (struct tcb *)entry;
_rtld_free_tls(tcb, sizeof(struct tcb), 16);
}
void *
_set_curthread(ucontext_t *uc, struct pthread *thr, int *err)
{
struct tcb *tcb;
void *oldtls;
*err = 0;
if (thr->arch_id != NULL && uc == NULL) {
amd64_set_fsbase(thr->arch_id);
return (thr->arch_id);
}
if (uc == NULL) {
__asm __volatile("movq %%fs:0, %0" : "=r" (oldtls));
} else {
oldtls = NULL;
}
/*
* Allocate and initialise a new TLS block with enough extra
* space for our self pointer.
*/
tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
/*
* Cache the address of the thread structure here, after
* rtld's two words of private space.
*/
tcb->tcb_thread = thr;
if (uc == NULL)
amd64_set_fsbase(tcb);
return (tcb);
}
pthread_t
_get_curthread(void)
{
extern pthread_t _thread_initial;
pthread_t td;
if (_thread_initial == NULL)
return (NULL);
__asm __volatile("movq %%fs:%1, %0" \
: "=r" (td) \
: "m" (*(long *)(__offsetof(struct tcb, tcb_thread))));
return (td);
}

View File

@ -0,0 +1,57 @@
/*
* Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/types.h>
#include <rtld_tls.h>
#include "pthread_md.h"
/*
* The constructors.
*/
/*
 * Allocate a TCB through rtld so its placement stays consistent with
 * the dynamic linker's static TLS layout.
 *
 * thread:  the pthread this TCB will belong to.
 * initial: nonzero for the initial thread, whose TLS block was already
 *          set up by rtld and must be handed back via oldtls.
 *
 * Returns the new TCB with tcb_thread set, or NULL on failure.
 */
struct tcb *
_tcb_ctor(struct pthread *thread, int initial)
{
struct tcb *tcb;
void *oldtls;
/*
 * For the initial thread, read the existing TLS pointer from %fs:0
 * (presumably the rtld-required tcb_self slot — confirm against the
 * amd64 pthread_md.h layout) so rtld can migrate that block.
 */
if (initial)
__asm __volatile("movq %%fs:0, %0" : "=r" (oldtls));
else
oldtls = NULL;
tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
if (tcb)
tcb->tcb_thread = thread;
return (tcb);
}
/*
 * Release a TCB allocated by _tcb_ctor().  The size and alignment must
 * match the _rtld_allocate_tls() call in the constructor.
 */
void
_tcb_dtor(struct tcb *tcb)
{
_rtld_free_tls(tcb, sizeof(struct tcb), 16);
}

View File

@ -0,0 +1,103 @@
/*-
* Copyright (C) 2003 David Xu <davidxu@freebsd.org>
* Copyright (c) 2001 Daniel Eischen <deischen@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Machine-dependent thread prototypes/definitions.
*/
#ifndef _PTHREAD_MD_H_
#define _PTHREAD_MD_H_
#include <stddef.h>
#include <sys/types.h>
#include <machine/sysarch.h>
#include <ucontext.h>
#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
/*
* Variant II tcb, first two members are required by rtld,
* %fs points to the structure.
*/
struct tcb {
struct tcb *tcb_self; /* required by rtld */
void *tcb_dtv; /* required by rtld */
struct pthread *tcb_thread;
void *tcb_spare[1];
};
/*
* Evaluates to the byte offset of the per-tcb variable name.
*/
#define __tcb_offset(name) __offsetof(struct tcb, name)
/*
* Evaluates to the type of the per-tcb variable name.
*/
#define __tcb_type(name) __typeof(((struct tcb *)0)->name)
/*
* Evaluates to the value of the per-tcb variable name.
*/
#define TCB_GET64(name) ({ \
__tcb_type(name) __result; \
\
u_long __i; \
__asm __volatile("movq %%fs:%1, %0" \
: "=r" (__i) \
: "m" (*(u_long *)(__tcb_offset(name)))); \
__result = (__tcb_type(name))__i; \
\
__result; \
})
struct tcb *_tcb_ctor(struct pthread *, int);
void _tcb_dtor(struct tcb *tcb);
static __inline void
_tcb_set(struct tcb *tcb)
{
amd64_set_fsbase(tcb);
}
static __inline struct tcb *
_tcb_get(void)
{
return (TCB_GET64(tcb_self));
}
extern struct pthread *_thr_initial;
/*
 * Return the current thread's pthread pointer from the per-thread TCB
 * (via %fs), or NULL before the library is initialized (_thr_initial
 * not yet set), when no TCB has been installed.
 */
static __inline struct pthread *
_get_curthread(void)
{
if (_thr_initial)
return (TCB_GET64(tcb_thread));
return (NULL);
}
#endif

View File

@ -0,0 +1,7 @@
# $FreeBSD$
.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
CFLAGS+= -DARM_HAS_ATOMIC_CMPSET_32
SRCS+= pthread_md.c

View File

@ -1,5 +1,5 @@
/*-
* Copyright (c) 2002 Jake Burkholder.
* Copyright (C) 2005 David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -7,9 +7,9 @@
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 2. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
@ -22,38 +22,33 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <stdlib.h>
#include <sys/types.h>
#include <sys/ucontext.h>
#include <rtld_tls.h>
#include <pthread.h>
#include "thr_private.h"
#include "pthread_md.h"
register struct pthread *_curthread __asm("%g6");
struct umtx arm_umtx = {
.u_owner = UMTX_UNOWNED
};
struct pthread *
_get_curthread(void)
struct tcb *
_tcb_ctor(struct pthread *thread, int initial)
{
struct tcb *tcb;
return (_curthread);
tcb = malloc(sizeof(struct tcb));
if (tcb)
tcb->tcb_thread = thread;
return (tcb);
}
void
_retire_thread(void *v)
_tcb_dtor(struct tcb *tcb)
{
}
void *
_set_curthread(ucontext_t *uc, struct pthread *thread, int *err)
{
*err = 0;
if (uc != NULL)
uc->uc_mcontext.mc_global[6] = (uint64_t)thread;
else
_curthread = thread;
return (NULL);
free(tcb);
}

View File

@ -0,0 +1,106 @@
/*-
* Copyright (c) 2005 David Xu <davidxu@freebsd.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Machine-dependent thread prototypes/definitions.
*/
#ifndef _PTHREAD_MD_H_
#define _PTHREAD_MD_H_
#include <sys/types.h>
#include <machine/sysarch.h>
#include <stddef.h>
#include <errno.h>
static __inline int atomic_cmpset_32(volatile uint32_t *, uint32_t, uint32_t);
#include <sys/umtx.h>
#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
/*
* Variant II tcb, first two members are required by rtld.
*/
struct tcb {
struct tcb *tcb_self; /* required by rtld */
void *tcb_dtv; /* required by rtld */
struct pthread *tcb_thread; /* our hook */
void *tcb_spare[1];
};
/*
* The tcb constructors.
*/
struct tcb *_tcb_ctor(struct pthread *, int);
void _tcb_dtor(struct tcb *);
/* Called from the thread to set its private data. */
static __inline void
_tcb_set(struct tcb *tcb)
{
*((struct tcb **)ARM_TP_ADDRESS) = tcb;
}
/*
* Get the current tcb.
*/
static __inline struct tcb *
_tcb_get(void)
{
return (*((struct tcb **)ARM_TP_ADDRESS));
}
extern struct pthread *_thr_initial;
static __inline struct pthread *
_get_curthread(void)
{
if (_thr_initial)
return (_tcb_get()->tcb_thread);
return (NULL);
}
extern struct umtx arm_umtx;
/*
 * Emulated 32-bit compare-and-set: atomically replace *dst with newval
 * iff *dst == old.  Returns 1 on success, 0 if the comparison failed.
 *
 * The ARM port serializes all callers through one global umtx
 * (arm_umtx) instead of using a native CAS instruction — presumably
 * because this CPU variant lacks one (cf. the ARM_HAS_ATOMIC_CMPSET_32
 * Makefile knob); TODO confirm.
 */
static __inline int
atomic_cmpset_32(volatile uint32_t *dst, uint32_t old, uint32_t newval)
{
int ret;
_umtx_lock(&arm_umtx);
/*
 * NOTE(review): setting UMTX_CONTESTED in u_owner looks intended to
 * force the unlock path through the kernel — verify against the
 * umtx(2) protocol.
 */
arm_umtx.u_owner = (void*)((uint32_t)arm_umtx.u_owner | UMTX_CONTESTED);
if (*dst == old) {
*dst = newval;
ret = 1;
} else
ret = 0;
_umtx_unlock(&arm_umtx);
return (ret);
}
#endif /* _PTHREAD_MD_H_ */

View File

@ -1,5 +1,5 @@
# $FreeBSD$
.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
SRCS+= _setcurthread.c _curthread.S
SRCS+= pthread_md.c

View File

@ -1,17 +0,0 @@
/* $FreeBSD$ */
#include <machine/asm.h>
ENTRY(_get_curthread)
cmpl $0, _thread_initial
je nothreads
movl %gs:8, %eax
ret
nothreads:
xor %eax, %eax
ret
ENTRY(_set_gs)
movl 4(%esp), %eax
movl %eax, %gs
ret

View File

@ -1,136 +0,0 @@
/*
* Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/types.h>
#include <sys/ucontext.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <machine/sysarch.h>
#include <machine/segments.h>
#include "thr_private.h"
#include "rtld_tls.h"
/* in _curthread.S */
extern void _set_gs(int);
struct tcb {
struct tcb *tcb_self; /* required by rtld */
void *tcb_dtv; /* required by rtld */
struct pthread *tcb_thread;
int tcb_ldt;
};
void
_retire_thread(void *entry)
{
struct tcb *tcb = (struct tcb *)entry;
i386_set_ldt(tcb->tcb_ldt, NULL, 1);
_rtld_free_tls(tcb, sizeof(struct tcb), 16);
}
void *
_set_curthread(ucontext_t *uc, struct pthread *thr, int *err)
{
#ifndef COMPAT_32BIT
union descriptor desc;
#endif
struct tcb *tcb;
void *oldtls;
#ifndef COMPAT_32BIT
int ldt_index;
#endif
*err = 0;
if (uc == NULL && thr->arch_id != NULL) {
#ifdef COMPAT_32BIT
_amd64_set_gsbase(thr->arch_id);
#endif
return (thr->arch_id);
}
if (uc == NULL) {
__asm __volatile("movl %%gs:0, %0" : "=r" (oldtls));
} else {
oldtls = NULL;
}
/*
* Allocate and initialise a new TLS block with enough extra
* space for our self pointer.
*/
tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
/*
* Cache the address of the thread structure here, after
* rtld's two words of private space.
*/
tcb->tcb_thread = thr;
#ifndef COMPAT_32BIT
bzero(&desc, sizeof(desc));
/*
* Set up the descriptor to point at the TLS block.
*/
desc.sd.sd_lolimit = 0xFFFF;
desc.sd.sd_lobase = (unsigned int)tcb & 0xFFFFFF;
desc.sd.sd_type = SDT_MEMRW;
desc.sd.sd_dpl = SEL_UPL;
desc.sd.sd_p = 1;
desc.sd.sd_hilimit = 0xF;
desc.sd.sd_xx = 0;
desc.sd.sd_def32 = 1;
desc.sd.sd_gran = 1;
desc.sd.sd_hibase = (unsigned int)tcb >> 24;
/* Get a slot from the process' LDT list */
ldt_index = i386_set_ldt(LDT_AUTO_ALLOC, &desc, 1);
if (ldt_index == -1)
abort();
tcb->tcb_ldt = ldt_index;
/*
* Set up our gs with the index into the ldt for this entry.
*/
if (uc != NULL)
uc->uc_mcontext.mc_gs = LSEL(ldt_index, SEL_UPL);
else
_set_gs(LSEL(ldt_index, SEL_UPL));
#else
if (uc == NULL)
_amd64_set_gsbase(tcb);
#endif
return (tcb);
}

View File

@ -0,0 +1,84 @@
/*-
* Copyright (C) 2003 David Xu <davidxu@freebsd.org>
* Copyright (c) 2001,2003 Daniel Eischen <deischen@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/types.h>
#include <machine/segments.h>
#include <machine/sysarch.h>
#include <string.h>
#include <rtld_tls.h>
#include "pthread_md.h"
/*
 * Allocate a TCB via rtld and, on native i386, publish it through a
 * fresh LDT segment descriptor so %gs-relative accesses reach it.
 *
 * thread:  the pthread this TCB will belong to.
 * initial: nonzero for the initial thread, whose existing rtld TLS
 *          block is handed back through oldtls.
 *
 * Returns the new TCB, or NULL if the TLS allocation or the LDT slot
 * allocation fails.
 */
struct tcb *
_tcb_ctor(struct pthread *thread, int initial)
{
#ifndef COMPAT_32BIT
union descriptor ldt;
#endif
struct tcb *tcb;
void *oldtls;
/*
 * %gs:0 is presumably the rtld-required tcb_self slot — read the
 * initial thread's current TLS base so rtld can migrate it.
 */
if (initial)
__asm __volatile("movl %%gs:0, %0" : "=r" (oldtls));
else
oldtls = NULL;
tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
if (tcb) {
tcb->tcb_thread = thread;
#ifndef COMPAT_32BIT
/*
 * Build a byte-granular, read/write, user-privilege data segment
 * whose base is the TCB and whose limit covers exactly the TCB.
 */
ldt.sd.sd_hibase = (unsigned int)tcb >> 24;
ldt.sd.sd_lobase = (unsigned int)tcb & 0xFFFFFF;
ldt.sd.sd_hilimit = (sizeof(struct tcb) >> 16) & 0xF;
ldt.sd.sd_lolimit = sizeof(struct tcb) & 0xFFFF;
ldt.sd.sd_type = SDT_MEMRWA;
ldt.sd.sd_dpl = SEL_UPL;
ldt.sd.sd_p = 1;
ldt.sd.sd_xx = 0;
ldt.sd.sd_def32 = 1;
ldt.sd.sd_gran = 0; /* no more than 1M */
/* Let the kernel pick a free LDT slot; remember it for the dtor. */
tcb->tcb_ldt = i386_set_ldt(LDT_AUTO_ALLOC, &ldt, 1);
/* No LDT slot available: undo the TLS allocation and fail. */
if (tcb->tcb_ldt < 0) {
_rtld_free_tls(tcb, sizeof(struct tcb), 16);
tcb = NULL;
}
#endif
}
return (tcb);
}
/*
 * Release a TCB allocated by _tcb_ctor(): free its LDT slot (native
 * i386 only, and only if one was actually allocated) and return the
 * TLS block to rtld with the same size/alignment used at allocation.
 */
void
_tcb_dtor(struct tcb *tcb)
{
#ifndef COMPAT_32BIT
if (tcb->tcb_ldt >= 0)
i386_set_ldt(tcb->tcb_ldt, NULL, 1);
#endif
_rtld_free_tls(tcb, sizeof(struct tcb), 16);
}

View File

@ -0,0 +1,116 @@
/*-
* Copyright (c) 2002 Daniel Eischen <deischen@freebsd.org>.
* Copyright (c) 2005 David Xu <davidxu@freebsd.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Machine-dependent thread prototypes/definitions.
*/
#ifndef _PTHREAD_MD_H_
#define _PTHREAD_MD_H_
#include <stddef.h>
#include <sys/types.h>
#include <machine/sysarch.h>
#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
/*
* Variant II tcb, first two members are required by rtld,
* %gs points to the structure.
*/
struct tcb {
struct tcb *tcb_self; /* required by rtld */
void *tcb_dtv; /* required by rtld */
struct pthread *tcb_thread;
int tcb_ldt;
};
/*
* Evaluates to the byte offset of the per-tcb variable name.
*/
#define __tcb_offset(name) __offsetof(struct tcb, name)
/*
* Evaluates to the type of the per-tcb variable name.
*/
#define __tcb_type(name) __typeof(((struct tcb *)0)->name)
/*
* Evaluates to the value of the per-tcb variable name.
*/
#define TCB_GET32(name) ({ \
__tcb_type(name) __result; \
\
u_int __i; \
__asm __volatile("movl %%gs:%1, %0" \
: "=r" (__i) \
: "m" (*(u_int *)(__tcb_offset(name)))); \
__result = (__tcb_type(name))__i; \
\
__result; \
})
/*
* The constructors.
*/
struct tcb *_tcb_ctor(struct pthread *, int);
void _tcb_dtor(struct tcb *tcb);
/* Called from the thread to set its private data. */
static __inline void
_tcb_set(struct tcb *tcb)
{
#ifndef COMPAT_32BIT
int val;
val = (tcb->tcb_ldt << 3) | 7;
__asm __volatile("movl %0, %%gs" : : "r" (val));
#else
_amd64_set_gsbase(tcb);
#endif
}
/* Get the current kcb. */
static __inline struct tcb *
_tcb_get(void)
{
return (TCB_GET32(tcb_self));
}
extern struct pthread *_thr_initial;
/* Get the current thread. */
static __inline struct pthread *
_get_curthread(void)
{
if (_thr_initial)
return (TCB_GET32(tcb_thread));
return (NULL);
}
#endif

View File

@ -1,5 +1,5 @@
# $FreeBSD$
.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
SRCS+= _curthread.c
SRCS+= pthread_md.c

View File

@ -0,0 +1,58 @@
/*
* Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
* Copyright (c) 2005 David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/types.h>
#include <rtld_tls.h>
#include "pthread_md.h"
/*
* The constructors.
*/
/*
 * Allocate a TCB through rtld so its placement matches the dynamic
 * linker's static TLS layout.
 *
 * thread:  the pthread this TCB will belong to.
 * initial: nonzero for the initial thread, whose current thread
 *          pointer (_tp) is handed to rtld as the existing TLS block.
 *
 * Returns the new TCB with tcb_thread set, or NULL on failure.
 */
struct tcb *
_tcb_ctor(struct pthread *thread, int initial)
{
struct tcb *tcb;
void *oldtls;
if (initial)
oldtls = _tp;
else
oldtls = NULL;
tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
if (tcb)
tcb->tcb_thread = thread;
return (tcb);
}
void
_tcb_dtor(struct tcb *tcb)
{
_rtld_free_tls(tcb, sizeof(tcb), 16);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2004 Suleiman Souhlal
* Copyright (c) 2003 Marcel Moolenaar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -22,37 +22,57 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#ifndef _PTHREAD_MD_H_
#define _PTHREAD_MD_H_
#include <sys/types.h>
#include <sys/ucontext.h>
#include <stddef.h>
#include <pthread.h>
#include "thr_private.h"
#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
register struct pthread *_curthread __asm("%r2");
/*
* Variant I tcb. The structure layout is fixed, don't blindly
* change it!
*/
struct tcb {
void *tcb_dtv;
struct pthread *tcb_thread;
};
struct pthread *
register struct tcb *_tp __asm("%r13");
#define _tcb _tp
/*
* The tcb constructors.
*/
struct tcb *_tcb_ctor(struct pthread *, int);
void _tcb_dtor(struct tcb *);
/* Called from the thread to set its private data. */
static __inline void
_tcb_set(struct tcb *tcb)
{
_tp = tcb;
}
static __inline struct tcb *
_tcb_get(void)
{
return (_tcb);
}
extern struct pthread *_thr_initial;
static __inline struct pthread *
_get_curthread(void)
{
return (_curthread);
}
void
_retire_thread(void *v)
{
}
void *
_set_curthread(ucontext_t *uc, struct pthread *thread, int *err)
{
*err = 0;
if (uc != NULL)
uc->uc_mcontext.mc_gpr[2] = (uint32_t)thread;
else
_curthread = thread;
if (_thr_initial)
return (_tcb->tcb_thread);
return (NULL);
}
#endif /* _PTHREAD_MD_H_ */

View File

@ -1,5 +1,5 @@
# $FreeBSD$
.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
SRCS+= _curthread.c
SRCS+= pthread_md.c

View File

@ -0,0 +1,80 @@
/*
* Copyright 2004 by Peter Grehan. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Machine-dependent thread prototypes/definitions.
*/
#ifndef _PTHREAD_MD_H_
#define _PTHREAD_MD_H_
#include <stddef.h>
#include <sys/types.h>
#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
/*
* Variant I tcb. The structure layout is fixed, don't blindly
* change it.
* %r2 points to end of the structure.
*/
struct tcb {
void *tcb_dtv;
struct pthread *tcb_thread;
};
register uint8_t *_tp __asm("%r2");
#define _tcb ((struct tcb *)(_tp - sizeof(struct tcb)))
struct tcb *_tcb_ctor(struct pthread *, int);
void _tcb_dtor(struct tcb *);
static __inline void
_tcb_set(struct tcb *tcb)
{
_tp = (uint8_t *)tcb + sizeof(struct tcb);
}
static __inline struct tcb *
_tcb_get(void)
{
return (_tcb);
}
extern struct pthread *_thr_initial;
static __inline struct pthread *
_get_curthread(void)
{
if (_thr_initial)
return (_tcb->tcb_thread);
return (NULL);
}
#endif /* _PTHREAD_MD_H_ */

View File

@ -1,14 +1,16 @@
/*
* Copyright (c) 2003 The FreeBSD Project. All rights reserved.
* Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 2. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
@ -21,35 +23,36 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/ucontext.h>
#include <rtld_tls.h>
#include <pthread.h>
#include "thr_private.h"
#include "pthread_md.h"
void *
_set_curthread(ucontext_t *uc, struct pthread *thread, int *err)
/*
* The constructors.
*/
struct tcb *
_tcb_ctor(struct pthread *thread, int initial)
{
*err = 0;
if (uc != NULL)
uc->uc_mcontext.mc_thrptr = (uint64_t)thread;
struct tcb *tcb;
void *oldtls;
if (initial)
oldtls = _tp;
else
__builtin_set_thread_pointer(thread);
return (NULL);
}
oldtls = NULL;
tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
if (tcb)
tcb->tcb_thread = thread;
return (tcb);
struct pthread *
_get_curthread(void)
{
return (__builtin_thread_pointer());
}
void
_retire_thread(void *v)
_tcb_dtor(struct tcb *tcb)
{
_rtld_free_tls(tcb, sizeof(tcb), 16);
}

View File

@ -1,5 +1,5 @@
# $FreeBSD$
.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
.PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
SRCS+= _setcurthread.c
SRCS+= pthread_md.c

View File

@ -0,0 +1,87 @@
/*-
* Copyright (c) 2003 Jake Burkholder <jake@freebsd.org>.
* Copyright (c) 2003 Marcel Moolenaar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Machine-dependent thread prototypes/definitions.
*/
#ifndef _PTHREAD_MD_H_
#define _PTHREAD_MD_H_
#include <stddef.h>
#define DTV_OFFSET offsetof(struct tcb, tcb_dtv)
/*
* Variant II tcb, first two members are required by rtld.
* %g7 points to the structure.
*/
struct tcb {
struct tcb *tcb_self; /* required by rtld */
void *tcb_dtv; /* required by rtld */
struct pthread *tcb_thread; /* our hook */
void *tcb_spare[1];
};
register struct tcb *_tp __asm("%g7");
#define _tcb (_tp)
/*
* The tcb constructors.
*/
struct tcb *_tcb_ctor(struct pthread *, int);
void _tcb_dtor(struct tcb *);
/* Called from the thread to set its private data. */
static __inline void
_tcb_set(struct tcb *tcb)
{
_tp = tcb;
}
/*
* Get the current tcb.
*/
static __inline struct tcb *
_tcb_get(void)
{
return (_tcb);
}
extern struct pthread *_thr_initial;
static __inline struct pthread *
_get_curthread(void)
{
if (_thr_initial)
return (_tcb->tcb_thread);
return (NULL);
}
#endif /* _PTHREAD_MD_H_ */

View File

@ -0,0 +1,56 @@
/*-
* Copyright (C) 2003 Jake Burkholder <jake@freebsd.org>
* Copyright (C) 2003 David Xu <davidxu@freebsd.org>
* Copyright (c) 2001,2003 Daniel Eischen <deischen@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Neither the name of the author nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/types.h>
#include <rtld_tls.h>
#include "pthread_md.h"
struct tcb *
_tcb_ctor(struct pthread *thread, int initial)
{
struct tcb *tcb;
void *oldtls;
if (initial)
oldtls = _tp;
else
oldtls = NULL;
tcb = _rtld_allocate_tls(oldtls, sizeof(struct tcb), 16);
if (tcb)
tcb->tcb_thread = thread;
return (tcb);
}
void
_tcb_dtor(struct tcb *tcb)
{
_rtld_free_tls(tcb, sizeof(struct tcb), 16);
}

365
lib/libthr/pthread.map Normal file
View File

@ -0,0 +1,365 @@
# $FreeBSD$
LIBPTHREAD_1_0 {
global:
___creat;
__accept;
__close;
__connect;
__error;
__fcntl;
__fsync;
__msync;
__nanosleep;
__open;
__poll;
__pthread_cond_timedwait;
__pthread_cond_wait;
__pthread_mutex_init;
__pthread_mutex_lock;
__pthread_mutex_trylock;
__pthread_mutex_timedlock;
__read;
__readv;
__recvfrom;
__recvmsg;
__select;
__sendmsg;
__sendto;
__sigsuspend;
__wait4;
__write;
__writev;
_aio_suspend;
_execve;
_fork;
_nanosleep;
_pause;
_pselect;
_pthread_atfork;
_pthread_barrier_destroy;
_pthread_barrier_init;
_pthread_barrier_wait;
_pthread_barrierattr_destroy;
_pthread_barrierattr_getpshared;
_pthread_barrierattr_init;
_pthread_barrierattr_setpshared;
_pthread_attr_default;
_pthread_attr_destroy;
_pthread_attr_get_np;
_pthread_attr_getdetachstate;
_pthread_attr_getguardsize;
_pthread_attr_getinheritsched;
_pthread_attr_getschedparam;
_pthread_attr_getschedpolicy;
_pthread_attr_getscope;
_pthread_attr_getstack;
_pthread_attr_getstackaddr;
_pthread_attr_getstacksize;
_pthread_attr_init;
_pthread_attr_setcreatesuspend_np;
_pthread_attr_setdetachstate;
_pthread_attr_setguardsize;
_pthread_attr_setinheritsched;
_pthread_attr_setschedparam;
_pthread_attr_setschedpolicy;
_pthread_attr_setscope;
_pthread_attr_setstack;
_pthread_attr_setstackaddr;
_pthread_attr_setstacksize;
_pthread_cancel;
_pthread_cleanup_pop;
_pthread_cleanup_push;
_pthread_cond_broadcast;
_pthread_cond_destroy;
_pthread_cond_init;
_pthread_cond_signal;
_pthread_cond_timedwait;
_pthread_cond_wait;
_pthread_condattr_default;
_pthread_condattr_destroy;
_pthread_condattr_getclock;
_pthread_condattr_getpshared;
_pthread_condattr_init;
_pthread_condattr_setclock;
_pthread_condattr_setpshared;
_pthread_create;
_pthread_detach;
_pthread_equal;
_pthread_exit;
_pthread_getconcurrency;
_pthread_getprio;
_pthread_getschedparam;
_pthread_getspecific;
_pthread_join;
_pthread_key_create;
_pthread_key_delete;
_pthread_kill;
_pthread_main_np;
_pthread_multi_np;
_pthread_mutex_destroy;
_pthread_mutex_getprioceiling;
_pthread_mutex_init;
_pthread_mutex_lock;
_pthread_mutex_setprioceiling;
_pthread_mutex_timedlock;
_pthread_mutex_trylock;
_pthread_mutex_unlock;
_pthread_mutexattr_default;
_pthread_mutexattr_destroy;
_pthread_mutexattr_getkind_np;
_pthread_mutexattr_getprioceiling;
_pthread_mutexattr_getprotocol;
_pthread_mutexattr_gettype;
_pthread_mutexattr_init;
_pthread_mutexattr_setkind_np;
_pthread_mutexattr_setprioceiling;
_pthread_mutexattr_setprotocol;
_pthread_mutexattr_settype;
_pthread_once;
_pthread_resume_all_np;
_pthread_resume_np;
_pthread_rwlock_destroy;
_pthread_rwlock_init;
_pthread_rwlock_rdlock;
_pthread_rwlock_timedrdlock;
_pthread_rwlock_timedwrlock;
_pthread_rwlock_tryrdlock;
_pthread_rwlock_trywrlock;
_pthread_rwlock_unlock;
_pthread_rwlock_wrlock;
_pthread_rwlockattr_destroy;
_pthread_rwlockattr_getpshared;
_pthread_rwlockattr_init;
_pthread_rwlockattr_setpshared;
_pthread_self;
_pthread_set_name_np;
_pthread_setcancelstate;
_pthread_setcanceltype;
_pthread_setconcurrency;
_pthread_setprio;
_pthread_setschedparam;
_pthread_setspecific;
_pthread_sigmask;
_pthread_single_np;
_pthread_spin_destroy;
_pthread_spin_init;
_pthread_spin_lock;
_pthread_spin_trylock;
_pthread_spin_unlock;
_pthread_suspend_all_np;
_pthread_suspend_np;
_pthread_switch_add_np;
_pthread_switch_delete_np;
_pthread_testcancel;
_pthread_yield;
_raise;
_sem_close;
_sem_destroy;
_sem_getvalue;
_sem_init;
_sem_open;
_sem_post;
_sem_timedwait;
_sem_trywait;
_sem_unlink;
_sem_wait;
_sigaction;
_sigprocmask;
_sigsuspend;
_sigwait;
_sigtimedwait;
_sigwaitinfo;
_sleep;
_spinlock;
_spinlock_debug;
_spinunlock;
_system;
_tcdrain;
_vfork;
_wait;
_waitpid;
accept;
aio_suspend;
close;
connect;
creat;
execve;
fcntl;
fork;
fsync;
msync;
nanosleep;
open;
pause;
poll;
pselect;
pthread_atfork;
pthread_barrier_destroy;
pthread_barrier_init;
pthread_barrier_wait;
pthread_barrierattr_destroy;
pthread_barrierattr_getpshared;
pthread_barrierattr_init;
pthread_barrierattr_setpshared;
pthread_attr_destroy;
pthread_attr_get_np;
pthread_attr_getdetachstate;
pthread_attr_getguardsize;
pthread_attr_getinheritsched;
pthread_attr_getschedparam;
pthread_attr_getschedpolicy;
pthread_attr_getscope;
pthread_attr_getstack;
pthread_attr_getstackaddr;
pthread_attr_getstacksize;
pthread_attr_init;
pthread_attr_setcreatesuspend_np;
pthread_attr_setdetachstate;
pthread_attr_setguardsize;
pthread_attr_setinheritsched;
pthread_attr_setschedparam;
pthread_attr_setschedpolicy;
pthread_attr_setscope;
pthread_attr_setstack;
pthread_attr_setstackaddr;
pthread_attr_setstacksize;
pthread_cancel;
pthread_cleanup_pop;
pthread_cleanup_push;
pthread_cond_broadcast;
pthread_cond_destroy;
pthread_cond_init;
pthread_cond_signal;
pthread_cond_timedwait;
pthread_cond_wait;
pthread_condattr_destroy;
pthread_condattr_init;
pthread_create;
pthread_detach;
pthread_equal;
pthread_exit;
pthread_getconcurrency;
pthread_getprio;
pthread_getschedparam;
pthread_getspecific;
pthread_join;
pthread_key_create;
pthread_key_delete;
pthread_kill;
pthread_main_np;
pthread_multi_np;
pthread_mutex_destroy;
pthread_mutex_getprioceiling;
pthread_mutex_init;
pthread_mutex_lock;
pthread_mutex_setprioceiling;
pthread_mutex_timedlock;
pthread_mutex_trylock;
pthread_mutex_unlock;
pthread_mutexattr_destroy;
pthread_mutexattr_getkind_np;
pthread_mutexattr_getprioceiling;
pthread_mutexattr_getprotocol;
pthread_mutexattr_gettype;
pthread_mutexattr_init;
pthread_mutexattr_setkind_np;
pthread_mutexattr_setprioceiling;
pthread_mutexattr_setprotocol;
pthread_mutexattr_settype;
pthread_once;
pthread_resume_all_np;
pthread_resume_np;
pthread_rwlock_destroy;
pthread_rwlock_init;
pthread_rwlock_rdlock;
pthread_rwlock_timedrdlock;
pthread_rwlock_timedwrlock;
pthread_rwlock_tryrdlock;
pthread_rwlock_trywrlock;
pthread_rwlock_unlock;
pthread_rwlock_wrlock;
pthread_rwlockattr_destroy;
pthread_rwlockattr_getpshared;
pthread_rwlockattr_init;
pthread_rwlockattr_setpshared;
pthread_self;
pthread_set_name_np;
pthread_setcancelstate;
pthread_setcanceltype;
pthread_setconcurrency;
pthread_setprio;
pthread_setschedparam;
pthread_setspecific;
pthread_sigmask;
pthread_single_np;
pthread_spin_destroy;
pthread_spin_init;
pthread_spin_lock;
pthread_spin_trylock;
pthread_spin_unlock;
pthread_suspend_all_np;
pthread_suspend_np;
pthread_switch_add_np;
pthread_switch_delete_np;
pthread_testcancel;
pthread_yield;
raise;
read;
readv;
recvfrom;
recvmsg;
select;
sem_close;
sem_destroy;
sem_getvalue;
sem_init;
sem_open;
sem_post;
sem_timedwait;
sem_trywait;
sem_unlink;
sem_wait;
sendmsg;
sendto;
sigaction;
sigaltstack;
sigpending;
sigprocmask;
sigsuspend;
sigwait;
sigwaitinfo;
sigtimedwait;
sleep;
system;
tcdrain;
vfork;
wait;
wait4;
waitpid;
write;
writev;
# Debugger needs these.
_libthr_debug;
_thread_active_threads;
_thread_keytable;
_thread_list;
_thread_max_keys;
_thread_off_attr_flags;
_thread_off_dtv;
_thread_off_linkmap;
_thread_off_next;
_thread_off_tcb;
_thread_off_tid;
_thread_off_key_allocated;
_thread_off_key_destructor;
_thread_off_state;
_thread_off_thr_locklevel;
_thread_off_tlsindex;
_thread_off_isdead;
_thread_size_key;
_thread_state_running;
_thread_state_zoombie;
local:
*;
};

View File

@ -1,5 +1,5 @@
# $FreeBSD$
.PATH: ${.CURDIR}/sys ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH}
.PATH: ${.CURDIR}/sys
SRCS+= thr_error.c
SRCS+= thr_error.c

View File

@ -35,25 +35,20 @@
*/
#include <pthread.h>
#include "libc_private.h"
#include "thr_private.h"
#undef errno
extern int errno;
__weak_reference(___error, __error);
int *
___error()
__error(void)
{
struct pthread *pthread;
struct pthread *curthread = _get_curthread();
if (_thread_initial == NULL)
if (curthread != NULL && curthread != _thr_initial)
return (&curthread->error);
else
return (&errno);
pthread = _get_curthread();
if (pthread == _thread_initial)
return (&errno);
return (&pthread->error);
}

View File

@ -4,36 +4,35 @@
.PATH: ${.CURDIR}/thread
SRCS+= \
thr_atfork.c \
thr_attr.c \
thr_autoinit.c \
thr_barrier.c \
thr_barrierattr.c \
thr_cancel.c \
thr_clean.c \
thr_concurrency.c \
thr_cond.c \
thr_condattr_destroy.c \
thr_condattr_init.c \
thr_condattr.c \
thr_create.c \
thr_detach.c \
thr_equal.c \
thr_exit.c \
thr_find_thread.c \
thr_fork.c \
thr_getprio.c \
thr_getschedparam.c \
thr_init.c \
thr_join.c \
thr_list.c \
thr_kern.c \
thr_kill.c \
thr_main_np.c \
thr_mattr_init.c \
thr_mattr_kind_np.c \
thr_multi_np.c \
thr_mutex.c \
thr_mutexattr.c \
thr_mutex_prioceiling.c \
thr_mutex_protocol.c \
thr_mutexattr_destroy.c \
thr_once.c \
thr_printf.c \
thr_pspinlock.c \
thr_resume_np.c \
thr_rwlock.c \
thr_rwlockattr.c \
@ -43,10 +42,13 @@ SRCS+= \
thr_setprio.c \
thr_setschedparam.c \
thr_sig.c \
thr_single_np.c \
thr_spec.c \
thr_spinlock.c \
thr_stack.c \
thr_subr.c \
thr_suspend_np.c \
thr_syscalls.c \
thr_suspend_np.c \
thr_switch_np.c \
thr_symbols.c \
thr_umtx.c \
thr_yield.c

View File

@ -23,16 +23,13 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <errno.h>
#include <stdlib.h>
#include <pthread.h>
#include <sys/queue.h>
#include "thr_private.h"
__weak_reference(_pthread_atfork, pthread_atfork);
@ -41,20 +38,20 @@ int
_pthread_atfork(void (*prepare)(void), void (*parent)(void),
void (*child)(void))
{
struct pthread *curthread;
struct pthread_atfork *af;
if (_thread_initial == NULL)
_thread_init();
_thr_check_init();
if ((af = malloc(sizeof(struct pthread_atfork))) == NULL)
return (ENOMEM);
curthread = _get_curthread();
af->prepare = prepare;
af->parent = parent;
af->child = child;
_pthread_mutex_lock(&_atfork_mutex);
TAILQ_INSERT_TAIL(&_atfork_list, af, qe);
_pthread_mutex_unlock(&_atfork_mutex);
THR_UMTX_LOCK(curthread, &_thr_atfork_lock);
TAILQ_INSERT_TAIL(&_thr_atfork_list, af, qe);
THR_UMTX_UNLOCK(curthread, &_thr_atfork_lock);
return (0);
}

View File

@ -1,37 +1,3 @@
/*
* Copyright (c) 1995-1997 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by John Birrell.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Copyright (c) 2003 Craig Rodrigues <rodrigc@attbi.com>.
* All rights reserved.
@ -69,7 +35,6 @@
* Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
* Copyright (C) 2001 Jason Evans <jasone@freebsd.org>.
* Copyright (c) 2002,2003 Alexey Zelkin <phantom@FreeBSD.org>
* Copyright (c) 2003 Jeff Roberson <jeff@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -97,333 +62,471 @@
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* XXXTHR I rewrote the entire file, can we lose some of the copyrights? */
#include <sys/param.h>
/*
* Copyright (c) 1996 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by John Birrell.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <errno.h>
#include <pthread.h>
#include <pthread_np.h>
#include <stdlib.h>
#include <string.h>
#include <pthread_np.h>
#include "thr_private.h"
__weak_reference(_pthread_attr_destroy, pthread_attr_destroy);
__weak_reference(_pthread_attr_init, pthread_attr_init);
__weak_reference(_pthread_attr_setcreatesuspend_np,
pthread_attr_setcreatesuspend_np);
__weak_reference(_pthread_attr_setdetachstate, pthread_attr_setdetachstate);
__weak_reference(_pthread_attr_setguardsize, pthread_attr_setguardsize);
__weak_reference(_pthread_attr_setinheritsched, pthread_attr_setinheritsched);
__weak_reference(_pthread_attr_setschedparam, pthread_attr_setschedparam);
__weak_reference(_pthread_attr_setschedpolicy, pthread_attr_setschedpolicy);
__weak_reference(_pthread_attr_setscope, pthread_attr_setscope);
__weak_reference(_pthread_attr_setstack, pthread_attr_setstack);
__weak_reference(_pthread_attr_setstackaddr, pthread_attr_setstackaddr);
__weak_reference(_pthread_attr_setstacksize, pthread_attr_setstacksize);
__weak_reference(_pthread_attr_get_np, pthread_attr_get_np);
__weak_reference(_pthread_attr_getdetachstate, pthread_attr_getdetachstate);
__weak_reference(_pthread_attr_getguardsize, pthread_attr_getguardsize);
__weak_reference(_pthread_attr_getinheritsched, pthread_attr_getinheritsched);
__weak_reference(_pthread_attr_getschedparam, pthread_attr_getschedparam);
__weak_reference(_pthread_attr_getschedpolicy, pthread_attr_getschedpolicy);
__weak_reference(_pthread_attr_getscope, pthread_attr_getscope);
__weak_reference(_pthread_attr_getstack, pthread_attr_getstack);
__weak_reference(_pthread_attr_getstackaddr, pthread_attr_getstackaddr);
__weak_reference(_pthread_attr_getstacksize, pthread_attr_getstacksize);
int
_pthread_attr_init(pthread_attr_t *attr)
{
pthread_attr_t pattr;
if ((pattr = (pthread_attr_t)
malloc(sizeof(struct pthread_attr))) == NULL)
return (ENOMEM);
memcpy(pattr, &pthread_attr_default, sizeof(struct pthread_attr));
*attr = pattr;
return (0);
}
int
_pthread_attr_destroy(pthread_attr_t *attr)
{
int ret;
/* Check for invalid arguments: */
if (attr == NULL || *attr == NULL)
return (EINVAL);
/* Invalid argument: */
ret = EINVAL;
else {
/* Free the memory allocated to the attribute object: */
free(*attr);
free(*attr);
*attr = NULL;
return (0);
}
int
_pthread_attr_setcreatesuspend_np(pthread_attr_t *attr)
{
if (attr == NULL || *attr == NULL) {
errno = EINVAL;
return (-1);
/*
* Leave the attribute pointer NULL now that the memory
* has been freed:
*/
*attr = NULL;
ret = 0;
}
(*attr)->suspend = PTHREAD_CREATE_SUSPENDED;
return (0);
return(ret);
}
int
_pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
if (attr == NULL || *attr == NULL ||
(detachstate != PTHREAD_CREATE_DETACHED &&
detachstate != PTHREAD_CREATE_JOINABLE))
return (EINVAL);
if (detachstate == PTHREAD_CREATE_DETACHED)
(*attr)->flags |= PTHREAD_DETACHED;
else
(*attr)->flags &= ~PTHREAD_DETACHED;
return (0);
}
int
_pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
if (attr == NULL || *attr == NULL)
return (EINVAL);
(*attr)->guardsize_attr = roundup(guardsize, _pthread_page_size);
return (0);
}
int
_pthread_attr_setinheritsched(pthread_attr_t *attr, int sched_inherit)
{
if (attr == NULL || *attr == NULL)
return (EINVAL);
(*attr)->sched_inherit = sched_inherit;
return (0);
}
int
_pthread_attr_setschedparam(pthread_attr_t *attr,
const struct sched_param *param)
{
if (attr == NULL || *attr == NULL)
return (EINVAL);
if (param == NULL)
return (ENOTSUP);
if (param->sched_priority < PTHREAD_MIN_PRIORITY ||
param->sched_priority > PTHREAD_MAX_PRIORITY)
return (ENOTSUP);
(*attr)->prio = param->sched_priority;
return (0);
}
int
_pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
if (attr == NULL || *attr == NULL)
return (EINVAL);
if (policy < SCHED_FIFO || policy > SCHED_RR)
return (ENOTSUP);
(*attr)->sched_policy = policy;
return (0);
}
int
_pthread_attr_setscope(pthread_attr_t *attr, int contentionscope)
{
if (attr == NULL || *attr == NULL)
return (EINVAL);
if (contentionscope != PTHREAD_SCOPE_PROCESS ||
contentionscope == PTHREAD_SCOPE_SYSTEM)
/* We don't support PTHREAD_SCOPE_SYSTEM. */
return (ENOTSUP);
(*attr)->flags |= contentionscope;
return (0);
}
int
_pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr,
size_t stacksize)
{
if (attr == NULL || *attr == NULL || stackaddr == NULL
|| stacksize < PTHREAD_STACK_MIN)
return (EINVAL);
(*attr)->stackaddr_attr = stackaddr;
(*attr)->stacksize_attr = stacksize;
return (0);
}
int
_pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
if (attr == NULL || *attr == NULL || stackaddr == NULL)
return (EINVAL);
(*attr)->stackaddr_attr = stackaddr;
return (0);
}
int
_pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
if (attr == NULL || *attr == NULL || stacksize < PTHREAD_STACK_MIN)
return (EINVAL);
(*attr)->stacksize_attr = stacksize;
return (0);
}
__weak_reference(_pthread_attr_get_np, pthread_attr_get_np);
int
_pthread_attr_get_np(pthread_t pid, pthread_attr_t *dst)
{
struct pthread *curthread;
struct pthread_attr attr;
int ret;
if (pid == NULL || dst == NULL || *dst == NULL)
return (EINVAL);
if ((ret = _find_thread(pid)) != 0)
curthread = _get_curthread();
if ((ret = _thr_ref_add(curthread, pid, /*include dead*/0)) != 0)
return (ret);
memcpy(*dst, &pid->attr, sizeof(struct pthread_attr));
/*
* Special case, if stack address was not provided by caller
* of pthread_create(), then return address allocated internally
*/
if ((*dst)->stackaddr_attr == NULL)
(*dst)->stackaddr_attr = pid->stack;
attr = pid->attr;
_thr_ref_delete(curthread, pid);
memcpy(*dst, &attr, sizeof(struct pthread_attr));
return (0);
}
__weak_reference(_pthread_attr_getdetachstate, pthread_attr_getdetachstate);
int
_pthread_attr_getdetachstate(const pthread_attr_t *attr, int *detachstate)
{
int ret;
/* Check for invalid arguments: */
if (attr == NULL || *attr == NULL || detachstate == NULL)
return (EINVAL);
/* Check if the detached flag is set: */
if ((*attr)->flags & PTHREAD_DETACHED)
*detachstate = PTHREAD_CREATE_DETACHED;
else
*detachstate = PTHREAD_CREATE_JOINABLE;
return (0);
ret = EINVAL;
else {
/* Check if the detached flag is set: */
if ((*attr)->flags & PTHREAD_DETACHED)
/* Return detached: */
*detachstate = PTHREAD_CREATE_DETACHED;
else
/* Return joinable: */
*detachstate = PTHREAD_CREATE_JOINABLE;
ret = 0;
}
return(ret);
}
__weak_reference(_pthread_attr_getguardsize, pthread_attr_getguardsize);
int
_pthread_attr_getguardsize(const pthread_attr_t *attr, size_t *guardsize)
{
int ret;
/* Check for invalid arguments: */
if (attr == NULL || *attr == NULL || guardsize == NULL)
return (EINVAL);
*guardsize = (*attr)->guardsize_attr;
return (0);
ret = EINVAL;
else {
/* Return the guard size: */
*guardsize = (*attr)->guardsize_attr;
ret = 0;
}
return(ret);
}
__weak_reference(_pthread_attr_getinheritsched, pthread_attr_getinheritsched);
int
_pthread_attr_getinheritsched(const pthread_attr_t *attr, int *sched_inherit)
{
if (attr == NULL || *attr == NULL)
return (EINVAL);
int ret = 0;
*sched_inherit = (*attr)->sched_inherit;
if ((attr == NULL) || (*attr == NULL))
ret = EINVAL;
else
*sched_inherit = (*attr)->sched_inherit;
return (0);
return(ret);
}
__weak_reference(_pthread_attr_getschedparam, pthread_attr_getschedparam);
int
_pthread_attr_getschedparam(const pthread_attr_t *attr, struct sched_param *param)
{
if (attr == NULL || *attr == NULL || param == NULL)
return (EINVAL);
int ret = 0;
param->sched_priority = (*attr)->prio;
if ((attr == NULL) || (*attr == NULL) || (param == NULL))
ret = EINVAL;
else
param->sched_priority = (*attr)->prio;
return (0);
return(ret);
}
__weak_reference(_pthread_attr_getschedpolicy, pthread_attr_getschedpolicy);
int
_pthread_attr_getschedpolicy(const pthread_attr_t *attr, int *policy)
{
if (attr == NULL || *attr == NULL || policy == NULL)
return (EINVAL);
int ret = 0;
*policy = (*attr)->sched_policy;
if ((attr == NULL) || (*attr == NULL) || (policy == NULL))
ret = EINVAL;
else
*policy = (*attr)->sched_policy;
return (0);
return(ret);
}
__weak_reference(_pthread_attr_getscope, pthread_attr_getscope);
int
_pthread_attr_getscope(const pthread_attr_t *attr, int *contentionscope)
{
if (attr == NULL || *attr == NULL || contentionscope == NULL)
return (EINVAL);
int ret = 0;
*contentionscope = (*attr)->flags & PTHREAD_SCOPE_SYSTEM ?
PTHREAD_SCOPE_SYSTEM : PTHREAD_SCOPE_PROCESS;
if ((attr == NULL) || (*attr == NULL) || (contentionscope == NULL))
/* Return an invalid argument: */
ret = EINVAL;
return (0);
else
*contentionscope = (*attr)->flags & PTHREAD_SCOPE_SYSTEM ?
PTHREAD_SCOPE_SYSTEM : PTHREAD_SCOPE_PROCESS;
return(ret);
}
__weak_reference(_pthread_attr_getstack, pthread_attr_getstack);
int
_pthread_attr_getstack(const pthread_attr_t * __restrict attr,
void ** __restrict stackaddr,
size_t * __restrict stacksize)
{
int ret;
/* Check for invalid arguments: */
if (attr == NULL || *attr == NULL || stackaddr == NULL
|| stacksize == NULL)
return (EINVAL);
*stackaddr = (*attr)->stackaddr_attr;
*stacksize = (*attr)->stacksize_attr;
return (0);
|| stacksize == NULL )
ret = EINVAL;
else {
/* Return the stack address and size */
*stackaddr = (*attr)->stackaddr_attr;
*stacksize = (*attr)->stacksize_attr;
ret = 0;
}
return(ret);
}
__weak_reference(_pthread_attr_getstackaddr, pthread_attr_getstackaddr);
int
_pthread_attr_getstackaddr(const pthread_attr_t *attr, void **stackaddr)
{
int ret;
/* Check for invalid arguments: */
if (attr == NULL || *attr == NULL || stackaddr == NULL)
return (EINVAL);
*stackaddr = (*attr)->stackaddr_attr;
return (0);
ret = EINVAL;
else {
/* Return the stack address: */
*stackaddr = (*attr)->stackaddr_attr;
ret = 0;
}
return(ret);
}
__weak_reference(_pthread_attr_getstacksize, pthread_attr_getstacksize);
int
_pthread_attr_getstacksize(const pthread_attr_t *attr, size_t *stacksize)
{
int ret;
/* Check for invalid arguments: */
if (attr == NULL || *attr == NULL || stacksize == NULL)
return (EINVAL);
*stacksize = (*attr)->stacksize_attr;
return (0);
ret = EINVAL;
else {
/* Return the stack size: */
*stacksize = (*attr)->stacksize_attr;
ret = 0;
}
return(ret);
}
__weak_reference(_pthread_attr_init, pthread_attr_init);
int
_pthread_attr_init(pthread_attr_t *attr)
{
int ret;
pthread_attr_t pattr;
_thr_check_init();
/* Allocate memory for the attribute object: */
if ((pattr = (pthread_attr_t) malloc(sizeof(struct pthread_attr))) == NULL)
/* Insufficient memory: */
ret = ENOMEM;
else {
/* Initialise the attribute object with the defaults: */
memcpy(pattr, &_pthread_attr_default, sizeof(struct pthread_attr));
/* Return a pointer to the attribute object: */
*attr = pattr;
ret = 0;
}
return(ret);
}
__weak_reference(_pthread_attr_setcreatesuspend_np, pthread_attr_setcreatesuspend_np);
/*
 * Mark an attribute object so that threads created with it start in
 * the suspended state (FreeBSD extension).
 *
 * Returns 0 on success, EINVAL for a NULL/uninitialized attribute.
 */
int
_pthread_attr_setcreatesuspend_np(pthread_attr_t *attr)
{
	/* Reject a NULL or uninitialized attribute object. */
	if (attr == NULL || *attr == NULL)
		return (EINVAL);
	(*attr)->suspend = THR_CREATE_SUSPENDED;
	return (0);
}
__weak_reference(_pthread_attr_setdetachstate, pthread_attr_setdetachstate);
/*
 * Set the detach state (PTHREAD_CREATE_DETACHED or
 * PTHREAD_CREATE_JOINABLE) in a thread attribute object.
 *
 * Returns 0 on success, EINVAL for a bad attribute or detach state.
 */
int
_pthread_attr_setdetachstate(pthread_attr_t *attr, int detachstate)
{
	/* Validate the attribute object. */
	if (attr == NULL || *attr == NULL)
		return (EINVAL);
	/* Only the two POSIX detach states are accepted. */
	if (detachstate != PTHREAD_CREATE_DETACHED &&
	    detachstate != PTHREAD_CREATE_JOINABLE)
		return (EINVAL);
	if (detachstate == PTHREAD_CREATE_DETACHED)
		(*attr)->flags |= PTHREAD_DETACHED;
	else
		(*attr)->flags &= ~PTHREAD_DETACHED;
	return (0);
}
__weak_reference(_pthread_attr_setguardsize, pthread_attr_setguardsize);
/*
 * Store the guard size in a thread attribute object.
 * Returns 0 on success, EINVAL for a NULL/uninitialized attribute.
 * No rounding or range check is performed here; the value is applied
 * when the thread's stack is created.
 */
int
_pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
{
	int ret;
	/* Check for invalid arguments. */
	if (attr == NULL || *attr == NULL)
		ret = EINVAL;
	else {
		/* Save the guard size. */
		(*attr)->guardsize_attr = guardsize;
		ret = 0;
	}
	return(ret);
}
__weak_reference(_pthread_attr_setinheritsched, pthread_attr_setinheritsched);
/*
 * Set whether a new thread inherits scheduling attributes from its
 * creator (PTHREAD_INHERIT_SCHED) or takes them from the attribute
 * object (PTHREAD_EXPLICIT_SCHED).
 *
 * Returns 0 on success, EINVAL for a bad attribute, ENOTSUP for an
 * unrecognized inherit value.
 */
int
_pthread_attr_setinheritsched(pthread_attr_t *attr, int sched_inherit)
{
	if (attr == NULL || *attr == NULL)
		return (EINVAL);
	if (sched_inherit != PTHREAD_INHERIT_SCHED &&
	    sched_inherit != PTHREAD_EXPLICIT_SCHED)
		return (ENOTSUP);
	(*attr)->sched_inherit = sched_inherit;
	return (0);
}
__weak_reference(_pthread_attr_setschedparam, pthread_attr_setschedparam);
/*
 * Store the scheduling parameters (priority) in a thread attribute
 * object.
 *
 * Returns 0 on success, EINVAL for a bad attribute, ENOTSUP when
 * param is NULL or the priority falls outside
 * [THR_MIN_PRIORITY, THR_MAX_PRIORITY] (historical behavior).
 */
int
_pthread_attr_setschedparam(pthread_attr_t *attr, const struct sched_param *param)
{
	if (attr == NULL || *attr == NULL)
		return (EINVAL);
	if (param == NULL)
		return (ENOTSUP);
	/* Reject priorities outside the supported range. */
	if (param->sched_priority < THR_MIN_PRIORITY ||
	    param->sched_priority > THR_MAX_PRIORITY)
		return (ENOTSUP);
	(*attr)->prio = param->sched_priority;
	return (0);
}
__weak_reference(_pthread_attr_setschedpolicy, pthread_attr_setschedpolicy);
/*
 * Store the scheduling policy in a thread attribute object.
 *
 * Returns 0 on success, EINVAL for a bad attribute, ENOTSUP for a
 * policy outside the SCHED_FIFO..SCHED_RR range.
 */
int
_pthread_attr_setschedpolicy(pthread_attr_t *attr, int policy)
{
	if (attr == NULL || *attr == NULL)
		return (EINVAL);
	if (policy < SCHED_FIFO || policy > SCHED_RR)
		return (ENOTSUP);
	(*attr)->sched_policy = policy;
	return (0);
}
__weak_reference(_pthread_attr_setscope, pthread_attr_setscope);
/*
 * Set the contention scope (PTHREAD_SCOPE_PROCESS or
 * PTHREAD_SCOPE_SYSTEM) in a thread attribute object.
 *
 * Returns 0 on success, EINVAL for a bad attribute or scope value.
 */
int
_pthread_attr_setscope(pthread_attr_t *attr, int contentionscope)
{
	if (attr == NULL || *attr == NULL)
		return (EINVAL);
	if (contentionscope != PTHREAD_SCOPE_PROCESS &&
	    contentionscope != PTHREAD_SCOPE_SYSTEM)
		return (EINVAL);
	/* The scope is kept as a flag bit in the attribute flags word. */
	if (contentionscope == PTHREAD_SCOPE_SYSTEM)
		(*attr)->flags |= contentionscope;
	else
		(*attr)->flags &= ~PTHREAD_SCOPE_SYSTEM;
	return (0);
}
__weak_reference(_pthread_attr_setstack, pthread_attr_setstack);
/*
 * Store a caller-provided stack (base address and size) in a thread
 * attribute object.
 *
 * Returns 0 on success, EINVAL for a bad attribute, NULL stack
 * address, or a size below PTHREAD_STACK_MIN.
 */
int
_pthread_attr_setstack(pthread_attr_t *attr, void *stackaddr,
                        size_t stacksize)
{
	if (attr == NULL || *attr == NULL)
		return (EINVAL);
	if (stackaddr == NULL || stacksize < PTHREAD_STACK_MIN)
		return (EINVAL);
	/* Save the caller's stack address and size. */
	(*attr)->stackaddr_attr = stackaddr;
	(*attr)->stacksize_attr = stacksize;
	return (0);
}
__weak_reference(_pthread_attr_setstackaddr, pthread_attr_setstackaddr);
/*
 * Store a caller-provided stack base address in a thread attribute
 * object.
 *
 * Returns 0 on success, EINVAL for a bad attribute or NULL address.
 */
int
_pthread_attr_setstackaddr(pthread_attr_t *attr, void *stackaddr)
{
	if (attr == NULL || *attr == NULL || stackaddr == NULL)
		return (EINVAL);
	/* Save the caller's stack address. */
	(*attr)->stackaddr_attr = stackaddr;
	return (0);
}
__weak_reference(_pthread_attr_setstacksize, pthread_attr_setstacksize);
/*
 * Store the stack size in a thread attribute object.
 *
 * Returns 0 on success, EINVAL for a bad attribute or a size below
 * PTHREAD_STACK_MIN.
 */
int
_pthread_attr_setstacksize(pthread_attr_t *attr, size_t stacksize)
{
	if (attr == NULL || *attr == NULL)
		return (EINVAL);
	if (stacksize < PTHREAD_STACK_MIN)
		return (EINVAL);
	/* Save the requested stack size. */
	(*attr)->stacksize_attr = stacksize;
	return (0);
}

View File

@ -1,5 +1,5 @@
/*-
* Copyright (c) 2004 Michael Telahun Makonnen <mtm@FreeBSD.Org>
* Copyright (c) 2003 David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -26,104 +26,82 @@
* $FreeBSD$
*/
#include <pthread.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include "thr_private.h"
__weak_reference(_pthread_barrier_destroy, pthread_barrier_destroy);
__weak_reference(_pthread_barrier_init, pthread_barrier_init);
__weak_reference(_pthread_barrier_wait, pthread_barrier_wait);
__weak_reference(_pthread_barrier_init, pthread_barrier_init);
__weak_reference(_pthread_barrier_wait, pthread_barrier_wait);
__weak_reference(_pthread_barrier_destroy, pthread_barrier_destroy);
/*
 * Destroy a barrier and free its storage.
 *
 * Returns 0 on success, EINVAL for a NULL/uninitialized barrier, or
 * EBUSY if threads are still waiting on it.
 *
 * NOTE(review): the original span interleaved old (b_subtotal /
 * PTHREAD_ASSERT) and new (b_waiters) versions from an unmerged diff;
 * this keeps the umtx-based version and clears the caller's pointer
 * before freeing.
 */
int
_pthread_barrier_destroy(pthread_barrier_t *barrier)
{
	pthread_barrier_t bar;

	if (barrier == NULL || *barrier == NULL)
		return (EINVAL);

	bar = *barrier;
	if (bar->b_waiters > 0)
		return (EBUSY);
	*barrier = NULL;
	free(bar);
	return (0);
}
/*
 * Initialize a barrier for `count` threads.
 *
 * Returns 0 on success, EINVAL for a NULL barrier pointer or a
 * non-positive count, ENOMEM if allocation fails. The attribute
 * argument is currently unused (only process-private barriers are
 * supported).
 *
 * NOTE(review): reconstructed from unmerged-diff residue; this is the
 * umtx/cycle-based version (b_lock, b_cycle, b_waiters, b_count).
 */
int
_pthread_barrier_init(pthread_barrier_t *barrier,
                       const pthread_barrierattr_t *attr, int count)
{
	pthread_barrier_t bar;

	if (barrier == NULL || count <= 0)
		return (EINVAL);

	bar = malloc(sizeof(struct pthread_barrier));
	if (bar == NULL)
		return (ENOMEM);

	_thr_umtx_init(&bar->b_lock);
	bar->b_cycle = 0;
	bar->b_waiters = 0;
	bar->b_count = count;
	*barrier = bar;
	return (0);
}
/*
 * Wait at a barrier until b_count threads have arrived.
 *
 * The last arriving thread resets the waiter count, advances the
 * cycle number, wakes all waiters via umtx, and returns
 * PTHREAD_BARRIER_SERIAL_THREAD; every other thread sleeps on the
 * cycle word and returns 0 once the cycle has advanced.
 *
 * Returns EINVAL for a NULL/uninitialized barrier.
 *
 * NOTE(review): reconstructed from unmerged-diff residue; the old
 * sleep-queue (b_barrq/_thread_suspend) version was dropped in favor
 * of this umtx cycle-count version.
 */
int
_pthread_barrier_wait(pthread_barrier_t *barrier)
{
	struct pthread *curthread = _get_curthread();
	pthread_barrier_t bar;
	long cycle;
	int ret;

	if (barrier == NULL || *barrier == NULL)
		return (EINVAL);

	bar = *barrier;
	THR_UMTX_LOCK(curthread, &bar->b_lock);
	if (++bar->b_waiters == bar->b_count) {
		/* Current thread is the last to arrive. */
		bar->b_waiters = 0;
		bar->b_cycle++;
		_thr_umtx_wake(&bar->b_cycle, bar->b_count);
		THR_UMTX_UNLOCK(curthread, &bar->b_lock);
		ret = PTHREAD_BARRIER_SERIAL_THREAD;
	} else {
		cycle = bar->b_cycle;
		THR_UMTX_UNLOCK(curthread, &bar->b_lock);
		do {
			_thr_umtx_wait(&bar->b_cycle, cycle, NULL);
			/* Re-test the cycle to filter spurious wakeups. */
		} while (cycle == bar->b_cycle);
		ret = 0;
	}
	return (ret);
}

View File

@ -1,79 +1,94 @@
/*-
* Copyright (c) 2004 Michael Telahun Makonnen <mtm@FreeBSD.Org>
/*
* Copyright (c) 2003 David Xu <davidxu@freebsd.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* notice(s), this list of conditions and the following disclaimer as
* the first lines of this file unmodified other than the possible
* addition of one or more copyright notices.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* notice(s), this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* $FreeBSD$
*/
#include <pthread.h>
#include <errno.h>
#include <stdlib.h>
#include <pthread.h>
#include "thr_private.h"
__weak_reference(_pthread_barrierattr_destroy, pthread_barrierattr_destroy);
__weak_reference(_pthread_barrierattr_init, pthread_barrierattr_init);
__weak_reference(_pthread_barrierattr_getpshared,
pthread_barrierattr_getpshared);
__weak_reference(_pthread_barrierattr_setpshared,
pthread_barrierattr_setpshared);
pthread_barrierattr_setpshared);
__weak_reference(_pthread_barrierattr_getpshared,
pthread_barrierattr_getpshared);
/*
 * Destroy a barrier attribute object and free its storage.
 *
 * Returns 0 on success, EINVAL for a NULL/uninitialized attribute.
 * The caller's pointer is cleared to guard against reuse after free.
 *
 * NOTE(review): the original span had two stacked `if` conditions from
 * an unmerged diff; this keeps the stricter (attr and *attr) check.
 */
int
_pthread_barrierattr_destroy(pthread_barrierattr_t *attr)
{
	if (attr == NULL || *attr == NULL)
		return (EINVAL);
	free(*attr);
	*attr = NULL;
	return (0);
}
/*
 * Return the process-shared setting of a barrier attribute object.
 *
 * Returns 0 on success, EINVAL for a NULL/uninitialized attribute.
 */
int
_pthread_barrierattr_getpshared(const pthread_barrierattr_t *attr,
    int *pshared)
{
	/* Validate the attribute object before dereferencing it. */
	if (attr == NULL)
		return (EINVAL);
	if (*attr == NULL)
		return (EINVAL);
	*pshared = (*attr)->pshared;
	return (0);
}
/*
 * Allocate and initialize a barrier attribute object; the default
 * (and only supported) sharing mode is PTHREAD_PROCESS_PRIVATE.
 *
 * Returns 0 on success, EINVAL for a NULL pointer, ENOMEM if
 * allocation fails.
 *
 * NOTE(review): added the missing NULL check, dropped the malloc cast,
 * and use the `pshared` field name consistent with the sibling
 * getpshared/setpshared functions (the old `ba_pshared` name was
 * leftover from the pre-commit version).
 */
int
_pthread_barrierattr_init(pthread_barrierattr_t *attr)
{
	if (attr == NULL)
		return (EINVAL);
	if ((*attr = malloc(sizeof(struct pthread_barrierattr))) == NULL)
		return (ENOMEM);
	(*attr)->pshared = PTHREAD_PROCESS_PRIVATE;
	return (0);
}
int
_pthread_barrierattr_getpshared(const pthread_barrierattr_t *attr, int *pshared)
{
if (*attr == NULL)
if (attr == NULL)
return (EINVAL);
*pshared = (*attr)->ba_pshared;
if ((*attr = malloc(sizeof(struct pthread_barrierattr))) == NULL)
return (ENOMEM);
(*attr)->pshared = PTHREAD_PROCESS_PRIVATE;
return (0);
}
/*
 * Set the process-shared attribute of a barrier attribute object.
 *
 * Returns 0 on success; EINVAL for a NULL/uninitialized attribute or
 * for any value other than PTHREAD_PROCESS_PRIVATE (process-shared
 * barriers are not yet supported — see the commit note about the ABI
 * change required for shared-memory synchronization objects).
 *
 * NOTE(review): reconstructed from unmerged-diff residue that
 * contained both the old and new validation paths.
 */
int
_pthread_barrierattr_setpshared(pthread_barrierattr_t *attr, int pshared)
{
	if (attr == NULL || *attr == NULL)
		return (EINVAL);
	/* Only PTHREAD_PROCESS_PRIVATE is supported. */
	if (pshared != PTHREAD_PROCESS_PRIVATE)
		return (EINVAL);
	(*attr)->pshared = pshared;
	return (0);
}

View File

@ -1,143 +1,163 @@
/*
* David Leonard <d@openbsd.org>, 1999. Public domain.
* Copyright (c) 2005, David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#include <sys/errno.h>
#include <pthread.h>
#include <stdlib.h>
#include "thr_private.h"
/*
* Static prototypes
*/
static void testcancel(void);
#include <pthread.h>
#include "thr_private.h"
__weak_reference(_pthread_cancel, pthread_cancel);
__weak_reference(_pthread_setcancelstate, pthread_setcancelstate);
__weak_reference(_pthread_setcanceltype, pthread_setcanceltype);
__weak_reference(_pthread_testcancel, pthread_testcancel);
/*
* Posix requires this function to be async-cancel-safe, so it
* may not aquire any type of resource or call any functions
* that might do so.
*/
int _pthread_setcanceltype(int type, int *oldtype);
/*
 * Request cancellation of `pthread`.
 *
 * Atomically sets THR_CANCEL_NEEDED in the target's cancelflags; if
 * the flag was newly set and the target is currently at a
 * cancellation point or in async-cancel mode (SHOULD_ASYNC_CANCEL),
 * it is interrupted with SIGCANCEL. The target thread is pinned with
 * _thr_ref_add() while its flags are touched.
 *
 * POSIX requires pthread_cancel() to be async-cancel-safe, so async
 * cancellation of the calling thread is suppressed for the duration
 * by temporarily switching to deferred mode.
 *
 * Returns 0 on success or the error from _thr_ref_add() (e.g. ESRCH
 * for a stale thread id).
 *
 * NOTE(review): reconstructed from unmerged-diff residue; the old
 * CS_NULL/CS_PENDING state machine was replaced by the cancelflags
 * bit operations.
 */
int
_pthread_cancel(pthread_t pthread)
{
	struct pthread *curthread = _get_curthread();
	int oldval, newval = 0;
	int oldtype;
	int ret;

	/* Suppress async cancellation of ourselves while working. */
	_pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &oldtype);
	if ((ret = _thr_ref_add(curthread, pthread, 0)) != 0) {
		_pthread_setcanceltype(oldtype, NULL);
		return (ret);
	}

	do {
		oldval = pthread->cancelflags;
		if (oldval & THR_CANCEL_NEEDED)
			break;
		newval = oldval | THR_CANCEL_NEEDED;
	} while (!atomic_cmpset_acq_int(&pthread->cancelflags, oldval, newval));

	/* Only the first canceller sends the async-cancel signal. */
	if (!(oldval & THR_CANCEL_NEEDED) && SHOULD_ASYNC_CANCEL(newval))
		_thr_send_sig(pthread, SIGCANCEL);

	_thr_ref_delete(curthread, pthread);
	_pthread_setcanceltype(oldtype, NULL);
	return (0);
}
/*
 * POSIX requires this function to be async-cancel-safe, so it
 * may not acquire any type of resource or call any functions
 * that might do so.
 */
/*
 * Exit via pthread_exit(PTHREAD_CANCELED) if the thread's cancelflags
 * say a pending cancellation should be acted on now (SHOULD_CANCEL).
 * Called from the cancellation-point entry/exit helpers and the
 * state/type setters; never returns if cancellation fires.
 */
static inline void
testcancel(struct pthread *curthread)
{
	int newval;

	newval = curthread->cancelflags;
	if (SHOULD_CANCEL(newval))
		pthread_exit(PTHREAD_CANCELED);
}
/*
 * Enable or disable cancellation for the calling thread.
 *
 * The state is a THR_CANCEL_DISABLE bit in cancelflags, set/cleared
 * atomically. Re-enabling checks immediately for a pending
 * cancellation (testcancel), per POSIX.
 *
 * Returns 0 on success, EINVAL for an unknown state; the previous
 * state is stored through oldstate when non-NULL.
 *
 * NOTE(review): reconstructed from unmerged-diff residue that
 * interleaved the old cancelmode state machine with this
 * cancelflags-bit version (including a duplicated
 * PTHREAD_CANCEL_ENABLE case).
 */
int
_pthread_setcancelstate(int state, int *oldstate)
{
	struct pthread *curthread = _get_curthread();
	int oldval, ret;

	oldval = curthread->cancelflags;
	if (oldstate != NULL)
		*oldstate = ((oldval & THR_CANCEL_DISABLE) ?
		    PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE);
	switch (state) {
	case PTHREAD_CANCEL_DISABLE:
		atomic_set_int(&curthread->cancelflags, THR_CANCEL_DISABLE);
		ret = 0;
		break;
	case PTHREAD_CANCEL_ENABLE:
		atomic_clear_int(&curthread->cancelflags, THR_CANCEL_DISABLE);
		/* A pending cancel may fire now that we are enabled. */
		testcancel(curthread);
		ret = 0;
		break;
	default:
		ret = EINVAL;
	}
	return (ret);
}
/*
 * POSIX requires this function to be async-cancel-safe, so it
 * may not acquire any type of resource or call any functions that
 * might do so.
 */
/*
 * Set the calling thread's cancellation type (deferred or
 * asynchronous).
 *
 * The type is a THR_CANCEL_AT_POINT bit in cancelflags. Switching to
 * asynchronous immediately re-checks for a pending cancellation.
 *
 * POSIX requires this function to be async-cancel-safe, so it must
 * not acquire any resource or call any function that might do so.
 *
 * Returns 0 on success, EINVAL for an unknown type; the previous type
 * is stored through oldtype when non-NULL.
 *
 * NOTE(review): reconstructed from unmerged-diff residue mixing the
 * old cancelmode enum version with this cancelflags-bit version.
 */
int
_pthread_setcanceltype(int type, int *oldtype)
{
	struct pthread *curthread = _get_curthread();
	int oldval, ret;

	oldval = curthread->cancelflags;
	if (oldtype != NULL)
		*oldtype = ((oldval & THR_CANCEL_AT_POINT) ?
		    PTHREAD_CANCEL_ASYNCHRONOUS :
		    PTHREAD_CANCEL_DEFERRED);
	switch (type) {
	case PTHREAD_CANCEL_ASYNCHRONOUS:
		atomic_set_int(&curthread->cancelflags, THR_CANCEL_AT_POINT);
		/* Pending cancel can be acted on right away. */
		testcancel(curthread);
		ret = 0;
		break;
	case PTHREAD_CANCEL_DEFERRED:
		atomic_clear_int(&curthread->cancelflags, THR_CANCEL_AT_POINT);
		ret = 0;
		break;
	default:
		ret = EINVAL;
	}
	return (ret);
}
/*
 * Explicit cancellation point: act on any pending cancellation for
 * the calling thread.
 *
 * NOTE(review): the original span carried both the old zero-argument
 * call `testcancel()` and the new call from the unmerged diff; only
 * the current-thread form is kept.
 */
void
_pthread_testcancel(void)
{
	testcancel(_get_curthread());
}
static void
testcancel()
/*
 * Enter a cancellation point: mark the thread as being at a point
 * (THR_CANCEL_AT_POINT) and check for a pending cancellation.
 *
 * Returns the previous cancelflags value so _thr_cancel_leave() can
 * restore the at-point bit only if it was not already set (supports
 * nesting).
 *
 * NOTE(review): reconstructed from unmerged-diff residue (the old
 * cancelmode/CS_PENDING cleanup code was interleaved with this body).
 */
int
_thr_cancel_enter(struct pthread *curthread)
{
	int oldval;

	oldval = curthread->cancelflags;
	if (!(oldval & THR_CANCEL_AT_POINT)) {
		atomic_set_int(&curthread->cancelflags, THR_CANCEL_AT_POINT);
		testcancel(curthread);
	}
	return (oldval);
}
void
_thread_enter_cancellation_point(void)
_thr_cancel_leave(struct pthread *curthread, int previous)
{
testcancel();
curthread->cancellationpoint = 1;
}
void
_thread_leave_cancellation_point(void)
{
curthread->cancellationpoint = 0;
testcancel();
if (!(previous & THR_CANCEL_AT_POINT))
atomic_clear_int(&curthread->cancelflags, THR_CANCEL_AT_POINT);
}

View File

@ -31,10 +31,12 @@
*
* $FreeBSD$
*/
#include <signal.h>
#include <errno.h>
#include <stdlib.h>
#include <pthread.h>
#include "thr_private.h"
__weak_reference(_pthread_cleanup_push, pthread_cleanup_push);
@ -43,11 +45,14 @@ __weak_reference(_pthread_cleanup_pop, pthread_cleanup_pop);
void
_pthread_cleanup_push(void (*routine) (void *), void *routine_arg)
{
struct pthread *curthread = _get_curthread();
struct pthread_cleanup *new;
if ((new = (struct pthread_cleanup *) malloc(sizeof(struct pthread_cleanup))) != NULL) {
if ((new = (struct pthread_cleanup *)
malloc(sizeof(struct pthread_cleanup))) != NULL) {
new->routine = routine;
new->routine_arg = routine_arg;
new->onstack = 0;
new->next = curthread->cleanup;
curthread->cleanup = new;
@ -57,6 +62,7 @@ _pthread_cleanup_push(void (*routine) (void *), void *routine_arg)
void
_pthread_cleanup_pop(int execute)
{
struct pthread *curthread = _get_curthread();
struct pthread_cleanup *old;
if ((old = curthread->cleanup) != NULL) {
@ -64,7 +70,7 @@ _pthread_cleanup_pop(int execute)
if (execute) {
old->routine(old->routine_arg);
}
free(old);
if (old->onstack == 0)
free(old);
}
}

View File

@ -33,6 +33,9 @@
*/
#include <errno.h>
#include <pthread.h>
#include "thr_private.h"
static int current_concurrency = 0;

View File

@ -1,476 +1,344 @@
/*
* Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
* Copyright (c) 2005 David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by John Birrell.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include "thr_private.h"
#include <limits.h>
/*
* Proctect two different threads calling a pthread_cond_* function
* from accidentally initializing the condition variable twice.
*/
static spinlock_t static_cond_lock = _SPINLOCK_INITIALIZER;
#include "thr_private.h"
/*
* Prototypes
*/
static inline int cond_init(pthread_cond_t *);
static pthread_t cond_queue_deq(pthread_cond_t);
static void cond_queue_remove(pthread_cond_t, pthread_t);
static void cond_queue_enq(pthread_cond_t, pthread_t);
static int cond_signal(pthread_cond_t *, int);
static int cond_wait_common(pthread_cond_t *,
pthread_mutex_t *, const struct timespec *);
static int cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr);
static int cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
const struct timespec *abstime, int cancel);
static int cond_signal_common(pthread_cond_t *cond, int broadcast);
/*
* Double underscore versions are cancellation points. Single underscore
* versions are not and are provided for libc internal usage (which
* shouldn't introduce cancellation points).
*/
__weak_reference(__pthread_cond_wait, pthread_cond_wait);
__weak_reference(__pthread_cond_timedwait, pthread_cond_timedwait);
__weak_reference(_pthread_cond_init, pthread_cond_init);
__weak_reference(_pthread_cond_destroy, pthread_cond_destroy);
__weak_reference(_pthread_cond_wait, pthread_cond_wait);
__weak_reference(_pthread_cond_timedwait, pthread_cond_timedwait);
__weak_reference(_pthread_cond_signal, pthread_cond_signal);
__weak_reference(_pthread_cond_broadcast, pthread_cond_broadcast);
#define COND_LOCK(c) \
do { \
if (umtx_lock(&(c)->c_lock, curthread->thr_id)) \
abort(); \
} while (0)
#define COND_UNLOCK(c) \
do { \
if (umtx_unlock(&(c)->c_lock, curthread->thr_id)) \
abort(); \
} while (0)
/* Reinitialize a condition variable to defaults. */
int
_cond_reinit(pthread_cond_t *cond)
/*
 * Allocate and initialize a condition variable.
 *
 * Sharing mode and clock id come from the attribute when one is
 * supplied; otherwise the condvar is process-private and uses
 * CLOCK_REALTIME.
 *
 * Returns 0 and stores the new condvar in *cond, or ENOMEM on
 * allocation failure (*cond untouched).
 *
 * NOTE(review): reconstructed from unmerged-diff residue (the old
 * `_cond_reinit` body was interleaved with this one).
 */
static int
cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	pthread_cond_t pcond;
	int rval = 0;

	if ((pcond = malloc(sizeof(struct pthread_cond))) == NULL) {
		rval = ENOMEM;
	} else {
		/* Initialise the condition variable structure. */
		_thr_umtx_init(&pcond->c_lock);
		pcond->c_seqno = 0;
		pcond->c_waiters = 0;
		pcond->c_wakeups = 0;
		if (cond_attr == NULL || *cond_attr == NULL) {
			pcond->c_pshared = 0;
			pcond->c_clockid = CLOCK_REALTIME;
		} else {
			pcond->c_pshared = (*cond_attr)->c_pshared;
			pcond->c_clockid = (*cond_attr)->c_clockid;
		}
		*cond = pcond;
	}
	/* Return the completion status. */
	return (rval);
}
/*
 * Lazily initialize a statically-allocated condition variable
 * (PTHREAD_COND_INITIALIZER leaves *cond == NULL).
 *
 * _cond_static_lock serializes two threads racing to initialize the
 * same static condvar; if another thread won the race, *cond is
 * already set and this returns 0.
 *
 * NOTE(review): reconstructed from unmerged-diff residue (old
 * TAILQ-based static init was interleaved).
 */
static int
init_static(struct pthread *thread, pthread_cond_t *cond)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_cond_static_lock);

	if (*cond == NULL)
		ret = cond_init(cond, NULL);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_cond_static_lock);

	return (ret);
}
/*
 * Initialize a condition variable with optional attributes.
 *
 * *cond is cleared first so that a failed cond_init() leaves the
 * caller's handle in the uninitialized (NULL) state.
 *
 * Returns 0 on success or ENOMEM from cond_init().
 *
 * NOTE(review): reconstructed from unmerged-diff residue — the entire
 * old COND_TYPE_FAST/TAILQ body was interleaved with this two-line
 * version.
 */
int
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *cond_attr)
{
	*cond = NULL;
	return (cond_init(cond, cond_attr));
}
/*
 * Destroy a condition variable.
 *
 * Returns 0 on success, EINVAL for an uninitialized condvar, EBUSY if
 * any thread is waiting on it or has an unconsumed wakeup. The
 * caller's pointer is cleared under the condvar lock before the
 * structure is freed.
 *
 * NOTE(review): reconstructed from unmerged-diff residue (the old
 * PTHREAD_COND_INITIALIZER short-circuit and COND_LOCK code was
 * interleaved with this THR_LOCK/umtx version).
 */
int
_pthread_cond_destroy(pthread_cond_t *cond)
{
	struct pthread_cond *cv;
	struct pthread *curthread = _get_curthread();
	int rval = 0;

	if (*cond == NULL)
		rval = EINVAL;
	else {
		/* Lock the condition variable structure. */
		THR_LOCK_ACQUIRE(curthread, &(*cond)->c_lock);
		if ((*cond)->c_waiters + (*cond)->c_wakeups != 0) {
			THR_LOCK_RELEASE(curthread, &(*cond)->c_lock);
			return (EBUSY);
		}
		/*
		 * NULL the caller's pointer now that the condition
		 * variable has been destroyed.
		 */
		cv = *cond;
		*cond = NULL;
		/* Unlock and free the structure. */
		THR_LOCK_RELEASE(curthread, &cv->c_lock);
		free(cv);
	}
	return (rval);
}
/*
 * State handed to cond_cancel_handler() so that a waiter cancelled
 * inside cond_wait_common() can fix up the condvar bookkeeping and
 * re-acquire the mutex.
 */
struct cond_cancel_info
{
	pthread_mutex_t *mutex;	/* mutex to re-lock on cancellation */
	pthread_cond_t *cond;	/* condvar the thread was waiting on */
	long seqno;	/* c_seqno sampled when the wait began */
};
/*
 * Cleanup handler run when a thread is cancelled while blocked in
 * cond_wait_common().
 *
 * Under the condvar lock, it decides whether the cancelled thread had
 * already been granted a wakeup (c_seqno advanced since the wait
 * began and c_wakeups != 0): if so, the wakeup is passed on to
 * another waiter (bump c_seqno and umtx-wake one) or, with no waiters
 * left, the wakeup count is dropped; otherwise the thread simply
 * leaves the waiter count. Finally the mutex is re-acquired, as
 * POSIX requires for a cancelled condvar wait.
 */
static void
cond_cancel_handler(void *arg)
{
	struct pthread *curthread = _get_curthread();
	struct cond_cancel_info *cci = (struct cond_cancel_info *)arg;
	pthread_cond_t cv;

	cv = *(cci->cond);
	THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
	if (cv->c_seqno != cci->seqno && cv->c_wakeups != 0) {
		if (cv->c_waiters > 0) {
			cv->c_seqno++;
			_thr_umtx_wake(&cv->c_seqno, 1);
		} else
			cv->c_wakeups--;
	} else {
		cv->c_waiters--;
	}
	THR_LOCK_RELEASE(curthread, &cv->c_lock);
	_mutex_cv_lock(cci->mutex);
}
/*
 * Common body for pthread_cond_wait / pthread_cond_timedwait.
 *
 * The wait is a umtx sleep on the condvar's sequence number: the
 * waiter samples c_seqno, registers in c_waiters, drops the condvar
 * lock and the caller's mutex, then sleeps until the sequence
 * advances AND a wakeup credit (c_wakeups) is available — the loop
 * guards against spurious umtx wakeups. `cancel` selects whether the
 * sleep is a cancellation point (cleanup handler + enter/leave).
 *
 * On return the caller's mutex is re-locked. Returns 0 on wakeup,
 * ETIMEDOUT when abstime expired, or an error from _mutex_cv_unlock.
 *
 * NOTE(review): reconstructed from unmerged-diff residue — stray old
 * lines (e.g. `free(*cond);`) were interleaved in this body.
 */
static int
cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
	const struct timespec *abstime, int cancel)
{
	struct pthread *curthread = _get_curthread();
	struct timespec ts, ts2, *tsp;
	struct cond_cancel_info cci;
	pthread_cond_t cv;
	long seq, oldseq;
	int oldcancel;
	int ret = 0;

	/*
	 * If the condition variable is statically initialized,
	 * perform the dynamic initialization.
	 */
	if (__predict_false(*cond == NULL &&
	    (ret = init_static(curthread, cond)) != 0))
		return (ret);

	cv = *cond;
	THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
	ret = _mutex_cv_unlock(mutex);
	if (ret) {
		THR_LOCK_RELEASE(curthread, &cv->c_lock);
		return (ret);
	}
	oldseq = seq = cv->c_seqno;
	cci.mutex = mutex;
	cci.cond  = cond;
	cci.seqno = oldseq;

	cv->c_waiters++;
	do {
		THR_LOCK_RELEASE(curthread, &cv->c_lock);

		if (abstime != NULL) {
			/* Convert the absolute deadline to a relative wait. */
			clock_gettime(cv->c_clockid, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			tsp = &ts2;
		} else
			tsp = NULL;

		if (cancel) {
			THR_CLEANUP_PUSH(curthread, cond_cancel_handler, &cci);
			oldcancel = _thr_cancel_enter(curthread);
			ret = _thr_umtx_wait(&cv->c_seqno, seq, tsp);
			_thr_cancel_leave(curthread, oldcancel);
			THR_CLEANUP_POP(curthread, 0);
		} else {
			ret = _thr_umtx_wait(&cv->c_seqno, seq, tsp);
		}

		THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
		seq = cv->c_seqno;
		if (abstime != NULL && ret == ETIMEDOUT)
			break;

		/*
		 * Loop if we have never been told to wake up
		 * or we lost a race.
		 */
	} while (seq == oldseq || cv->c_wakeups == 0);

	if (seq != oldseq && cv->c_wakeups != 0) {
		/* Consume our wakeup credit. */
		cv->c_wakeups--;
		ret = 0;
	} else {
		cv->c_waiters--;
	}
	THR_LOCK_RELEASE(curthread, &cv->c_lock);
	_mutex_cv_lock(mutex);
	return (ret);
}
/*
 * Non-cancellation-point condvar wait, for libc-internal use.
 * NOTE(review): dead `rval` lines from an unmerged diff removed.
 */
int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	return (cond_wait_common(cond, mutex, NULL, 0));
}
/*
 * Cancellation-point condvar wait (the application-visible entry).
 * NOTE(review): stray old-version lines (`abort()` on ETIMEDOUT,
 * `return (rval)`) from an unmerged diff removed.
 */
int
__pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
	return (cond_wait_common(cond, mutex, NULL, 1));
}
/*
 * Non-cancellation-point timed condvar wait, for libc-internal use.
 *
 * Validates abstime up front (non-NULL, non-negative seconds and
 * nanoseconds, tv_nsec < 1e9) and returns EINVAL otherwise, per
 * POSIX.
 *
 * NOTE(review): the original span held both the old and new
 * validation conditions from an unmerged diff; the stricter one is
 * kept.
 */
int
_pthread_cond_timedwait(pthread_cond_t * cond, pthread_mutex_t * mutex,
	const struct timespec * abstime)
{
	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 0));
}
/*
 * Cancellation-point timed condvar wait (application-visible entry).
 * Validates abstime (non-NULL, non-negative fields, tv_nsec < 1e9)
 * and returns EINVAL otherwise; delegates to cond_wait_common with
 * cancellation enabled.
 */
int
__pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
	const struct timespec *abstime)
{
	if (abstime == NULL || abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
	    abstime->tv_nsec >= 1000000000)
		return (EINVAL);

	return (cond_wait_common(cond, mutex, abstime, 1));
}
static int
cond_wait_common(pthread_cond_t * cond, pthread_mutex_t * mutex,
const struct timespec * abstime)
cond_signal_common(pthread_cond_t *cond, int broadcast)
{
int rval = 0;
int mtxrval;
struct pthread *curthread = _get_curthread();
pthread_cond_t cv;
int ret = 0, oldwaiters;
if (cond == NULL)
return (EINVAL);
/*
* If the condition variable is statically initialized, perform dynamic
* initialization.
*/
if (*cond == PTHREAD_COND_INITIALIZER && (rval = cond_init(cond)) != 0)
return (rval);
if (__predict_false(*cond == NULL &&
(ret = init_static(curthread, cond)) != 0))
return (ret);
if ((*cond)->c_type != COND_TYPE_FAST)
return (EINVAL);
COND_LOCK(*cond);
/*
* If the condvar was statically allocated, properly
* initialize the tail queue.
*/
if (((*cond)->c_flags & COND_FLAGS_INITED) == 0) {
TAILQ_INIT(&(*cond)->c_queue);
(*cond)->c_flags |= COND_FLAGS_INITED;
cv = *cond;
/* Lock the condition variable structure. */
THR_LOCK_ACQUIRE(curthread, &cv->c_lock);
if (cv->c_waiters) {
if (!broadcast) {
cv->c_wakeups++;
cv->c_waiters--;
cv->c_seqno++;
_thr_umtx_wake(&cv->c_seqno, 1);
} else {
oldwaiters = cv->c_waiters;
cv->c_wakeups += cv->c_waiters;
cv->c_waiters = 0;
cv->c_seqno++;
_thr_umtx_wake(&cv->c_seqno, oldwaiters);
}
}
if ((mutex == NULL) || (((*cond)->c_mutex != NULL) &&
((*cond)->c_mutex != *mutex))) {
COND_UNLOCK(*cond);
return (EINVAL);
}
/* Remember the mutex */
(*cond)->c_mutex = *mutex;
_thread_enter_cancellation_point();
if ((rval = _mutex_cv_unlock(mutex)) != 0) {
if (rval == -1){
printf("mutex unlock by condvar failed!");
fflush(stdout);
abort();
}
_thread_leave_cancellation_point();
COND_UNLOCK(*cond);
return (rval);
}
/*
* We need to protect the queue operations. It also
* protects the pthread flag field. This is
* dropped before calling _thread_suspend() and reaquired
* when we return.
*/
PTHREAD_LOCK(curthread);
/*
* Queue the running thread on the condition
* variable and wait to be signaled.
*/
cond_queue_enq(*cond, curthread);
do {
PTHREAD_UNLOCK(curthread);
COND_UNLOCK(*cond);
if (curthread->cancellation == CS_PENDING) {
/*
* Posix says we must lock the mutex
* even if we're being canceled.
*/
_mutex_cv_lock(mutex);
_thread_leave_cancellation_point();
PANIC("Shouldn't have come back.");
}
rval = _thread_suspend(curthread, (struct timespec *)abstime);
if (rval != 0 && rval != ETIMEDOUT && rval != EINTR) {
printf("thread suspend returned an invalid value");
fflush(stdout);
abort();
}
COND_LOCK(*cond);
PTHREAD_LOCK(curthread);
if (rval == ETIMEDOUT) {
/*
* Condition may have been signaled between the
* time the thread timed out and locked the condvar.
* If it wasn't, manually remove it from the queue.
*/
if ((curthread->flags & PTHREAD_FLAGS_IN_CONDQ) == 0)
rval = 0;
else
cond_queue_remove(*cond, curthread);
}
} while ((curthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0);
PTHREAD_UNLOCK(curthread);
COND_UNLOCK(*cond);
mtxrval = _mutex_cv_lock(mutex);
/* If the mutex failed return that error. */
if (mtxrval == -1) {
printf("mutex lock from condvar failed!");
fflush(stdout);
abort();
}
if (mtxrval != 0)
rval = mtxrval;
_thread_leave_cancellation_point();
return (rval);
THR_LOCK_RELEASE(curthread, &cv->c_lock);
return (ret);
}
int
_pthread_cond_signal(pthread_cond_t * cond)
{
return (cond_signal(cond, 0));
return (cond_signal_common(cond, 0));
}
int
_pthread_cond_broadcast(pthread_cond_t * cond)
{
return (cond_signal(cond, 1));
return (cond_signal_common(cond, 1));
}
static int
cond_signal(pthread_cond_t * cond, int broadcast)
{
int rval = 0;
pthread_t pthread;
if (cond == NULL)
return (EINVAL);
/*
* If the condition variable is statically initialized, perform dynamic
* initialization.
*/
if (*cond == PTHREAD_COND_INITIALIZER && (rval = cond_init(cond)) != 0)
return (rval);
if ((*cond)->c_type != COND_TYPE_FAST)
return (EINVAL);
COND_LOCK(*cond);
/*
* Enter a loop to bring all (or only one) threads off the
* condition queue:
*/
do {
/*
* Wake up the signaled thread. It will be returned
* to us locked.
*/
if ((pthread = cond_queue_deq(*cond)) != NULL) {
PTHREAD_WAKE(pthread);
PTHREAD_UNLOCK(pthread);
}
} while (broadcast && pthread != NULL);
COND_UNLOCK(*cond);
return (rval);
}
/*
 * Remove a thread from the condvar queue it was blocked on, if any.
 * Called when a wait is being backed out (e.g. on cancellation).
 */
void
_cond_wait_backout(pthread_t pthread)
{
	pthread_cond_t cv;

	cv = pthread->data.cond;
	if (cv == NULL)
		return;

	/* Only fast condvars keep a waiter queue to back out of. */
	if (cv->c_type == COND_TYPE_FAST)
		cond_queue_remove(cv, pthread);
}
/*
* Dequeue a waiting thread from the head of a condition queue in
* descending priority order.
*/
/*
 * Dequeue a waiting thread from the head of a condition queue in
 * descending priority order.  Returns the thread locked, or NULL
 * when no eligible waiter remains.
 */
static pthread_t
cond_queue_deq(pthread_cond_t cond)
{
	pthread_t td;

	for (;;) {
		td = TAILQ_FIRST(&cond->c_queue);
		if (td == NULL)
			break;
		PTHREAD_LOCK(td);
		cond_queue_remove(cond, td);
		/* Keep looking until we find a thread with no cancel pending. */
		if (td->cancellation == CS_NULL)
			break;
		PTHREAD_UNLOCK(td);
	}
	return (td);
}
/*
* Remove a waiting thread from a condition queue in descending priority
* order.
*/
/*
 * Remove a waiting thread from a condition queue in descending
 * priority order.
 */
static void
cond_queue_remove(pthread_cond_t cond, pthread_t pthread)
{
	/*
	 * pthread_cond_timedwait() can time out as well as be signaled
	 * by another thread, so only unlink the thread if it is still
	 * actually on the queue.
	 */
	if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0) {
		TAILQ_REMOVE(&cond->c_queue, pthread, sqe);
		pthread->flags &= ~PTHREAD_FLAGS_IN_CONDQ;
	}
	/* With no waiters left, the condvar is no longer bound to a mutex. */
	if (TAILQ_EMPTY(&cond->c_queue))
		cond->c_mutex = NULL;
}
/*
* Enqueue a waiting thread to a condition queue in descending priority
* order.
*/
/*
 * Enqueue a waiting thread to a condition queue in descending priority
 * order.
 *
 * Fix: the diagnostics below previously passed the unguarded
 * pthread->name to %s even though a NULL-safe 'name' had already been
 * computed; use the guarded value so a nameless thread cannot cause a
 * NULL dereference inside _thread_printf().
 */
static void
cond_queue_enq(pthread_cond_t cond, pthread_t pthread)
{
	pthread_t tid = TAILQ_LAST(&cond->c_queue, cond_head);
	char *name;

	/* Guard against a NULL thread name in the diagnostics below. */
	name = pthread->name ? pthread->name : "unknown";
	if ((pthread->flags & PTHREAD_FLAGS_IN_CONDQ) != 0)
		_thread_printf(2, "Thread (%s:%ld) already on condq\n",
		    name, pthread->thr_id);
	if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0)
		_thread_printf(2, "Thread (%s:%ld) already on mutexq\n",
		    name, pthread->thr_id);
	PTHREAD_ASSERT_NOT_IN_SYNCQ(pthread);
	/*
	 * For the common case of all threads having equal priority,
	 * we perform a quick check against the priority of the thread
	 * at the tail of the queue.
	 */
	if ((tid == NULL) || (pthread->active_priority <= tid->active_priority))
		TAILQ_INSERT_TAIL(&cond->c_queue, pthread, sqe);
	else {
		tid = TAILQ_FIRST(&cond->c_queue);
		while (pthread->active_priority <= tid->active_priority)
			tid = TAILQ_NEXT(tid, sqe);
		TAILQ_INSERT_BEFORE(tid, pthread, sqe);
	}
	pthread->flags |= PTHREAD_FLAGS_IN_CONDQ;
	pthread->data.cond = cond;
}
/*
 * Dynamically initialize a statically allocated condition variable.
 * The spinlock serializes racing initializers so only one thread
 * performs the actual initialization.
 */
static inline int
cond_init(pthread_cond_t *cond)
{
	int ret = 0;

	_SPINLOCK(&static_cond_lock);
	/* Re-check under the lock; another thread may have won the race. */
	if (*cond == PTHREAD_COND_INITIALIZER)
		ret = _pthread_cond_init(cond, NULL);
	_SPINUNLOCK(&static_cond_lock);
	return (ret);
}

View File

@ -0,0 +1,128 @@
/*
* Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by John Birrell.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include "thr_private.h"
__weak_reference(_pthread_condattr_init, pthread_condattr_init);
__weak_reference(_pthread_condattr_destroy, pthread_condattr_destroy);
__weak_reference(_pthread_condattr_getclock, pthread_condattr_getclock);
__weak_reference(_pthread_condattr_setclock, pthread_condattr_setclock);
__weak_reference(_pthread_condattr_getpshared, pthread_condattr_getpshared);
__weak_reference(_pthread_condattr_setpshared, pthread_condattr_setpshared);
int
_pthread_condattr_init(pthread_condattr_t *attr)
{
pthread_condattr_t pattr;
int ret;
if ((pattr = (pthread_condattr_t)
malloc(sizeof(struct pthread_cond_attr))) == NULL) {
ret = ENOMEM;
} else {
memcpy(pattr, &_pthread_condattr_default,
sizeof(struct pthread_cond_attr));
*attr = pattr;
ret = 0;
}
return (ret);
}
/*
 * Release a condvar attribute object and clear the caller's handle.
 * Returns EINVAL on a NULL or already-destroyed attribute.
 */
int
_pthread_condattr_destroy(pthread_condattr_t *attr)
{
	if (attr == NULL || *attr == NULL)
		return (EINVAL);
	free(*attr);
	*attr = NULL;
	return (0);
}
/*
 * Report the clock a condvar built with these attributes will use
 * for timed waits.
 */
int
_pthread_condattr_getclock(const pthread_condattr_t *attr,
	clockid_t *clock_id)
{
	struct pthread_cond_attr *pattr;

	if (attr == NULL)
		return (EINVAL);
	pattr = *attr;
	if (pattr == NULL)
		return (EINVAL);
	*clock_id = pattr->c_clockid;
	return (0);
}
/*
 * Select the clock used for timed waits on condvars built with these
 * attributes.  Only the clocks the library can wait on are accepted.
 */
int
_pthread_condattr_setclock(pthread_condattr_t *attr,
	clockid_t clock_id)
{
	if (attr == NULL || *attr == NULL)
		return (EINVAL);
	switch (clock_id) {
	case CLOCK_REALTIME:
	case CLOCK_VIRTUAL:
	case CLOCK_PROF:
	case CLOCK_MONOTONIC:
		break;
	default:
		return (EINVAL);
	}
	(*attr)->c_clockid = clock_id;
	return (0);
}
/*
 * Report the process-shared attribute.
 *
 * Fix: the previous code assigned PTHREAD_PROCESS_PRIVATE to the
 * 'pshared' parameter itself instead of storing through the pointer,
 * so the caller's output variable was never written.  Per POSIX the
 * value must be stored in *pshared.
 */
int
_pthread_condattr_getpshared(const pthread_condattr_t *attr,
	int *pshared)
{
	if (attr == NULL || *attr == NULL)
		return (EINVAL);
	/* This implementation only supports process-private condvars. */
	*pshared = PTHREAD_PROCESS_PRIVATE;
	return (0);
}
/*
 * Set the process-shared attribute.  Only PTHREAD_PROCESS_PRIVATE is
 * supported by this implementation; anything else is rejected.
 */
int
_pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
{
	if (attr == NULL || *attr == NULL)
		return (EINVAL);
	return (pshared == PTHREAD_PROCESS_PRIVATE ? 0 : EINVAL);
}

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2003 Daniel M. Eischen <deischen@gdeb.com>
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
@ -31,27 +32,20 @@
*
* $FreeBSD$
*/
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <stddef.h>
#include <sys/time.h>
#include <machine/reg.h>
#include <pthread.h>
#include <sys/signalvar.h>
#include "thr_private.h"
#include "libc_private.h"
#define OFF(f) offsetof(struct pthread, f)
int _thread_thr_id_offset = OFF(thr_id);
int _thread_next_offset = OFF(tle.tqe_next);
int _thread_name_offset = OFF(name);
int _thread_ctx_offset = OFF(ctx);
#undef OFF
int _thread_PS_RUNNING_value = PS_RUNNING;
int _thread_PS_DEAD_value = PS_DEAD;
static void free_thread(struct pthread *curthread, struct pthread *thread);
static int create_stack(struct pthread_attr *pattr);
static void free_stack(struct pthread *curthread, struct pthread_attr *pattr);
static void thread_start(struct pthread *curthread);
__weak_reference(_pthread_create, pthread_create);
@ -59,73 +53,80 @@ int
_pthread_create(pthread_t * thread, const pthread_attr_t * attr,
void *(*start_routine) (void *), void *arg)
{
int ret = 0;
pthread_t new_thread;
pthread_attr_t pattr;
int flags;
void *stack;
ucontext_t uc;
sigset_t sigmask, oldsigmask;
struct pthread *curthread, *new_thread;
int ret = 0;
_thr_check_init();
/*
* Locking functions in libc are required when there are
* threads other than the initial thread.
* Tell libc and others now they need lock to protect their data.
*/
__isthreaded = 1;
/* Allocate memory for the thread structure: */
if ((new_thread = (pthread_t) malloc(sizeof(struct pthread))) == NULL)
if (_thr_isthreaded() == 0 && _thr_setthreaded(1))
return (EAGAIN);
/* Check if default thread attributes are required: */
if (attr == NULL || *attr == NULL)
pattr = &pthread_attr_default;
else
pattr = *attr;
/* Check if a stack was specified in the thread attributes: */
if ((stack = pattr->stackaddr_attr) == NULL) {
stack = _thread_stack_alloc(pattr->stacksize_attr,
pattr->guardsize_attr);
if (stack == NULL) {
free(new_thread);
return (EAGAIN);
}
curthread = _get_curthread();
if ((new_thread = _thr_alloc(curthread)) == NULL)
return (EAGAIN);
if (attr == NULL || *attr == NULL)
/* Use the default thread attributes: */
new_thread->attr = _pthread_attr_default;
else
new_thread->attr = *(*attr);
if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
/* inherit scheduling contention scope */
if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
else
new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
/*
* scheduling policy and scheduling parameters will be
* inherited in following code.
*/
}
/* Initialise the thread structure: */
init_td_common(new_thread, pattr, 0);
new_thread->stack = stack;
if (_thr_scope_system > 0)
new_thread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
else if (_thr_scope_system < 0)
new_thread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
if (create_stack(&new_thread->attr) != 0) {
/* Insufficient memory to create a stack: */
new_thread->terminated = 1;
_thr_free(curthread, new_thread);
return (EAGAIN);
}
/*
* Write a magic value to the thread structure
* to help identify valid ones:
*/
new_thread->magic = THR_MAGIC;
new_thread->start_routine = start_routine;
new_thread->arg = arg;
/* Initialise the machine context: */
getcontext(&new_thread->ctx);
new_thread->savedsig = new_thread->ctx.uc_sigmask;
new_thread->ctx.uc_stack.ss_sp = new_thread->stack;
new_thread->ctx.uc_stack.ss_size = pattr->stacksize_attr;
makecontext(&new_thread->ctx, (void (*)(void))_thread_start, 1, new_thread);
new_thread->arch_id = _set_curthread(&new_thread->ctx, new_thread, &ret);
if (ret != 0) {
if (pattr->stackaddr_attr == NULL) {
STACK_LOCK;
_thread_stack_free(new_thread->stack,
pattr->stacksize_attr, pattr->guardsize_attr);
STACK_UNLOCK;
}
free(new_thread);
return (ret);
}
new_thread->cancelflags = PTHREAD_CANCEL_ENABLE |
PTHREAD_CANCEL_DEFERRED;
getcontext(&uc);
SIGFILLSET(uc.uc_sigmask);
uc.uc_stack.ss_sp = new_thread->attr.stackaddr_attr;
uc.uc_stack.ss_size = new_thread->attr.stacksize_attr;
makecontext(&uc, (void (*)(void))thread_start, 1, new_thread);
/*
* Check if this thread is to inherit the scheduling
* attributes from its parent:
*/
if (new_thread->attr.flags & PTHREAD_INHERIT_SCHED) {
/* Copy the scheduling attributes: */
new_thread->base_priority = curthread->base_priority &
~PTHREAD_SIGNAL_PRIORITY;
new_thread->attr.prio = curthread->base_priority &
~PTHREAD_SIGNAL_PRIORITY;
if (new_thread->attr.sched_inherit == PTHREAD_INHERIT_SCHED) {
/*
* Copy the scheduling attributes. Lock the scheduling
* lock to get consistent scheduling parameters.
*/
THR_LOCK(curthread);
new_thread->base_priority = curthread->base_priority;
new_thread->attr.prio = curthread->base_priority;
new_thread->attr.sched_policy = curthread->attr.sched_policy;
THR_UNLOCK(curthread);
} else {
/*
* Use just the thread priority, leaving the
@ -136,53 +137,87 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
}
new_thread->active_priority = new_thread->base_priority;
THREAD_LIST_LOCK;
/* Add the thread to the linked list of all threads: */
TAILQ_INSERT_HEAD(&_thread_list, new_thread, tle);
/* Initialize the mutex queue: */
TAILQ_INIT(&new_thread->mutexq);
TAILQ_INIT(&new_thread->pri_mutexq);
/* Initialise hooks in the thread structure: */
if (new_thread->attr.suspend == THR_CREATE_SUSPENDED)
new_thread->flags = THR_FLAGS_SUSPENDED;
new_thread->state = PS_RUNNING;
/*
* Create the thread.
* Thread created by thr_create() inherits currrent thread
* sigmask, however, before new thread setup itself correctly,
* it can not handle signal, so we should masks all signals here.
*/
if (pattr->suspend == PTHREAD_CREATE_SUSPENDED)
new_thread->flags |= PTHREAD_FLAGS_SUSPENDED;
/* new thread inherits signal mask in kernel */
_thread_sigblock();
ret = thr_create(&new_thread->ctx, &new_thread->thr_id, flags);
/* restore my signal mask */
_thread_sigunblock();
if (ret != 0) {
_thread_printf(STDERR_FILENO, "thr_create() == %d\n", ret);
PANIC("thr_create");
}
THREAD_LIST_UNLOCK;
/* Return a pointer to the thread structure: */
SIGFILLSET(sigmask);
__sys_sigprocmask(SIG_SETMASK, &sigmask, &oldsigmask);
new_thread->sigmask = oldsigmask;
/* Add the new thread. */
_thr_link(curthread, new_thread);
/* Return thread pointer eariler so that new thread can use it. */
(*thread) = new_thread;
return (0);
/* Schedule the new thread. */
ret = thr_create(&uc, &new_thread->tid, 0);
__sys_sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
if (ret != 0) {
_thr_unlink(curthread, new_thread);
free_thread(curthread, new_thread);
(*thread) = 0;
ret = EAGAIN;
}
return (ret);
}
void
_thread_start(pthread_t td)
static void
free_thread(struct pthread *curthread, struct pthread *thread)
{
free_stack(curthread, &thread->attr);
curthread->terminated = 1;
_thr_free(curthread, thread);
}
static int
create_stack(struct pthread_attr *pattr)
{
int ret;
/*
* for AMD64, we need to set fsbase by thread itself, before
* fsbase is set, we can not run any other code, for example
* signal code.
*/
_set_curthread(NULL, td, &ret);
/* Check if a stack was specified in the thread attributes: */
if ((pattr->stackaddr_attr) != NULL) {
pattr->guardsize_attr = 0;
pattr->flags |= THR_STACK_USER;
ret = 0;
}
else
ret = _thr_stack_alloc(pattr);
return (ret);
}
/* restore signal mask inherited before */
__sys_sigprocmask(SIG_SETMASK, &td->savedsig, NULL);
static void
free_stack(struct pthread *curthread, struct pthread_attr *pattr)
{
if ((pattr->flags & THR_STACK_USER) == 0) {
THREAD_LIST_LOCK(curthread);
/* Stack routines don't use malloc/free. */
_thr_stack_free(pattr);
THREAD_LIST_UNLOCK(curthread);
}
}
if ((curthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0)
_thread_suspend(curthread, NULL);
static void
thread_start(struct pthread *curthread)
{
_tcb_set(curthread->tcb);
/* Thread was created with all signals blocked, unblock them. */
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
if (curthread->flags & THR_FLAGS_NEED_SUSPEND)
_thr_suspend_check(curthread);
/* Run the current thread's start routine with argument: */
pthread_exit(curthread->start_routine(curthread->arg));
/* This point should never be reached. */
PANIC("Thread has resumed after exit");
}

View File

@ -31,9 +31,11 @@
*
* $FreeBSD$
*/
#include <sys/types.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include "thr_private.h"
__weak_reference(_pthread_detach, pthread_detach);
@ -41,42 +43,31 @@ __weak_reference(_pthread_detach, pthread_detach);
int
_pthread_detach(pthread_t pthread)
{
int error;
struct pthread *curthread = _get_curthread();
int rval;
if (pthread->magic != PTHREAD_MAGIC)
if (pthread == NULL)
return (EINVAL);
PTHREAD_LOCK(pthread);
if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) {
_thread_sigblock();
DEAD_LIST_LOCK;
error = pthread->isdead ? ESRCH : EINVAL;
DEAD_LIST_UNLOCK;
_thread_sigunblock();
PTHREAD_UNLOCK(pthread);
return (error);
THREAD_LIST_LOCK(curthread);
if ((rval = _thr_find_thread(curthread, pthread,
/*include dead*/1)) != 0) {
THREAD_LIST_UNLOCK(curthread);
return (rval);
}
pthread->attr.flags |= PTHREAD_DETACHED;
/* Check if there is a joiner: */
if (pthread->joiner != NULL) {
struct pthread *joiner = pthread->joiner;
/* Set the return value for the woken thread: */
joiner->join_status.error = ESRCH;
joiner->join_status.ret = NULL;
joiner->join_status.thread = NULL;
/*
* Disconnect the joiner from the thread being detached:
*/
pthread->joiner = NULL;
PTHREAD_WAKE(joiner);
/* Check if the thread is already detached or has a joiner. */
if ((pthread->tlflags & TLFLAGS_DETACHED) != 0 ||
(pthread->joiner != NULL)) {
THREAD_LIST_UNLOCK(curthread);
return (EINVAL);
}
PTHREAD_UNLOCK(pthread);
/* Flag the thread as detached. */
pthread->tlflags |= TLFLAGS_DETACHED;
if (pthread->state == PS_DEAD)
THR_GCLIST_ADD(pthread);
THREAD_LIST_UNLOCK(curthread);
return (0);
}

View File

@ -31,39 +31,28 @@
*
* $FreeBSD$
*/
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include "thr_private.h"
void _pthread_exit(void *status);
__weak_reference(_pthread_exit, pthread_exit);
static void deadlist_free_threads();
void
_thread_exit(char *fname, int lineno, char *string)
_thread_exit(char *fname, int lineno, char *msg)
{
char s[256];
/* Prepare an error message string: */
snprintf(s, sizeof(s),
/* Write an error message to the standard error file descriptor: */
_thread_printf(2,
"Fatal error '%s' at line %d in file %s (errno = %d)\n",
string, lineno, fname, errno);
msg, lineno, fname, errno);
/* Write the string to the standard error file descriptor: */
__sys_write(2, s, strlen(s));
/* Force this process to exit: */
/* XXX - Do we want abort to be conditional on _PTHREADS_INVARIANTS? */
#if defined(_PTHREADS_INVARIANTS)
abort();
#else
__sys_exit(1);
#endif
}
/*
@ -72,8 +61,10 @@ _thread_exit(char *fname, int lineno, char *string)
* abnormal thread termination can be found.
*/
void
_thread_exit_cleanup(void)
_thr_exit_cleanup(void)
{
struct pthread *curthread = _get_curthread();
/*
* POSIX states that cancellation/termination of a thread should
* not release any visible resources (such as mutexes) and that
@ -93,27 +84,24 @@ _thread_exit_cleanup(void)
void
_pthread_exit(void *status)
{
struct pthread *pthread;
int exitNow = 0;
/*
* This thread will no longer handle any signals.
*/
_thread_sigblock();
struct pthread *curthread = _get_curthread();
/* Check if this thread is already in the process of exiting: */
if (curthread->exiting) {
if ((curthread->cancelflags & THR_CANCEL_EXITING) != 0) {
char msg[128];
snprintf(msg, sizeof(msg), "Thread %p has called pthread_exit() from a destructor. POSIX 1003.1 1996 s16.2.5.2 does not allow this!",curthread);
snprintf(msg, sizeof(msg), "Thread %p has called "
"pthread_exit() from a destructor. POSIX 1003.1 "
"1996 s16.2.5.2 does not allow this!", curthread);
PANIC(msg);
}
/* Flag this thread as exiting: */
curthread->exiting = 1;
/* Flag this thread as exiting. */
atomic_set_int(&curthread->cancelflags, THR_CANCEL_EXITING);
_thr_exit_cleanup();
/* Save the return value: */
curthread->ret = status;
while (curthread->cleanup != NULL) {
pthread_cleanup_pop(1);
}
@ -126,93 +114,23 @@ _pthread_exit(void *status)
_thread_cleanupspecific();
}
/*
* Remove read-write lock list. It is allocated as-needed.
* Therefore, it must be checked for validity before freeing.
*/
if (curthread->rwlockList != NULL)
free(curthread->rwlockList);
/* Lock the dead list first to maintain correct lock order */
DEAD_LIST_LOCK;
THREAD_LIST_LOCK;
/* Check if there is a thread joining this one: */
if (curthread->joiner != NULL) {
pthread = curthread->joiner;
curthread->joiner = NULL;
/* Set the return value for the joining thread: */
pthread->join_status.ret = curthread->ret;
pthread->join_status.error = 0;
pthread->join_status.thread = NULL;
/* Make the joining thread runnable: */
PTHREAD_WAKE(pthread);
curthread->attr.flags |= PTHREAD_DETACHED;
}
/*
* Free any memory allocated for dead threads.
* Add this thread to the list of dead threads, and
* also remove it from the active threads list.
*/
deadlist_free_threads();
TAILQ_INSERT_HEAD(&_dead_list, curthread, dle);
TAILQ_REMOVE(&_thread_list, curthread, tle);
/* If we're the last thread, call it quits */
if (TAILQ_EMPTY(&_thread_list))
exitNow = 1;
THREAD_LIST_UNLOCK;
DEAD_LIST_UNLOCK;
if (exitNow)
if (!_thr_isthreaded())
exit(0);
/*
* This function will not return unless we are the last
* thread, which we can't be because we've already checked
* for that.
*/
thr_exit((long *)&curthread->isdead);
/* This point should not be reached. */
PANIC("Dead thread has resumed");
}
/*
* Note: this function must be called with the dead thread list
* locked.
*/
static void
deadlist_free_threads()
{
struct pthread *ptd, *ptdTemp;
TAILQ_FOREACH_SAFE(ptd, &_dead_list, dle, ptdTemp) {
/* Don't destroy the initial thread or non-detached threads. */
if (ptd == _thread_initial ||
(ptd->attr.flags & PTHREAD_DETACHED) == 0 ||
!ptd->isdead)
continue;
TAILQ_REMOVE(&_dead_list, ptd, dle);
deadlist_free_onethread(ptd);
THREAD_LIST_LOCK(curthread);
_thread_active_threads--;
if (_thread_active_threads == 0) {
THREAD_LIST_UNLOCK(curthread);
exit(0);
/* Never reach! */
}
}
void
deadlist_free_onethread(struct pthread *ptd)
{
if (ptd->attr.stackaddr_attr == NULL && ptd->stack != NULL) {
STACK_LOCK;
_thread_stack_free(ptd->stack, ptd->attr.stacksize_attr,
ptd->attr.guardsize_attr);
STACK_UNLOCK;
}
_retire_thread(ptd->arch_id);
free(ptd);
if (curthread->tlflags & TLFLAGS_DETACHED)
THR_GCLIST_ADD(curthread);
curthread->state = PS_DEAD;
THREAD_LIST_UNLOCK(curthread);
if (curthread->joiner)
_thr_umtx_wake(&curthread->state, INT_MAX);
thr_exit(&curthread->terminated);
PANIC("thr_exit() returned");
/* Never reach! */
}

View File

@ -0,0 +1,222 @@
/*
* Copyright (c) 2005 David Xu <davidxu@freebsd.org>
* Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
/*
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by John Birrell.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <pthread.h>
#include <spinlock.h>
#include "libc_private.h"
#include "thr_private.h"
__weak_reference(_pthread_atfork, pthread_atfork);
/*
 * Register prepare/parent/child hooks to be run around fork().
 * Entries are appended to _thr_atfork_list, which _fork() walks:
 * prepare handlers in reverse order, parent/child handlers in
 * registration order.  Returns ENOMEM if the entry cannot be
 * allocated.
 */
int
_pthread_atfork(void (*prepare)(void), void (*parent)(void),
    void (*child)(void))
{
	struct pthread_atfork *entry;
	struct pthread *self;

	_thr_check_init();

	entry = malloc(sizeof(struct pthread_atfork));
	if (entry == NULL)
		return (ENOMEM);
	entry->prepare = prepare;
	entry->parent = parent;
	entry->child = child;

	/* Append under the atfork lock; fork() iterates this list. */
	self = _get_curthread();
	THR_UMTX_LOCK(self, &_thr_atfork_lock);
	TAILQ_INSERT_TAIL(&_thr_atfork_list, entry, qe);
	THR_UMTX_UNLOCK(self, &_thr_atfork_lock);
	return (0);
}
/*
* For a while, allow libpthread to work with a libc that doesn't
* export the malloc lock.
*/
#pragma weak __malloc_lock
__weak_reference(_fork, fork);
/*
 * fork() replacement for threaded processes.
 *
 * Serializes concurrent forks on the static 'inprogress' umtx word,
 * runs the registered pthread_atfork() hooks (prepare handlers in
 * reverse registration order, parent/child handlers in registration
 * order), and rebuilds the child's single-threaded library state after
 * __sys_fork() returns.  The return value and errno come straight
 * from __sys_fork().
 */
pid_t
_fork(void)
{
	static umtx_t inprogress;	/* nonzero while a fork is running */
	static int waiters;		/* threads sleeping on 'inprogress' */
	umtx_t tmp;
	struct pthread *curthread;
	struct pthread_atfork *af;
	pid_t ret;
	int errsave;
	int unlock_malloc;
	/* Not threaded yet: a plain fork is safe, skip all the dance. */
	if (!_thr_is_inited())
		return (__sys_fork());
	curthread = _get_curthread();
	/*
	 * Block all signals until we reach a safe point.
	 */
	_thr_signal_block(curthread);
	/*
	 * Allow only one fork at a time; later arrivals sleep on the
	 * 'inprogress' word until the current fork completes.
	 */
	THR_UMTX_LOCK(curthread, &_thr_atfork_lock);
	tmp = inprogress;
	while (tmp) {
		waiters++;
		THR_UMTX_UNLOCK(curthread, &_thr_atfork_lock);
		_thr_umtx_wait(&inprogress, tmp, NULL);
		THR_UMTX_LOCK(curthread, &_thr_atfork_lock);
		waiters--;
		tmp = inprogress;
	}
	inprogress = 1;
	/* Unlock mutex, allow new hooks to be added while hooks execute. */
	THR_UMTX_UNLOCK(curthread, &_thr_atfork_lock);
	/* Run down atfork prepare handlers (reverse registration order). */
	TAILQ_FOREACH_REVERSE(af, &_thr_atfork_list, atfork_head, qe) {
		if (af->prepare != NULL)
			af->prepare();
	}
	/*
	 * Try our best to protect memory from being corrupted in the
	 * child process: another thread that is inside malloc at fork
	 * time is simply killed by fork(), so hold the malloc lock
	 * across the fork to keep the heap consistent.
	 */
	if ((_thr_isthreaded() != 0) && (__malloc_lock != NULL)) {
		unlock_malloc = 1;
		_spinlock(__malloc_lock);
	} else {
		unlock_malloc = 0;
	}
	/* Fork a new process: */
	if ((ret = __sys_fork()) == 0) {
		/* Child process */
		errsave = errno;
		inprogress = 0;
		curthread->cancelflags &= ~THR_CANCEL_NEEDED;
		/*
		 * Thread list will be reinitialized, and later we call
		 * _libpthread_init(), it will add us back to list.
		 */
		curthread->tlflags &= ~(TLFLAGS_IN_TDLIST | TLFLAGS_DETACHED);
		/* The child runs on a new kernel thread; refetch its id. */
		thr_self(&curthread->tid);
		/* Clear locks that other (now gone) threads held on us. */
		_thr_umtx_init(&curthread->lock);
		_thr_umtx_init(&_thr_atfork_lock);
		_thr_setthreaded(0);
		/* Reinitialize libc spinlocks; this includes __malloc_lock. */
		_thr_spinlock_init();
		_mutex_fork(curthread);
		/* Reinitialize the library. */
		_libpthread_init(curthread);
		/* Ready to continue, unblock signals. */
		_thr_signal_unblock(curthread);
		/* Run down atfork child handlers. */
		TAILQ_FOREACH(af, &_thr_atfork_list, qe) {
			if (af->child != NULL)
				af->child();
		}
	} else {
		/* Parent process */
		errsave = errno;
		if (unlock_malloc)
			_spinunlock(__malloc_lock);
		/* Ready to continue, unblock signals. */
		_thr_signal_unblock(curthread);
		/* Run down atfork parent handlers. */
		TAILQ_FOREACH(af, &_thr_atfork_list, qe) {
			if (af->parent != NULL)
				af->parent();
		}
		/* Let the next forker, if any, proceed. */
		THR_UMTX_LOCK(curthread, &_thr_atfork_lock);
		inprogress = 0;
		if (waiters)
			_thr_umtx_wake(&inprogress, waiters);
		THR_UMTX_UNLOCK(curthread, &_thr_atfork_lock);
	}
	errno = errsave;
	/* Return the process ID: */
	return (ret);
}

View File

@ -0,0 +1,77 @@
/*
* Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by Daniel Eischen.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <errno.h>
#include <pthread.h>
#include "thr_private.h"
__weak_reference(_pthread_getschedparam, pthread_getschedparam);
int
_pthread_getschedparam(pthread_t pthread, int *policy,
struct sched_param *param)
{
struct pthread *curthread = _get_curthread();
int ret, tmp;
if ((param == NULL) || (policy == NULL))
/* Return an invalid argument error: */
ret = EINVAL;
else if (pthread == curthread) {
/*
* Avoid searching the thread list when it is the current
* thread.
*/
THR_THREAD_LOCK(curthread, curthread);
param->sched_priority =
THR_BASE_PRIORITY(pthread->base_priority);
tmp = pthread->attr.sched_policy;
THR_THREAD_UNLOCK(curthread, curthread);
*policy = tmp;
ret = 0;
}
/* Find the thread in the list of active threads. */
else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
== 0) {
THR_THREAD_LOCK(curthread, pthread);
param->sched_priority =
THR_BASE_PRIORITY(pthread->base_priority);
tmp = pthread->attr.sched_policy;
THR_THREAD_UNLOCK(curthread, pthread);
_thr_ref_delete(curthread, pthread);
*policy = tmp;
}
return (ret);
}

View File

@ -31,6 +31,7 @@
*
* $FreeBSD$
*/
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
@ -38,6 +39,7 @@
#include <unistd.h>
#include <pthread.h>
#include <errno.h>
#include "thr_private.h"
#ifndef NELEMENTS
@ -57,10 +59,8 @@ struct s_thread_info {
static const struct s_thread_info thread_info[] = {
{PS_RUNNING , "Running"},
{PS_MUTEX_WAIT , "Waiting on a mutex"},
{PS_COND_WAIT , "Waiting on a condition variable"},
{PS_SLEEP_WAIT , "Sleeping"},
{PS_WAIT_WAIT , "Waiting process"},
{PS_JOIN , "Waiting to join"},
{PS_SUSPENDED , "Suspended"},
{PS_DEAD , "Dead"},
{PS_DEADLOCK , "Deadlocked"},
{PS_STATE_MAX , "Not a real state!"}
@ -69,14 +69,12 @@ static const struct s_thread_info thread_info[] = {
void
_thread_dump_info(void)
{
char s[512];
int fd;
int i;
pthread_t pthread;
char tmpfile[128];
char s[512], tmpfile[128];
pthread_t pthread;
int fd, i;
for (i = 0; i < 100000; i++) {
snprintf(tmpfile, sizeof(tmpfile), "/tmp/uthread.dump.%u.%i",
snprintf(tmpfile, sizeof(tmpfile), "/tmp/pthread.dump.%u.%i",
getpid(), i);
/* Open the dump file for append and create it if necessary: */
if ((fd = __sys_open(tmpfile, O_RDWR | O_CREAT | O_EXCL,
@ -99,37 +97,34 @@ _thread_dump_info(void)
/* all 100000 possibilities are in use :( */
return;
} else {
/* Output a header for active threads: */
strcpy(s, "\n\n=============\nACTIVE THREADS\n\n");
/* Dump the active threads. */
strcpy(s, "\n\n========\nACTIVE THREADS\n\n");
__sys_write(fd, s, strlen(s));
/* Enter a loop to report each thread in the global list: */
TAILQ_FOREACH(pthread, &_thread_list, tle) {
dump_thread(fd, pthread, /*long_verson*/ 1);
if (pthread->state != PS_DEAD)
dump_thread(fd, pthread, /*long_verson*/ 1);
}
/* Check if there are no dead threads: */
DEAD_LIST_LOCK;
if (TAILQ_FIRST(&_dead_list) == NULL) {
/* Output a record: */
strcpy(s, "\n\nTHERE ARE NO DEAD THREADS\n");
__sys_write(fd, s, strlen(s));
} else {
/* Output a header for dead threads: */
strcpy(s, "\n\nDEAD THREADS\n\n");
__sys_write(fd, s, strlen(s));
/*
* Dump the ready threads.
* XXX - We can't easily do this because the run queues
* are per-KSEG.
*/
strcpy(s, "\n\n========\nREADY THREADS - unimplemented\n\n");
__sys_write(fd, s, strlen(s));
/*
* Enter a loop to report each thread in the global
* dead thread list:
*/
TAILQ_FOREACH(pthread, &_dead_list, dle) {
dump_thread(fd, pthread, /*long_version*/ 0);
}
}
DEAD_LIST_UNLOCK;
/* Close the dump file: */
/*
* Dump the waiting threads.
* XXX - We can't easily do this because the wait queues
* are per-KSEG.
*/
strcpy(s, "\n\n========\nWAITING THREADS - unimplemented\n\n");
__sys_write(fd, s, strlen(s));
/* Close the dump file. */
__sys_close(fd);
}
}
@ -137,8 +132,9 @@ _thread_dump_info(void)
static void
dump_thread(int fd, pthread_t pthread, int long_version)
{
char s[512];
int i;
struct pthread *curthread = _get_curthread();
char s[512];
int i;
/* Find the state: */
for (i = 0; i < NELEMENTS(thread_info) - 1; i++)
@ -147,10 +143,12 @@ dump_thread(int fd, pthread_t pthread, int long_version)
/* Output a record for the thread: */
snprintf(s, sizeof(s),
"--------------------\nThread %p (%s) prio %3d state %s [%s:%d]\n",
"--------------------\n"
"Thread %p (%s), scope %s, prio %3d, state %s [%s:%d]\n",
pthread, (pthread->name == NULL) ? "" : pthread->name,
pthread->active_priority, thread_info[i].name, pthread->fname,
pthread->lineno);
pthread->attr.flags & PTHREAD_SCOPE_SYSTEM ? "system" : "process",
pthread->active_priority,
thread_info[i].name, pthread->fname, pthread->lineno);
__sys_write(fd, s, strlen(s));
if (long_version != 0) {
@ -161,11 +159,12 @@ dump_thread(int fd, pthread_t pthread, int long_version)
__sys_write(fd, s, strlen(s));
}
/* Check if this is the initial thread: */
if (pthread == _thread_initial) {
if (pthread == _thr_initial) {
/* Output a record for the initial thread: */
strcpy(s, "This is the initial thread\n");
__sys_write(fd, s, strlen(s));
}
/* Process according to thread state: */
switch (pthread->state) {
/*
@ -173,7 +172,15 @@ dump_thread(int fd, pthread_t pthread, int long_version)
* coded to dump information:
*/
default:
/* Nothing to do here. */
snprintf(s, sizeof(s), "sigmask (hi) ");
__sys_write(fd, s, strlen(s));
for (i = _SIG_WORDS - 1; i >= 0; i--) {
snprintf(s, sizeof(s), "%08x ",
pthread->sigmask.__bits[i]);
__sys_write(fd, s, strlen(s));
}
snprintf(s, sizeof(s), "(lo)\n");
__sys_write(fd, s, strlen(s));
break;
}
}
@ -181,10 +188,10 @@ dump_thread(int fd, pthread_t pthread, int long_version)
/* Set the thread name for debug: */
void
_pthread_set_name_np(pthread_t thread, const char *name)
_pthread_set_name_np(pthread_t thread, char *name)
{
/* Check if the caller has specified a valid thread: */
if (thread != NULL && thread->magic == PTHREAD_MAGIC) {
if (thread != NULL && thread->magic == THR_MAGIC) {
if (thread->name != NULL) {
/* Free space for previous name. */
free(thread->name);

View File

@ -1,4 +1,5 @@
/*
* Copyright (c) 2003 Daniel M. Eischen <deischen@freebsd.org>
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
@ -38,6 +39,7 @@
#include "namespace.h"
#include <sys/param.h>
#include <sys/types.h>
#include <sys/signalvar.h>
#include <machine/reg.h>
#include <sys/ioctl.h>
@ -56,6 +58,7 @@
#include <fcntl.h>
#include <paths.h>
#include <pthread.h>
#include <pthread_np.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
@ -63,19 +66,22 @@
#include <unistd.h>
#include "un-namespace.h"
#include "libc_private.h"
#include "thr_private.h"
extern void _thread_init_hack(void);
int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *);
int __pthread_mutex_lock(pthread_mutex_t *);
int __pthread_mutex_trylock(pthread_mutex_t *);
void _thread_init_hack(void) __attribute__ ((constructor));
static void init_private(void);
static void init_main_thread(struct pthread *thread);
/*
* All weak references used within libc should be in this table.
* This will is so that static libraries will work.
*
* XXXTHR - Check this list.
* This is so that static libraries will work.
*/
static void *references[] = {
&_thread_init_hack,
&_thread_init,
&_accept,
&_bind,
&_close,
@ -126,6 +132,7 @@ static void *references[] = {
&_sigsuspend,
&_socket,
&_socketpair,
&_thread_init_hack,
&_wait4,
&_write,
&_writev
@ -138,8 +145,6 @@ static void *references[] = {
* libraries, then the actual functions will not be loaded.
*/
static void *libgcc_references[] = {
&_thread_init_hack,
&_thread_init,
&_pthread_once,
&_pthread_key_create,
&_pthread_key_delete,
@ -149,123 +154,93 @@ static void *libgcc_references[] = {
&_pthread_mutex_destroy,
&_pthread_mutex_lock,
&_pthread_mutex_trylock,
&_pthread_mutex_unlock
&_pthread_mutex_unlock,
&_pthread_create
};
int _pthread_guard_default;
int _pthread_page_size;
int _pthread_stack_default;
int _pthread_stack_initial;
#define DUAL_ENTRY(entry) \
(pthread_func_t)entry, (pthread_func_t)entry
static pthread_func_t jmp_table[][2] = {
{DUAL_ENTRY(_pthread_cond_broadcast)}, /* PJT_COND_BROADCAST */
{DUAL_ENTRY(_pthread_cond_destroy)}, /* PJT_COND_DESTROY */
{DUAL_ENTRY(_pthread_cond_init)}, /* PJT_COND_INIT */
{DUAL_ENTRY(_pthread_cond_signal)}, /* PJT_COND_SIGNAL */
{(pthread_func_t)__pthread_cond_wait,
(pthread_func_t)_pthread_cond_wait}, /* PJT_COND_WAIT */
{DUAL_ENTRY(_pthread_getspecific)}, /* PJT_GETSPECIFIC */
{DUAL_ENTRY(_pthread_key_create)}, /* PJT_KEY_CREATE */
{DUAL_ENTRY(_pthread_key_delete)}, /* PJT_KEY_DELETE*/
{DUAL_ENTRY(_pthread_main_np)}, /* PJT_MAIN_NP */
{DUAL_ENTRY(_pthread_mutex_destroy)}, /* PJT_MUTEX_DESTROY */
{DUAL_ENTRY(_pthread_mutex_init)}, /* PJT_MUTEX_INIT */
{(pthread_func_t)__pthread_mutex_lock,
(pthread_func_t)_pthread_mutex_lock}, /* PJT_MUTEX_LOCK */
{(pthread_func_t)__pthread_mutex_trylock,
(pthread_func_t)_pthread_mutex_trylock},/* PJT_MUTEX_TRYLOCK */
{DUAL_ENTRY(_pthread_mutex_unlock)}, /* PJT_MUTEX_UNLOCK */
{DUAL_ENTRY(_pthread_mutexattr_destroy)}, /* PJT_MUTEXATTR_DESTROY */
{DUAL_ENTRY(_pthread_mutexattr_init)}, /* PJT_MUTEXATTR_INIT */
{DUAL_ENTRY(_pthread_mutexattr_settype)}, /* PJT_MUTEXATTR_SETTYPE */
{DUAL_ENTRY(_pthread_once)}, /* PJT_ONCE */
{DUAL_ENTRY(_pthread_rwlock_destroy)}, /* PJT_RWLOCK_DESTROY */
{DUAL_ENTRY(_pthread_rwlock_init)}, /* PJT_RWLOCK_INIT */
{DUAL_ENTRY(_pthread_rwlock_rdlock)}, /* PJT_RWLOCK_RDLOCK */
{DUAL_ENTRY(_pthread_rwlock_tryrdlock)},/* PJT_RWLOCK_TRYRDLOCK */
{DUAL_ENTRY(_pthread_rwlock_trywrlock)},/* PJT_RWLOCK_TRYWRLOCK */
{DUAL_ENTRY(_pthread_rwlock_unlock)}, /* PJT_RWLOCK_UNLOCK */
{DUAL_ENTRY(_pthread_rwlock_wrlock)}, /* PJT_RWLOCK_WRLOCK */
{DUAL_ENTRY(_pthread_self)}, /* PJT_SELF */
{DUAL_ENTRY(_pthread_setspecific)}, /* PJT_SETSPECIFIC */
{DUAL_ENTRY(_pthread_sigmask)} /* PJT_SIGMASK */
};
extern int _thread_state_running;
static int init_once = 0;
/*
* Initialize the current thread.
* For the shared version of the threads library, the above is sufficient.
* But for the archive version of the library, we need a little bit more.
* Namely, we must arrange for this particular module to be pulled in from
* the archive library at link time. To accomplish that, we define and
* initialize a variable, "_thread_autoinit_dummy_decl". This variable is
* referenced (as an extern) from libc/stdlib/exit.c. This will always
* create a need for this module, ensuring that it is present in the
* executable.
*/
extern int _thread_autoinit_dummy_decl;
int _thread_autoinit_dummy_decl = 0;
void
init_td_common(struct pthread *td, struct pthread_attr *attrp, int reinit)
_thread_init_hack(void)
{
/*
* Some parts of a pthread are initialized only once.
*/
if (!reinit) {
memset(td, 0, sizeof(struct pthread));
td->cancelmode = M_DEFERRED;
td->cancelstate = M_DEFERRED;
td->cancellation = CS_NULL;
memcpy(&td->attr, attrp, sizeof(struct pthread_attr));
td->magic = PTHREAD_MAGIC;
TAILQ_INIT(&td->mutexq);
td->base_priority = PTHREAD_DEFAULT_PRIORITY;
td->active_priority = PTHREAD_DEFAULT_PRIORITY;
td->inherited_priority = PTHREAD_MIN_PRIORITY;
} else {
memset(&td->join_status, 0, sizeof(struct join_status));
}
td->joiner = NULL;
td->error = 0;
td->flags = 0;
_libpthread_init(NULL);
}
/*
* Initialize the active and dead threads list. Any threads in the active
* list will be removed and the thread td * will be marked as the
* initial thread and inserted in the list as the only thread. Any threads
* in the dead threads list will also be removed.
*/
void
init_tdlist(struct pthread *td, int reinit)
{
struct pthread *tdTemp, *tdTemp2;
_thread_initial = td;
td->name = strdup("_thread_initial");
/*
* If this is not the first initialization, remove any entries
* that may be in the list and deallocate their memory. Also
* destroy any global pthread primitives (they will be recreated).
*/
if (reinit) {
TAILQ_FOREACH_SAFE(tdTemp, &_thread_list, tle, tdTemp2) {
if (tdTemp != NULL && tdTemp != td) {
TAILQ_REMOVE(&_thread_list, tdTemp, tle);
free(tdTemp);
}
}
TAILQ_FOREACH_SAFE(tdTemp, &_dead_list, dle, tdTemp2) {
if (tdTemp != NULL) {
TAILQ_REMOVE(&_dead_list, tdTemp, dle);
free(tdTemp);
}
}
_pthread_mutex_destroy(&dead_list_lock);
} else {
TAILQ_INIT(&_thread_list);
TAILQ_INIT(&_dead_list);
/* Insert this thread as the first thread in the active list */
TAILQ_INSERT_HEAD(&_thread_list, td, tle);
}
/*
* Initialize the active thread list lock and the
* dead threads list lock.
*/
memset(&thread_list_lock, 0, sizeof(spinlock_t));
if (_pthread_mutex_init(&dead_list_lock,NULL) != 0)
PANIC("Failed to initialize garbage collector primitives");
}
/*
* Threaded process initialization
* Threaded process initialization.
*
* This is only called under two conditions:
*
* 1) Some thread routines have detected that the library hasn't yet
* been initialized (_thr_initial == NULL && curthread == NULL), or
*
* 2) An explicit call to reinitialize after a fork (indicated
* by curthread != NULL)
*/
void
_thread_init(void)
_libpthread_init(struct pthread *curthread)
{
struct pthread *pthread;
int fd;
size_t len;
int mib[2];
int error;
int fd, first = 0;
sigset_t sigset, oldset;
/* Check if this function has already been called: */
if (_thread_initial)
/* Only initialise the threaded application once. */
if ((_thr_initial != NULL) && (curthread == NULL))
/* Only initialize the threaded application once. */
return;
_pthread_page_size = getpagesize();
_pthread_guard_default = getpagesize();
if (sizeof(void *) == 8) {
_pthread_stack_default = PTHREAD_STACK64_DEFAULT;
_pthread_stack_initial = PTHREAD_STACK64_INITIAL;
}
else {
_pthread_stack_default = PTHREAD_STACK32_DEFAULT;
_pthread_stack_initial = PTHREAD_STACK32_INITIAL;
}
pthread_attr_default.guardsize_attr = _pthread_guard_default;
pthread_attr_default.stacksize_attr = _pthread_stack_default;
/*
* Make gcc quiescent about {,libgcc_}references not being
* referenced:
@ -273,11 +248,22 @@ _thread_init(void)
if ((references[0] == NULL) || (libgcc_references[0] == NULL))
PANIC("Failed loading mandatory references in _thread_init");
/* Pull debug symbols in for static binary */
_thread_state_running = PS_RUNNING;
/*
* Check the size of the jump table to make sure it is preset
* with the correct number of entries.
*/
if (sizeof(jmp_table) != (sizeof(pthread_func_t) * PJT_MAX * 2))
PANIC("Thread jump table not properly initialized");
memcpy(__thr_jtable, jmp_table, sizeof(jmp_table));
/*
* Check for the special case of this process running as
* or in place of init as pid = 1:
*/
if (getpid() == 1) {
if ((_thr_pid = getpid()) == 1) {
/*
* Setup a new session for this process which is
* assumed to be running as root.
@ -292,74 +278,141 @@ _thread_init(void)
PANIC("Can't set login to root");
if (__sys_ioctl(fd, TIOCSCTTY, (char *) NULL) == -1)
PANIC("Can't set controlling terminal");
if (__sys_dup2(fd, 0) == -1 ||
__sys_dup2(fd, 1) == -1 ||
__sys_dup2(fd, 2) == -1)
PANIC("Can't dup2");
}
/* Allocate memory for the thread structure of the initial thread: */
if ((pthread = (pthread_t) malloc(sizeof(struct pthread))) == NULL) {
/*
* Insufficient memory to initialise this application, so
* abort:
*/
PANIC("Cannot allocate memory for initial thread");
/* Initialize pthread private data. */
init_private();
/* Set the initial thread. */
if (curthread == NULL) {
first = 1;
/* Create and initialize the initial thread. */
curthread = _thr_alloc(NULL);
if (curthread == NULL)
PANIC("Can't allocate initial thread");
init_main_thread(curthread);
}
init_tdlist(pthread, 0);
init_td_common(pthread, &pthread_attr_default, 0);
pthread->arch_id = _set_curthread(NULL, pthread, &error);
/* Get our thread id. */
thr_self(&pthread->thr_id);
/* Find the stack top */
mib[0] = CTL_KERN;
mib[1] = KERN_USRSTACK;
len = sizeof (_usrstack);
if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1)
_usrstack = (void *)USRSTACK;
/*
* Create a red zone below the main stack. All other stacks are
* constrained to a maximum size by the paramters passed to
* mmap(), but this stack is only limited by resource limits, so
* this stack needs an explicitly mapped red zone to protect the
* thread stack that is just beyond.
* Add the thread to the thread list queue.
*/
if (mmap(_usrstack - _pthread_stack_initial -
_pthread_guard_default, _pthread_guard_default, 0,
MAP_ANON, -1, 0) == MAP_FAILED)
PANIC("Cannot allocate red zone for initial thread");
THR_LIST_ADD(curthread);
_thread_active_threads = 1;
/* Set the main thread stack pointer. */
pthread->stack = _usrstack - _pthread_stack_initial;
/* Setup the thread specific data */
_tcb_set(curthread->tcb);
/* Set the stack attributes. */
pthread->attr.stackaddr_attr = pthread->stack;
pthread->attr.stacksize_attr = _pthread_stack_initial;
/* Setup the context for initial thread. */
getcontext(&pthread->ctx);
pthread->ctx.uc_stack.ss_sp = pthread->stack;
pthread->ctx.uc_stack.ss_size = _pthread_stack_initial;
/* Initialize the atfork list and mutex */
TAILQ_INIT(&_atfork_list);
_pthread_mutex_init(&_atfork_mutex, NULL);
if (first) {
SIGFILLSET(sigset);
SIGDELSET(sigset, SIGTRAP);
__sys_sigprocmask(SIG_SETMASK, &sigset, &oldset);
_thr_signal_init();
_thr_initial = curthread;
SIGDELSET(oldset, SIGCANCEL);
__sys_sigprocmask(SIG_SETMASK, &oldset, NULL);
}
}
/*
* Special start up code for NetBSD/Alpha
* This function and pthread_create() do a lot of the same things.
* It'd be nice to consolidate the common stuff in one place.
*/
#if defined(__NetBSD__) && defined(__alpha__)
int
main(int argc, char *argv[], char *env);
int
_thread_main(int argc, char *argv[], char *env)
static void
init_main_thread(struct pthread *thread)
{
_thread_init();
return (main(argc, argv, env));
/* Setup the thread attributes. */
thr_self(&thread->tid);
thread->attr = _pthread_attr_default;
/*
* Set up the thread stack.
*
* Create a red zone below the main stack. All other stacks
* are constrained to a maximum size by the parameters
* passed to mmap(), but this stack is only limited by
* resource limits, so this stack needs an explicitly mapped
* red zone to protect the thread stack that is just beyond.
*/
if (mmap((void *)_usrstack - _thr_stack_initial -
_thr_guard_default, _thr_guard_default, 0, MAP_ANON,
-1, 0) == MAP_FAILED)
PANIC("Cannot allocate red zone for initial thread");
/*
* Mark the stack as an application supplied stack so that it
* isn't deallocated.
*
* XXX - I'm not sure it would hurt anything to deallocate
* the main thread stack because deallocation doesn't
* actually free() it; it just puts it in the free
* stack queue for later reuse.
*/
thread->attr.stackaddr_attr = (void *)_usrstack - _thr_stack_initial;
thread->attr.stacksize_attr = _thr_stack_initial;
thread->attr.guardsize_attr = _thr_guard_default;
thread->attr.flags |= THR_STACK_USER;
/*
* Write a magic value to the thread structure
* to help identify valid ones:
*/
thread->magic = THR_MAGIC;
thread->cancelflags = PTHREAD_CANCEL_ENABLE | PTHREAD_CANCEL_DEFERRED;
thread->name = strdup("initial thread");
/* Default the priority of the initial thread: */
thread->base_priority = THR_DEFAULT_PRIORITY;
thread->active_priority = THR_DEFAULT_PRIORITY;
thread->inherited_priority = 0;
/* Initialize the mutex queue: */
TAILQ_INIT(&thread->mutexq);
TAILQ_INIT(&thread->pri_mutexq);
thread->state = PS_RUNNING;
thread->uniqueid = 0;
/* Others cleared to zero by thr_alloc() */
}
/*
 * Initialize library-private state.  This runs both on first
 * initialization and again in the child after fork() (via
 * _libpthread_init), so the lock resets before the init_once check
 * must be safe to repeat; after a fork they may have been left held
 * by threads that no longer exist in the child.
 */
static void
init_private(void)
{
	size_t len;
	int mib[2];

	/* Reset the process-wide static-object locks. */
	_thr_umtx_init(&_mutex_static_lock);
	_thr_umtx_init(&_cond_static_lock);
	_thr_umtx_init(&_rwlock_static_lock);
	_thr_umtx_init(&_keytable_lock);
	_thr_umtx_init(&_thr_atfork_lock);
	_thr_spinlock_init();
	_thr_list_init();

	/*
	 * Avoid reinitializing some things if they don't need to be,
	 * e.g. after a fork().
	 */
	if (init_once == 0) {
		/* Find the stack top */
		mib[0] = CTL_KERN;
		mib[1] = KERN_USRSTACK;
		len = sizeof (_usrstack);
		if (sysctl(mib, 2, &_usrstack, &len, NULL, 0) == -1)
			PANIC("Cannot get kern.usrstack from sysctl");
		_thr_page_size = getpagesize();
		/* Default guard area is one page. */
		_thr_guard_default = _thr_page_size;
		_pthread_attr_default.guardsize_attr = _thr_guard_default;
		_pthread_attr_default.stacksize_attr = _thr_stack_default;
		TAILQ_INIT(&_thr_atfork_list);
#ifdef SYSTEM_SCOPE_ONLY
		_thr_scope_system = 1;
#else
		/*
		 * Allow the environment to force all threads into system
		 * scope (1) or process scope (-1); unset leaves the
		 * compiled-in default (0).
		 */
		if (getenv("LIBPTHREAD_SYSTEM_SCOPE") != NULL)
			_thr_scope_system = 1;
		else if (getenv("LIBPTHREAD_PROCESS_SCOPE") != NULL)
			_thr_scope_system = -1;
#endif
	}
	init_once = 1;
}

View File

@ -31,170 +31,75 @@
*
* $FreeBSD$
*/
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include "thr_private.h"
__weak_reference(_pthread_join, pthread_join);
/*
 * Cancellation cleanup handler for pthread_join(): withdraw the
 * caller's registration as the joiner of the target thread so a
 * cancelled joiner does not leave a stale pointer behind.
 */
static void backout_join(void *arg)
{
	struct pthread *joinee = (struct pthread *)arg;
	struct pthread *curthread = _get_curthread();

	/* The joiner field is protected by the thread-list lock. */
	THREAD_LIST_LOCK(curthread);
	joinee->joiner = NULL;
	THREAD_LIST_UNLOCK(curthread);
}
int
_pthread_join(pthread_t pthread, void **thread_return)
{
int ret, dead;
pthread_t thread;
struct pthread *curthread = _get_curthread();
void *tmp;
long state;
int oldcancel;
int ret = 0;
/* Check if the caller has specified an invalid thread: */
if (pthread->magic != PTHREAD_MAGIC)
/* Invalid thread: */
return(EINVAL);
if (pthread == NULL)
return (EINVAL);
/* Check if the caller has specified itself: */
if (pthread == curthread)
/* Avoid a deadlock condition: */
return(EDEADLK);
return (EDEADLK);
/*
* Search for the specified thread in the list of active threads. This
* is done manually here rather than calling _find_thread() because
* the searches in _thread_list and _dead_list (as well as setting up
* join/detach state) have to be done atomically.
*/
ret = 0;
dead = 0;
thread = NULL;
_thread_sigblock();
DEAD_LIST_LOCK;
THREAD_LIST_LOCK;
if (!pthread->isdead) {
TAILQ_FOREACH(thread, &_thread_list, tle) {
if (thread == pthread) {
PTHREAD_LOCK(pthread);
break;
}
}
}
if (thread == NULL) {
TAILQ_FOREACH(thread, &_dead_list, dle) {
if (thread == pthread) {
PTHREAD_LOCK(pthread);
dead = 1;
break;
}
}
}
/* Check if the thread was not found or has been detached: */
if (thread == NULL) {
THREAD_LIST_UNLOCK;
DEAD_LIST_UNLOCK;
_thread_sigunblock();
THREAD_LIST_LOCK(curthread);
if ((ret = _thr_find_thread(curthread, pthread, 1)) != 0) {
ret = ESRCH;
goto out;
}
if ((pthread->attr.flags & PTHREAD_DETACHED) != 0) {
PTHREAD_UNLOCK(pthread);
THREAD_LIST_UNLOCK;
DEAD_LIST_UNLOCK;
_thread_sigunblock();
ret = EINVAL;
goto out;
}
if (pthread->joiner != NULL) {
} else if ((pthread->tlflags & TLFLAGS_DETACHED) != 0) {
ret = ESRCH;
} else if (pthread->joiner != NULL) {
/* Multiple joiners are not supported. */
/* XXXTHR - support multiple joiners. */
PTHREAD_UNLOCK(pthread);
THREAD_LIST_UNLOCK;
DEAD_LIST_UNLOCK;
_thread_sigunblock();
ret = ENOTSUP;
goto out;
}
if (ret) {
THREAD_LIST_UNLOCK(curthread);
return (ret);
}
/* Set the running thread to be the joiner: */
pthread->joiner = curthread;
THREAD_LIST_UNLOCK(curthread);
THR_CLEANUP_PUSH(curthread, backout_join, pthread);
oldcancel = _thr_cancel_enter(curthread);
while ((state = pthread->state) != PS_DEAD) {
_thr_umtx_wait(&pthread->state, state, NULL);
}
/* Check if the thread is not dead: */
if (!dead) {
/* Set the running thread to be the joiner: */
pthread->joiner = curthread;
PTHREAD_UNLOCK(pthread);
_thr_cancel_leave(curthread, oldcancel);
THR_CLEANUP_POP(curthread, 0);
/* Keep track of which thread we're joining to: */
curthread->join_status.thread = pthread;
tmp = pthread->ret;
THREAD_LIST_LOCK(curthread);
pthread->tlflags |= TLFLAGS_DETACHED;
THR_GCLIST_ADD(pthread);
THREAD_LIST_UNLOCK(curthread);
while (curthread->join_status.thread == pthread) {
/* Wait for our signal to wake up. */
THREAD_LIST_UNLOCK;
DEAD_LIST_UNLOCK;
_thread_sigunblock();
if (curthread->cancellation != CS_NULL)
pthread->joiner = NULL;
_thread_enter_cancellation_point();
if (thread_return != NULL)
*thread_return = tmp;
/*
* XXX - Workaround to make a join a cancellation
* point. Must find a better solution.
*/
PTHREAD_LOCK(curthread);
curthread->flags |= PTHREAD_FLAGS_SUSPENDED;
PTHREAD_UNLOCK(curthread);
ret = _thread_suspend(curthread, NULL);
if (ret != 0 && ret != EAGAIN && ret != EINTR)
PANIC("Unable to suspend in join.");
PTHREAD_LOCK(curthread);
curthread->flags &= ~PTHREAD_FLAGS_SUSPENDED;
PTHREAD_UNLOCK(curthread);
if (curthread->cancellation != CS_NULL)
pthread->joiner = NULL;
_thread_leave_cancellation_point();
/*
* XXX - For correctness reasons.
	 * We must acquire these in the same order and also
* importantly, release in the same order because
* otherwise we might deadlock with the joined thread
* when we attempt to release one of these locks.
*/
_thread_sigblock();
DEAD_LIST_LOCK;
THREAD_LIST_LOCK;
}
/*
* The thread return value and error are set by the thread we're
* joining to when it exits or detaches:
*/
ret = curthread->join_status.error;
if ((ret == 0) && (thread_return != NULL))
*thread_return = curthread->join_status.ret;
THREAD_LIST_UNLOCK;
DEAD_LIST_UNLOCK;
_thread_sigunblock();
} else {
/*
* The thread exited (is dead) without being detached, and no
* thread has joined it.
*/
/* Check if the return value is required: */
if (thread_return != NULL) {
/* Return the thread's return value: */
*thread_return = pthread->ret;
}
/* Free all remaining memory allocated to the thread. */
pthread->attr.flags |= PTHREAD_DETACHED;
PTHREAD_UNLOCK(pthread);
TAILQ_REMOVE(&_dead_list, pthread, dle);
deadlist_free_onethread(pthread);
THREAD_LIST_UNLOCK;
DEAD_LIST_UNLOCK;
_thread_sigunblock();
}
out:
_thread_leave_cancellation_point();
/* Return the completion status: */
return (ret);
}

View File

@ -1,5 +1,6 @@
/*
* Copyright (c) 2003 Jeffrey Roberson <jeff@freebsd.org>
* Copyright (c) 2005 David Xu <davidxu@freebsd.org>
* Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -26,120 +27,74 @@
* $FreeBSD$
*/
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/signalvar.h>
#include <sys/time.h>
#include <sys/timespec.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include "thr_private.h"
/* XXX Why can't I get this from time.h? :-( */
#define timespecsub(vvp, uvp) \
do { \
(vvp)->tv_sec -= (uvp)->tv_sec; \
(vvp)->tv_nsec -= (uvp)->tv_nsec; \
if ((vvp)->tv_nsec < 0) { \
(vvp)->tv_sec--; \
(vvp)->tv_nsec += 1000000000; \
} \
} while (0)
/*#define DEBUG_THREAD_KERN */
#ifdef DEBUG_THREAD_KERN
#define DBG_MSG stdout_debug
#else
#define DBG_MSG(x...)
#endif
void
_thread_critical_enter(pthread_t pthread)
/*
* This is called when the first thread (other than the initial
* thread) is created.
*/
int
_thr_setthreaded(int threaded)
{
_thread_sigblock();
UMTX_LOCK(&pthread->lock);
if (((threaded == 0) ^ (__isthreaded == 0)) == 0)
return (0);
__isthreaded = threaded;
#if 0
if (threaded != 0) {
_thr_rtld_init();
} else {
_thr_rtld_fini();
}
#endif
return (0);
}
void
_thread_critical_exit(pthread_t pthread)
{
UMTX_UNLOCK(&pthread->lock);
_thread_sigunblock();
}
void
_thread_sigblock()
_thr_signal_block(struct pthread *curthread)
{
sigset_t set;
sigset_t sav;
/*
* Block all signals.
*/
if (curthread->sigblock > 0) {
curthread->sigblock++;
return;
}
SIGFILLSET(set);
SIGDELSET(set, SIGBUS);
SIGDELSET(set, SIGILL);
SIGDELSET(set, SIGFPE);
SIGDELSET(set, SIGSEGV);
SIGDELSET(set, SIGTRAP);
/* If we have already blocked signals, just up the refcount */
if (++curthread->signest > 1)
return;
PTHREAD_ASSERT(curthread->signest == 1,
("Blocked signal nesting level must be 1!"));
if (__sys_sigprocmask(SIG_SETMASK, &set, &sav)) {
_thread_printf(STDERR_FILENO, "Critical Enter: sig err %d\n",
errno);
abort();
}
curthread->savedsig = sav;
__sys_sigprocmask(SIG_BLOCK, &set, &curthread->sigmask);
curthread->sigblock++;
}
void
_thread_sigunblock()
_thr_signal_unblock(struct pthread *curthread)
{
sigset_t set;
/* We might be in a nested 'blocked signal' section */
if (--curthread->signest > 0)
return;
PTHREAD_ASSERT(curthread->signest == 0,
("Non-Zero blocked signal nesting level."));
/*
* Restore signals.
*/
set = curthread->savedsig;
if (__sys_sigprocmask(SIG_SETMASK, &set, NULL)) {
_thread_printf(STDERR_FILENO, "Critical Exit: sig err %d\n",
errno);
abort();
}
if (--curthread->sigblock == 0)
__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
}
int
_thread_suspend(pthread_t pthread, const struct timespec *abstime)
_thr_send_sig(struct pthread *thread, int sig)
{
struct timespec remaining;
struct timespec *ts;
int error;
/*
* Compute the remainder of the run time.
*/
if (abstime) {
struct timespec now;
struct timeval tv;
GET_CURRENT_TOD(tv);
TIMEVAL_TO_TIMESPEC(&tv, &now);
remaining = *abstime;
timespecsub(&remaining, &now);
ts = &remaining;
/*
* NOTE: timespecsub() makes sure the tv_nsec member >= 0.
*/
if (ts->tv_sec < 0)
return (ETIMEDOUT);
} else
ts = NULL;
error = thr_suspend(ts);
return (error == -1 ? errno : error);
return thr_kill(thread->tid, sig);
}
/*
 * Invariant-violation helper: called when a thread's lock level is
 * found to be <= 0; aborts the process via PANIC().  Declared with a
 * proper C prototype -- the original empty parameter list `()` is an
 * old-style (K&R) declaration that disables argument checking.
 */
void
_thr_assert_lock_level(void)
{
	PANIC("locklevel <= 0");
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>
* Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,28 +31,37 @@
*
* $FreeBSD$
*/
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <signal.h>
#include <pthread.h>
#include "thr_private.h"
__weak_reference(_pthread_condattr_init, pthread_condattr_init);
__weak_reference(_pthread_kill, pthread_kill);
int
_pthread_condattr_init(pthread_condattr_t *attr)
_pthread_kill(pthread_t pthread, int sig)
{
struct pthread *curthread = _get_curthread();
int ret;
pthread_condattr_t pattr;
if ((pattr = (pthread_condattr_t)
malloc(sizeof(struct pthread_cond_attr))) == NULL) {
ret = ENOMEM;
} else {
memcpy(pattr, &pthread_condattr_default,
sizeof(struct pthread_cond_attr));
*attr = pattr;
ret = 0;
/* Check for invalid signal numbers: */
if (sig < 0 || sig > _SIG_MAXSIG)
/* Invalid signal: */
ret = EINVAL;
/*
* Ensure the thread is in the list of active threads, and the
* signal is valid (signal 0 specifies error checking only) and
* not being ignored:
*/
else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
== 0) {
if (sig > 0)
_thr_send_sig(pthread, sig);
_thr_ref_delete(curthread, pthread);
}
return(ret);
/* Return the completion status: */
return (ret);
}

View File

@ -0,0 +1,342 @@
/*
* Copyright (c) 2005 David Xu <davidxu@freebsd.org>
* Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/types.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include "thr_private.h"
#include "libc_private.h"
/*#define DEBUG_THREAD_LIST */
#ifdef DEBUG_THREAD_LIST
#define DBG_MSG stdout_debug
#else
#define DBG_MSG(x...)
#endif
/*
* Define a high water mark for the maximum number of threads that
* will be cached. Once this level is reached, any extra threads
* will be free()'d.
*/
#define MAX_CACHED_THREADS 100
/*
* We've got to keep track of everything that is allocated, not only
* to have a speedy free list, but also so they can be deallocated
* after a fork().
*/
static TAILQ_HEAD(, pthread) free_threadq;
static umtx_t free_thread_lock;
static umtx_t tcb_lock;
static int free_thread_count = 0;
static int inited = 0;
static u_int64_t next_uniqueid = 1;
LIST_HEAD(thread_hash_head, pthread);
#define HASH_QUEUES 128
static struct thread_hash_head thr_hashtable[HASH_QUEUES];
#define THREAD_HASH(thrd) (((unsigned long)thrd >> 12) % HASH_QUEUES)
static void thr_destroy(struct pthread *curthread, struct pthread *thread);
/*
 * Initialize the thread-list module: reset the GC counter, the module
 * locks, and the thread queues.  Called at library startup and again
 * in the child after fork().
 */
void
_thr_list_init(void)
{
	int i;

	_gc_count = 0;
	_thr_umtx_init(&_thr_list_lock);
	TAILQ_INIT(&_thread_list);
	TAILQ_INIT(&free_threadq);
	_thr_umtx_init(&free_thread_lock);
	_thr_umtx_init(&tcb_lock);
	if (inited) {
		/*
		 * Only a re-initialization (after fork) needs to empty the
		 * hash buckets; on the first call the static table is
		 * already zeroed.
		 */
		for (i = 0; i < HASH_QUEUES; ++i)
			LIST_INIT(&thr_hashtable[i]);
	}
	inited = 1;
}
/*
 * Garbage-collect terminated threads.  Threads on the GC list whose
 * kernel part has exited get their stacks released; detached,
 * unreferenced threads are additionally unlinked and moved to a
 * private work list so the actual freeing happens with the thread
 * list lock dropped.
 */
void
_thr_gc(struct pthread *curthread)
{
	struct pthread *td, *td_next;
	TAILQ_HEAD(, pthread) worklist;

	TAILQ_INIT(&worklist);
	THREAD_LIST_LOCK(curthread);

	/* Check the threads waiting for GC. */
	for (td = TAILQ_FIRST(&_thread_gc_list); td != NULL; td = td_next) {
		td_next = TAILQ_NEXT(td, gcle);
		if (td->terminated == 0) {
			/* make sure we are not still in userland */
			continue;
		}
		/* The kernel is done with the thread; recycle its stack. */
		_thr_stack_free(&td->attr);
		if (((td->tlflags & TLFLAGS_DETACHED) != 0) &&
		    (td->refcount == 0)) {
			THR_GCLIST_REMOVE(td);
			/*
			 * The thread has detached and is no longer
			 * referenced.  It is safe to remove all
			 * remnants of the thread.
			 */
			THR_LIST_REMOVE(td);
			TAILQ_INSERT_HEAD(&worklist, td, gcle);
		}
	}
	THREAD_LIST_UNLOCK(curthread);

	/* Free the collected threads outside the list lock. */
	while ((td = TAILQ_FIRST(&worklist)) != NULL) {
		TAILQ_REMOVE(&worklist, td, gcle);
		/*
		 * XXX we don't free the initial thread, because there might
		 * still be code referencing it.
		 */
		if (td == _thr_initial) {
			DBG_MSG("Initial thread won't be freed\n");
			continue;
		}
		DBG_MSG("Freeing thread %p\n", td);
		_thr_free(curthread, td);
	}
}
/*
 * Allocate a thread structure, preferring the cache of previously
 * freed structures.  A NULL curthread indicates early startup (the
 * initial thread), where locking is unavailable and the initial TLS
 * block must be used.  Returns NULL on malloc or TCB failure.
 */
struct pthread *
_thr_alloc(struct pthread *curthread)
{
	struct pthread *thread = NULL;
	struct tcb *tcb;

	if (curthread != NULL) {
		/* Opportunistically reap terminated threads first. */
		if (GC_NEEDED())
			_thr_gc(curthread);
		if (free_thread_count > 0) {
			THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
			if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
				TAILQ_REMOVE(&free_threadq, thread, tle);
				free_thread_count--;
			}
			THR_LOCK_RELEASE(curthread, &free_thread_lock);
		}
	}
	if (thread == NULL) {
		/* Cache empty (or startup): allocate a fresh structure. */
		thread = malloc(sizeof(struct pthread));
		if (thread == NULL)
			return (NULL);
	}
	if (curthread != NULL) {
		THR_LOCK_ACQUIRE(curthread, &tcb_lock);
		tcb = _tcb_ctor(thread, 0 /* not initial tls */);
		THR_LOCK_RELEASE(curthread, &tcb_lock);
	} else {
		tcb = _tcb_ctor(thread, 1 /* initial tls */);
	}
	if (tcb != NULL) {
		memset(thread, 0, sizeof(*thread));
		thread->tcb = tcb;
	} else {
		/* TCB construction failed; give the structure back. */
		thr_destroy(curthread, thread);
		thread = NULL;
	}
	return (thread);
}
/*
 * Release a thread structure.  The TCB is always destroyed, while the
 * pthread structure itself is cached (up to MAX_CACHED_THREADS)
 * unless we are in early startup (curthread == NULL).
 */
void
_thr_free(struct pthread *curthread, struct pthread *thread)
{
	DBG_MSG("Freeing thread %p\n", thread);
	if (thread->name) {
		free(thread->name);
		thread->name = NULL;
	}
	/*
	 * Always free tcb, as we only know it is part of RTLD TLS
	 * block, but don't know its detail and can not assume how
	 * it works, so better to avoid caching it here.
	 */
	if (curthread != NULL) {
		THR_LOCK_ACQUIRE(curthread, &tcb_lock);
		_tcb_dtor(thread->tcb);
		THR_LOCK_RELEASE(curthread, &tcb_lock);
	} else {
		/* Startup path: no locking available yet. */
		_tcb_dtor(thread->tcb);
	}
	thread->tcb = NULL;
	if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) {
		thr_destroy(curthread, thread);
	} else {
		/*
		 * Add the thread to the free thread list; this also avoids
		 * reusing a pthread id too quickly, which may help some
		 * buggy apps.
		 */
		THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
		TAILQ_INSERT_TAIL(&free_threadq, thread, tle);
		free_thread_count++;
		THR_LOCK_RELEASE(curthread, &free_thread_lock);
	}
}
/* Release the memory backing a thread structure. */
static void
thr_destroy(struct pthread *curthread __unused, struct pthread *thread)
{
	free(thread);
}
/*
 * Add an active thread:
 *
 *  o Assign the thread a unique id (which GDB uses to track
 *    threads).
 *  o Add the thread to the list of all threads and increment
 *    number of active threads.
 */
void
_thr_link(struct pthread *curthread, struct pthread *thread)
{
	THREAD_LIST_LOCK(curthread);
	/*
	 * Initialize the unique id (which GDB uses to track
	 * threads) and add the thread to the list of all threads.
	 */
	thread->uniqueid = next_uniqueid++;
	THR_LIST_ADD(thread);
	/* Record detach state so GC can reap the thread without a join. */
	if (thread->attr.flags & PTHREAD_DETACHED)
		thread->tlflags |= TLFLAGS_DETACHED;
	_thread_active_threads++;
	THREAD_LIST_UNLOCK(curthread);
}
/*
 * Remove an active thread from the global thread list and decrement
 * the active-thread count.
 */
void
_thr_unlink(struct pthread *curthread, struct pthread *thread)
{
	THREAD_LIST_LOCK(curthread);
	THR_LIST_REMOVE(thread);
	_thread_active_threads--;
	THREAD_LIST_UNLOCK(curthread);
}
/*
 * Insert a thread into its bucket of the thread hash table so that
 * _thr_hash_find() can later validate the handle.
 */
void
_thr_hash_add(struct pthread *thread)
{
	LIST_INSERT_HEAD(&thr_hashtable[THREAD_HASH(thread)], thread, hle);
}
/* Remove a thread from the thread hash table. */
void
_thr_hash_remove(struct pthread *thread)
{
	LIST_REMOVE(thread, hle);
}
/*
 * Look a thread pointer up in the hash table.  Returns the pointer
 * itself when the thread is known to the library, NULL when the
 * handle is stale or bogus.
 */
struct pthread *
_thr_hash_find(struct pthread *thread)
{
	struct thread_hash_head *bucket;
	struct pthread *p;

	bucket = &thr_hashtable[THREAD_HASH(thread)];
	LIST_FOREACH(p, bucket, hle) {
		if (p == thread)
			return (thread);
	}
	return (NULL);
}
/*
 * Find a thread in the linked list of active threads and add a reference
 * to it.  Threads with positive reference counts will not be deallocated
 * until all references are released.
 */
int
_thr_ref_add(struct pthread *curthread, struct pthread *thread,
    int include_dead)
{
	int ret;

	if (thread == NULL)
		/* Invalid thread: */
		return (EINVAL);

	THREAD_LIST_LOCK(curthread);
	if ((ret = _thr_find_thread(curthread, thread, include_dead)) == 0) {
		/* Pin the thread while the caller uses it. */
		thread->refcount++;
	}
	THREAD_LIST_UNLOCK(curthread);

	/* Return zero if the thread exists: */
	return (ret);
}
/*
 * Drop a reference taken with _thr_ref_add().  If this was the last
 * reference and the thread is already marked GC-safe, queue it for
 * garbage collection.
 */
void
_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
{
	if (thread != NULL) {
		THREAD_LIST_LOCK(curthread);
		thread->refcount--;
		if ((thread->refcount == 0) &&
		    (thread->tlflags & TLFLAGS_GC_SAFE) != 0)
			THR_GCLIST_ADD(thread);
		THREAD_LIST_UNLOCK(curthread);
	}
}
/*
 * Check that 'thread' refers to a thread known to the library.
 * Returns 0 when the thread exists (and, unless include_dead is set,
 * is not PS_DEAD), EINVAL for a NULL handle, and ESRCH otherwise.
 */
int
_thr_find_thread(struct pthread *curthread, struct pthread *thread,
    int include_dead)
{
	struct pthread *found;

	if (thread == NULL)
		/* Invalid thread: */
		return (EINVAL);
	found = _thr_hash_find(thread);
	if (found != NULL && include_dead == 0 && found->state == PS_DEAD)
		found = NULL;
	/* Return zero if the thread exists: */
	return (found != NULL ? 0 : ESRCH);
}

View File

@ -24,11 +24,12 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
* $FreeBSD$
*/
#include <pthread.h>
#include <pthread_np.h>
#include "thr_private.h"
__weak_reference(_pthread_main_np, pthread_main_np);
@ -40,8 +41,8 @@ int
_pthread_main_np()
{
if (!_thread_initial)
if (!_thr_initial)
return (-1);
else
return (pthread_equal(pthread_self(), _thread_initial) ? 1 : 0);
return (pthread_equal(pthread_self(), _thr_initial) ? 1 : 0);
}

File diff suppressed because it is too large Load Diff

View File

@ -31,6 +31,7 @@
*
* $FreeBSD$
*/
#include <string.h>
#include <stdlib.h>
#include <errno.h>
@ -47,7 +48,9 @@ _pthread_mutexattr_getprioceiling(pthread_mutexattr_t *mattr, int *prioceiling)
{
int ret = 0;
if (*mattr == NULL)
if ((mattr == NULL) || (*mattr == NULL))
ret = EINVAL;
else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT)
ret = EINVAL;
else
*prioceiling = (*mattr)->m_ceiling;
@ -60,26 +63,30 @@ _pthread_mutexattr_setprioceiling(pthread_mutexattr_t *mattr, int prioceiling)
{
int ret = 0;
if (*mattr == NULL)
if ((mattr == NULL) || (*mattr == NULL))
ret = EINVAL;
else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT)
ret = EINVAL;
else if (prioceiling <= PTHREAD_MAX_PRIORITY &&
prioceiling >= PTHREAD_MIN_PRIORITY)
(*mattr)->m_ceiling = prioceiling;
else
ret = EINVAL;
(*mattr)->m_ceiling = prioceiling;
return (ret);
return(ret);
}
int
_pthread_mutex_getprioceiling(pthread_mutex_t *mutex,
int *prioceiling)
{
if (*mutex == NULL)
return (EINVAL);
int ret;
if ((mutex == NULL) || (*mutex == NULL))
ret = EINVAL;
else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
ret = EINVAL;
else
*prioceiling = (*mutex)->m_prio;
return (0);
ret = (*mutex)->m_prio;
return(ret);
}
int
@ -87,27 +94,23 @@ _pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
int prioceiling, int *old_ceiling)
{
int ret = 0;
int tmp;
if (*mutex == NULL)
return (EINVAL);
else if (prioceiling > PTHREAD_MAX_PRIORITY ||
prioceiling < PTHREAD_MIN_PRIORITY)
return (EINVAL);
/*
* Because of the use of pthread_mutex_unlock(), the
* priority ceiling of a mutex cannot be changed
* while the mutex is held by another thread. It also,
* means that the the thread trying to change the
* priority ceiling must adhere to prio protection rules.
*/
if ((ret = pthread_mutex_lock(mutex)) == 0) {
/* Return the old ceiling and set the new ceiling: */
*old_ceiling = (*mutex)->m_prio;
if ((mutex == NULL) || (*mutex == NULL))
ret = EINVAL;
else if ((*mutex)->m_protocol != PTHREAD_PRIO_PROTECT)
ret = EINVAL;
/* Lock the mutex: */
else if ((ret = pthread_mutex_lock(mutex)) == 0) {
tmp = (*mutex)->m_prio;
/* Set the new ceiling: */
(*mutex)->m_prio = prioceiling;
/* Unlock the mutex: */
ret = pthread_mutex_unlock(mutex);
/* Return the old ceiling: */
*old_ceiling = tmp;
}
return(ret);
}

View File

@ -31,6 +31,7 @@
*
* $FreeBSD$
*/
#include <string.h>
#include <stdlib.h>
#include <errno.h>
@ -43,19 +44,28 @@ __weak_reference(_pthread_mutexattr_setprotocol, pthread_mutexattr_setprotocol);
int
_pthread_mutexattr_getprotocol(pthread_mutexattr_t *mattr, int *protocol)
{
if (*mattr == NULL)
return (EINVAL);
*protocol = (*mattr)->m_protocol;
return(0);
int ret = 0;
if ((mattr == NULL) || (*mattr == NULL))
ret = EINVAL;
else
*protocol = (*mattr)->m_protocol;
return(ret);
}
int
_pthread_mutexattr_setprotocol(pthread_mutexattr_t *mattr, int protocol)
{
if (*mattr == NULL || protocol < PTHREAD_PRIO_NONE ||
protocol > PTHREAD_PRIO_PROTECT)
return (EINVAL);
(*mattr)->m_protocol = protocol;
(*mattr)->m_ceiling = PTHREAD_MAX_PRIORITY;
return(0);
int ret = 0;
if ((mattr == NULL) || (*mattr == NULL) ||
(protocol < PTHREAD_PRIO_NONE) || (protocol > PTHREAD_PRIO_PROTECT))
ret = EINVAL;
else {
(*mattr)->m_protocol = protocol;
(*mattr)->m_ceiling = THR_MAX_PRIORITY;
}
return(ret);
}

View File

@ -31,14 +31,70 @@
*
* $FreeBSD$
*/
/*
* Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by John Birrell.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>
#include "thr_private.h"
__weak_reference(_pthread_mutexattr_init, pthread_mutexattr_init);
__weak_reference(_pthread_mutexattr_setkind_np, pthread_mutexattr_setkind_np);
__weak_reference(_pthread_mutexattr_getkind_np, pthread_mutexattr_getkind_np);
__weak_reference(_pthread_mutexattr_gettype, pthread_mutexattr_gettype);
__weak_reference(_pthread_mutexattr_settype, pthread_mutexattr_settype);
__weak_reference(_pthread_mutexattr_destroy, pthread_mutexattr_destroy);
int
_pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
int ret;
pthread_mutexattr_t pattr;
if ((pattr = (pthread_mutexattr_t)
malloc(sizeof(struct pthread_mutex_attr))) == NULL) {
ret = ENOMEM;
} else {
memcpy(pattr, &_pthread_mutexattr_default,
sizeof(struct pthread_mutex_attr));
*attr = pattr;
ret = 0;
}
return (ret);
}
int
_pthread_mutexattr_setkind_np(pthread_mutexattr_t *attr, int kind)
@ -71,10 +127,9 @@ int
_pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
int ret;
if (*attr == NULL || type < PTHREAD_MUTEX_ERRORCHECK ||
type >= MUTEX_TYPE_MAX) {
if (attr == NULL || *attr == NULL || type >= MUTEX_TYPE_MAX) {
errno = EINVAL;
ret = EINVAL;
ret = -1;
} else {
(*attr)->m_type = type;
ret = 0;
@ -96,3 +151,17 @@ _pthread_mutexattr_gettype(pthread_mutexattr_t *attr, int *type)
}
return ret;
}
int
_pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
int ret;
if (attr == NULL || *attr == NULL) {
ret = EINVAL;
} else {
free(*attr);
*attr = NULL;
ret = 0;
}
return(ret);
}

View File

@ -31,23 +31,68 @@
*
* $FreeBSD$
*/
#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"
__weak_reference(_pthread_once, pthread_once);
int
_pthread_once(pthread_once_t * once_control, void (*init_routine) (void))
#define ONCE_NEVER_DONE PTHREAD_NEEDS_INIT
#define ONCE_DONE PTHREAD_DONE_INIT
#define ONCE_IN_PROGRESS 0x02
#define ONCE_MASK 0x03
static pthread_mutex_t once_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t once_cv = PTHREAD_COND_INITIALIZER;
/*
* POSIX:
* The pthread_once() function is not a cancellation point. However,
* if init_routine is a cancellation point and is canceled, the effect
* on once_control shall be as if pthread_once() was never called.
*/
static void
once_cancel_handler(void *arg)
{
if (once_control->state == PTHREAD_NEEDS_INIT) {
if (_thread_initial == NULL)
_thread_init();
pthread_mutex_lock(&(once_control->mutex));
if (once_control->state == PTHREAD_NEEDS_INIT) {
init_routine();
once_control->state = PTHREAD_DONE_INIT;
}
pthread_mutex_unlock(&(once_control->mutex));
pthread_once_t *once_control = arg;
_pthread_mutex_lock(&once_lock);
once_control->state = ONCE_NEVER_DONE;
_pthread_mutex_unlock(&once_lock);
_pthread_cond_broadcast(&once_cv);
}
/*
 * Run init_routine exactly once per once_control, even under
 * concurrent callers.
 *
 * POSIX: pthread_once() is not itself a cancellation point, but if
 * init_routine is canceled the effect on once_control must be as if
 * pthread_once() was never called; once_cancel_handler restores the
 * ONCE_NEVER_DONE state in that case.
 */
int
_pthread_once(pthread_once_t *once_control, void (*init_routine) (void))
{
	int wakeup = 0;

	/* Fast path: initialization already completed. */
	if (once_control->state == ONCE_DONE)
		return (0);
	_pthread_mutex_lock(&once_lock);
	/* Wait while another thread is running the initializer. */
	while (*(volatile int *)&(once_control->state) == ONCE_IN_PROGRESS)
		_pthread_cond_wait(&once_cv, &once_lock);
	/*
	 * If previous thread was canceled, then the state still
	 * could be ONCE_NEVER_DONE, we need to check it again.
	 */
	if (*(volatile int *)&(once_control->state) == ONCE_NEVER_DONE) {
		once_control->state = ONCE_IN_PROGRESS;
		_pthread_mutex_unlock(&once_lock);
		/* Roll the state back if init_routine is canceled. */
		_pthread_cleanup_push(once_cancel_handler, once_control);
		init_routine();
		_pthread_cleanup_pop(0);
		_pthread_mutex_lock(&once_lock);
		once_control->state = ONCE_DONE;
		wakeup = 1;
	}
	_pthread_mutex_unlock(&once_lock);
	if (wakeup)
		_pthread_cond_broadcast(&once_cv);
	return (0);
}

View File

@ -22,15 +22,10 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/fcntl.h>
#include <sys/uio.h>
#include <errno.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
@ -57,15 +52,20 @@ _thread_printf(int fd, const char *fmt, ...)
{
static const char digits[16] = "0123456789abcdef";
va_list ap;
char buf[10];
char buf[20];
char *s;
unsigned r, u;
int c, d;
unsigned long r, u;
int c;
long d;
int islong;
va_start(ap, fmt);
while ((c = *fmt++)) {
islong = 0;
if (c == '%') {
c = *fmt++;
next: c = *fmt++;
if (c == '\0')
goto out;
switch (c) {
case 'c':
pchar(fd, va_arg(ap, int));
@ -73,20 +73,31 @@ _thread_printf(int fd, const char *fmt, ...)
case 's':
pstr(fd, va_arg(ap, char *));
continue;
case 'l':
islong = 1;
goto next;
case 'p':
islong = 1;
case 'd':
case 'u':
case 'p':
case 'x':
r = ((c == 'u') || (c == 'd')) ? 10 : 16;
if (c == 'd') {
d = va_arg(ap, unsigned);
if (islong)
d = va_arg(ap, unsigned long);
else
d = va_arg(ap, unsigned);
if (d < 0) {
pchar(fd, '-');
u = (unsigned)(d * -1);
u = (unsigned long)(d * -1);
} else
u = (unsigned)d;
} else
u = va_arg(ap, unsigned);
u = (unsigned long)d;
} else {
if (islong)
u = va_arg(ap, unsigned long);
else
u = va_arg(ap, unsigned);
}
s = buf;
do {
*s++ = digits[u % r];
@ -98,6 +109,7 @@ _thread_printf(int fd, const char *fmt, ...)
}
pchar(fd, c);
}
out:
va_end(ap);
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,133 @@
/*-
* Copyright (c) 2003 David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <errno.h>
#include <stdlib.h>
#include <pthread.h>
#include "thr_private.h"
#define SPIN_COUNT 100000
__weak_reference(_pthread_spin_init, pthread_spin_init);
__weak_reference(_pthread_spin_destroy, pthread_spin_destroy);
__weak_reference(_pthread_spin_trylock, pthread_spin_trylock);
__weak_reference(_pthread_spin_lock, pthread_spin_lock);
__weak_reference(_pthread_spin_unlock, pthread_spin_unlock);
/*
 * Allocate and initialize a spinlock.  Only process-private locks are
 * supported: returns EINVAL for a bad argument, ENOMEM when the
 * allocation fails, 0 on success.
 */
int
_pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{
	struct pthread_spinlock *new_lock;

	if (lock == NULL || pshared != PTHREAD_PROCESS_PRIVATE)
		return (EINVAL);
	new_lock = malloc(sizeof(struct pthread_spinlock));
	if (new_lock == NULL)
		return (ENOMEM);
	_thr_umtx_init(&new_lock->s_lock);
	*lock = new_lock;
	return (0);
}
/*
 * Tear down a spinlock and release its storage.  The caller's handle
 * is cleared so later use of the destroyed lock can be detected.
 */
int
_pthread_spin_destroy(pthread_spinlock_t *lock)
{
	if (lock == NULL || *lock == NULL)
		return (EINVAL);
	free(*lock);
	*lock = NULL;
	return (0);
}
/*
 * Try to take the spinlock without spinning.  Returns EINVAL for a
 * bad handle, otherwise the result of the umtx try-lock (0 on
 * success, an error when the lock is already held).
 */
int
_pthread_spin_trylock(pthread_spinlock_t *lock)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_spinlock *lck;
	int ret;

	if (lock == NULL || (lck = *lock) == NULL)
		ret = EINVAL;
	else
		ret = THR_UMTX_TRYLOCK(curthread, &lck->s_lock);
	return (ret);
}
/*
 * Acquire the spinlock, busy-waiting.  Between try-lock attempts we
 * spin on a plain read of s_lock, yielding the CPU every SPIN_COUNT
 * iterations so the holder can run on a uniprocessor.
 */
int
_pthread_spin_lock(pthread_spinlock_t *lock)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_spinlock *lck;
	int ret, count;

	if (lock == NULL || (lck = *lock) == NULL)
		ret = EINVAL;
	else {
		count = SPIN_COUNT;
		while ((ret = THR_UMTX_TRYLOCK(curthread, &lck->s_lock)) != 0) {
			/* Spin-read until the lock looks free. */
			while (lck->s_lock) {
#ifdef __i386__
				/* tell cpu we are spinning */
				__asm __volatile("pause");
#endif
				if (--count <= 0) {
					count = SPIN_COUNT;
					_pthread_yield();
				}
			}
		}
		ret = 0;
	}
	return (ret);
}
/* Release the spinlock; EINVAL for a bad handle. */
int
_pthread_spin_unlock(pthread_spinlock_t *lock)
{
	struct pthread *curthread = _get_curthread();
	struct pthread_spinlock *lck;
	int ret;

	if (lock == NULL || (lck = *lock) == NULL)
		ret = EINVAL;
	else {
		ret = THR_UMTX_UNLOCK(curthread, &lck->s_lock);
	}
	return (ret);
}

View File

@ -31,30 +31,31 @@
*
* $FreeBSD$
*/
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include "thr_private.h"
static void resume_common(struct pthread *);
#include "thr_private.h"
__weak_reference(_pthread_resume_np, pthread_resume_np);
__weak_reference(_pthread_resume_all_np, pthread_resume_all_np);
static void resume_common(struct pthread *thread);
/* Resume a thread: */
int
_pthread_resume_np(pthread_t thread)
{
struct pthread *curthread = _get_curthread();
int ret;
/* Find the thread in the list of active threads: */
if ((ret = _find_thread(thread)) == 0) {
PTHREAD_LOCK(thread);
if ((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0)
resume_common(thread);
PTHREAD_UNLOCK(thread);
/* Add a reference to the thread: */
if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0)) == 0) {
/* Lock the threads scheduling queue: */
THR_THREAD_LOCK(curthread, thread);
resume_common(thread);
THR_THREAD_UNLOCK(curthread, thread);
_thr_ref_delete(curthread, thread);
}
return (ret);
}
@ -62,28 +63,30 @@ _pthread_resume_np(pthread_t thread)
void
_pthread_resume_all_np(void)
{
struct pthread *thread;
struct pthread *curthread = _get_curthread();
struct pthread *thread;
/* Take the thread list lock: */
THREAD_LIST_LOCK(curthread);
_thread_sigblock();
THREAD_LIST_LOCK;
TAILQ_FOREACH(thread, &_thread_list, tle) {
PTHREAD_LOCK(thread);
if ((thread != curthread) &&
((thread->flags & PTHREAD_FLAGS_SUSPENDED) != 0))
if (thread != curthread) {
THR_THREAD_LOCK(curthread, thread);
resume_common(thread);
PTHREAD_UNLOCK(thread);
THR_THREAD_UNLOCK(curthread, thread);
}
}
THREAD_LIST_UNLOCK;
_thread_sigunblock();
/* Release the thread list lock: */
THREAD_LIST_UNLOCK(curthread);
}
/*
* The caller is required to have locked the thread before
* calling this function.
*/
static void
resume_common(struct pthread *thread)
{
thread->flags &= ~PTHREAD_FLAGS_SUSPENDED;
thr_wake(thread->thr_id);
/* Clear the suspend flag: */
thread->flags &= ~THR_FLAGS_NEED_SUSPEND;
thread->cycle++;
_thr_umtx_wake(&thread->cycle, 1);
_thr_send_sig(thread, SIGCANCEL);
}

View File

@ -1,6 +1,5 @@
/*-
* Copyright (c) 1998 Alex Nash
* Copyright (c) 2004 Michael Telahun Makonnen
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,180 +30,183 @@
#include <limits.h>
#include <stdlib.h>
#include "namespace.h"
#include <pthread.h>
#include "un-namespace.h"
#include "thr_private.h"
/* maximum number of times a read lock may be obtained */
#define MAX_READ_LOCKS (INT_MAX - 1)
/*
* For distinguishing operations on read and write locks.
*/
enum rwlock_type {RWT_READ, RWT_WRITE};
/* Support for staticaly initialized mutexes. */
static struct umtx init_lock = UMTX_INITIALIZER;
__weak_reference(_pthread_rwlock_destroy, pthread_rwlock_destroy);
__weak_reference(_pthread_rwlock_init, pthread_rwlock_init);
__weak_reference(_pthread_rwlock_rdlock, pthread_rwlock_rdlock);
__weak_reference(_pthread_rwlock_timedrdlock, pthread_rwlock_timedrdlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
__weak_reference(_pthread_rwlock_tryrdlock, pthread_rwlock_tryrdlock);
__weak_reference(_pthread_rwlock_trywrlock, pthread_rwlock_trywrlock);
__weak_reference(_pthread_rwlock_unlock, pthread_rwlock_unlock);
__weak_reference(_pthread_rwlock_wrlock, pthread_rwlock_wrlock);
__weak_reference(_pthread_rwlock_timedwrlock, pthread_rwlock_timedwrlock);
static int insert_rwlock(struct pthread_rwlock *, enum rwlock_type);
static int rwlock_init_static(struct pthread_rwlock **rwlock);
static int rwlock_rdlock_common(pthread_rwlock_t *, int,
const struct timespec *);
static int rwlock_wrlock_common(pthread_rwlock_t *, int,
const struct timespec *);
/*
* Prototypes
*/
/*
 * Allocate and initialize a read/write lock object: a monitor mutex
 * plus separate condition variables for blocked readers and writers.
 * The attribute argument is currently unused.  On any failure the
 * partially constructed object is torn down in reverse order and
 * *rwlock is left untouched.
 */
static int
rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	pthread_rwlock_t prwlock;
	int ret;

	/* allocate rwlock object */
	prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));
	if (prwlock == NULL)
		return (ENOMEM);
	/* initialize the monitor lock */
	if ((ret = _pthread_mutex_init(&prwlock->lock, NULL)) != 0)
		free(prwlock);
	else {
		/* initialize the read condition signal */
		ret = _pthread_cond_init(&prwlock->read_signal, NULL);
		if (ret != 0) {
			_pthread_mutex_destroy(&prwlock->lock);
			free(prwlock);
		} else {
			/* initialize the write condition signal */
			ret = _pthread_cond_init(&prwlock->write_signal, NULL);
			if (ret != 0) {
				_pthread_cond_destroy(&prwlock->read_signal);
				_pthread_mutex_destroy(&prwlock->lock);
				free(prwlock);
			} else {
				/* success */
				prwlock->state = 0;
				prwlock->blocked_writers = 0;
				*rwlock = prwlock;
			}
		}
	}
	return (ret);
}
int
_pthread_rwlock_destroy (pthread_rwlock_t *rwlock)
{
pthread_rwlock_t prwlock;
int ret;
if (rwlock == NULL || *rwlock == NULL)
return (EINVAL);
if (rwlock == NULL)
ret = EINVAL;
else {
pthread_rwlock_t prwlock;
prwlock = *rwlock;
prwlock = *rwlock;
if (prwlock->state != 0)
return (EBUSY);
_pthread_mutex_destroy(&prwlock->lock);
_pthread_cond_destroy(&prwlock->read_signal);
_pthread_cond_destroy(&prwlock->write_signal);
free(prwlock);
pthread_mutex_destroy(&prwlock->lock);
pthread_cond_destroy(&prwlock->read_signal);
pthread_cond_destroy(&prwlock->write_signal);
free(prwlock);
*rwlock = NULL;
*rwlock = NULL;
ret = 0;
}
return (ret);
}
return (0);
/*
 * Lazily construct a statically initialized rwlock (a static
 * initializer leaves *rwlock NULL).  Serialized by
 * _rwlock_static_lock so only one thread performs the initialization.
 */
static int
init_static(struct pthread *thread, pthread_rwlock_t *rwlock)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_rwlock_static_lock);
	if (*rwlock == NULL)
		ret = rwlock_init(rwlock, NULL);
	else
		/* Another thread beat us to it. */
		ret = 0;
	THR_LOCK_RELEASE(thread, &_rwlock_static_lock);
	return (ret);
}
int
_pthread_rwlock_init (pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
pthread_rwlock_t prwlock;
int ret;
/* allocate rwlock object */
prwlock = (pthread_rwlock_t)malloc(sizeof(struct pthread_rwlock));
if (prwlock == NULL) {
ret = ENOMEM;
goto out;
}
/* initialize the lock */
if ((ret = pthread_mutex_init(&prwlock->lock, NULL)) != 0)
goto out;
/* initialize the read condition signal */
if ((ret = pthread_cond_init(&prwlock->read_signal, NULL)) != 0)
goto out_readcond;
/* initialize the write condition signal */
if ((ret = pthread_cond_init(&prwlock->write_signal, NULL)) != 0)
goto out_writecond;
/* success */
prwlock->state = 0;
prwlock->blocked_writers = 0;
*rwlock = prwlock;
return (0);
out_writecond:
pthread_cond_destroy(&prwlock->read_signal);
out_readcond:
pthread_mutex_destroy(&prwlock->lock);
out:
if (prwlock != NULL)
free(prwlock);
return(ret);
*rwlock = NULL;
return (rwlock_init(rwlock, attr));
}
/*
* If nonblocking is 0 this function will wait on the lock. If
* it is greater than 0 it will return immediately with EBUSY.
*/
static int
rwlock_rdlock_common(pthread_rwlock_t *rwlock, int nonblocking,
const struct timespec *timeout)
rwlock_rdlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
struct rwlock_held *rh;
pthread_rwlock_t prwlock;
int ret;
struct pthread *curthread = _get_curthread();
pthread_rwlock_t prwlock;
int ret;
rh = NULL;
if (rwlock == NULL)
return(EINVAL);
/*
* Check for validity of the timeout parameter.
*/
if (timeout != NULL &&
(timeout->tv_nsec < 0 || timeout->tv_nsec >= 1000000000))
return (EINVAL);
if ((ret = rwlock_init_static(rwlock)) !=0 )
return (ret);
prwlock = *rwlock;
/* check for static initialization */
if (prwlock == NULL) {
if ((ret = init_static(curthread, rwlock)) != 0)
return (ret);
prwlock = *rwlock;
}
/* grab the monitor lock */
if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
return(ret);
if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
return (ret);
/* check lock count */
if (prwlock->state == MAX_READ_LOCKS) {
pthread_mutex_unlock(&prwlock->lock);
_pthread_mutex_unlock(&prwlock->lock);
return (EAGAIN);
}
/* give writers priority over readers */
while (prwlock->blocked_writers || prwlock->state < 0) {
if (nonblocking) {
pthread_mutex_unlock(&prwlock->lock);
return (EBUSY);
}
curthread = _get_curthread();
if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
/*
* If this lock is already held for writing we have
* a deadlock situation.
* To avoid having to track all the rdlocks held by
* a thread or all of the threads that hold a rdlock,
* we keep a simple count of all the rdlocks held by
* a thread. If a thread holds any rdlocks it is
* possible that it is attempting to take a recursive
* rdlock. If there are blocked writers and precedence
* is given to them, then that would result in the thread
* deadlocking. So allowing a thread to take the rdlock
* when it already has one or more rdlocks avoids the
* deadlock. I hope the reader can follow that logic ;-)
*/
if (curthread->rwlockList != NULL && prwlock->state < 0) {
LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
if (rh->rh_rwlock == prwlock &&
rh->rh_wrcount > 0) {
pthread_mutex_unlock(&prwlock->lock);
return (EDEADLK);
}
; /* nothing needed */
} else {
/* give writers priority over readers */
while (prwlock->blocked_writers || prwlock->state < 0) {
if (abstime)
ret = _pthread_cond_timedwait
(&prwlock->read_signal,
&prwlock->lock, abstime);
else
ret = _pthread_cond_wait(&prwlock->read_signal,
&prwlock->lock);
if (ret != 0) {
/* can't do a whole lot if this fails */
_pthread_mutex_unlock(&prwlock->lock);
return (ret);
}
}
if (timeout == NULL)
ret = pthread_cond_wait(&prwlock->read_signal,
&prwlock->lock);
else
ret = pthread_cond_timedwait(&prwlock->read_signal,
&prwlock->lock, timeout);
if (ret != 0 && ret != EINTR) {
/* can't do a whole lot if this fails */
pthread_mutex_unlock(&prwlock->lock);
return(ret);
}
}
++prwlock->state; /* indicate we are locked for reading */
ret = insert_rwlock(prwlock, RWT_READ);
if (ret != 0) {
pthread_mutex_unlock(&prwlock->lock);
return (ret);
}
curthread->rdlock_count++;
prwlock->state++; /* indicate we are locked for reading */
/*
* Something is really wrong if this call fails. Returning
@ -212,262 +214,207 @@ rwlock_rdlock_common(pthread_rwlock_t *rwlock, int nonblocking,
* lock. Decrementing 'state' is no good because we probably
* don't have the monitor lock.
*/
pthread_mutex_unlock(&prwlock->lock);
_pthread_mutex_unlock(&prwlock->lock);
return(0);
return (ret);
}
int
_pthread_rwlock_rdlock (pthread_rwlock_t *rwlock)
{
return (rwlock_rdlock_common(rwlock, 0, NULL));
return (rwlock_rdlock_common(rwlock, NULL));
}
int
_pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
const struct timespec *timeout)
_pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
const struct timespec *abstime)
{
return (rwlock_rdlock_common(rwlock, 0, timeout));
return (rwlock_rdlock_common(rwlock, abstime));
}
int
_pthread_rwlock_tryrdlock (pthread_rwlock_t *rwlock)
{
return (rwlock_rdlock_common(rwlock, 1, NULL));
}
struct pthread *curthread = _get_curthread();
pthread_rwlock_t prwlock;
int ret;
int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
struct rwlock_held *rh;
pthread_rwlock_t prwlock;
int ret;
rh = NULL;
if (rwlock == NULL || *rwlock == NULL)
return(EINVAL);
if (rwlock == NULL)
return (EINVAL);
prwlock = *rwlock;
/* check for static initialization */
if (prwlock == NULL) {
if ((ret = init_static(curthread, rwlock)) != 0)
return (ret);
prwlock = *rwlock;
}
/* grab the monitor lock */
if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
return(ret);
if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
return (ret);
if (curthread->rwlockList != NULL) {
LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
if (rh->rh_rwlock == prwlock)
break;
}
curthread = _get_curthread();
if (prwlock->state == MAX_READ_LOCKS)
ret = EAGAIN;
else if ((curthread->rdlock_count > 0) && (prwlock->state > 0)) {
/* see comment for pthread_rwlock_rdlock() */
curthread->rdlock_count++;
prwlock->state++;
}
if (rh == NULL) {
ret = EPERM;
goto out;
}
if (prwlock->state > 0) {
PTHREAD_ASSERT(rh->rh_wrcount == 0,
"write count on a readlock should be zero!");
rh->rh_rdcount--;
if (--prwlock->state == 0 && prwlock->blocked_writers)
ret = pthread_cond_signal(&prwlock->write_signal);
} else if (prwlock->state < 0) {
PTHREAD_ASSERT(rh->rh_rdcount == 0,
"read count on a writelock should be zero!");
rh->rh_wrcount--;
prwlock->state = 0;
if (prwlock->blocked_writers)
ret = pthread_cond_signal(&prwlock->write_signal);
else
ret = pthread_cond_broadcast(&prwlock->read_signal);
} else {
/*
* No thread holds this lock. We should never get here.
*/
PTHREAD_ASSERT(0, "state=0 on read-write lock held by thread");
ret = EPERM;
goto out;
}
if (rh->rh_wrcount == 0 && rh->rh_rdcount == 0) {
LIST_REMOVE(rh, rh_link);
free(rh);
/* give writers priority over readers */
else if (prwlock->blocked_writers || prwlock->state < 0)
ret = EBUSY;
else {
curthread->rdlock_count++;
prwlock->state++; /* indicate we are locked for reading */
}
out:
/* see the comment on this in rwlock_rdlock_common */
pthread_mutex_unlock(&prwlock->lock);
/* see the comment on this in pthread_rwlock_rdlock */
_pthread_mutex_unlock(&prwlock->lock);
return(ret);
}
int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
return (rwlock_wrlock_common(rwlock, 0, NULL));
}
int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
const struct timespec *timeout)
{
return (rwlock_wrlock_common(rwlock, 0, timeout));
return (ret);
}
int
_pthread_rwlock_trywrlock (pthread_rwlock_t *rwlock)
{
return (rwlock_wrlock_common(rwlock, 1, NULL));
}
struct pthread *curthread = _get_curthread();
pthread_rwlock_t prwlock;
int ret;
/*
* If nonblocking is 0 this function will wait on the lock. If
* it is greater than 0 it will return immediately with EBUSY.
*/
static int
rwlock_wrlock_common(pthread_rwlock_t *rwlock, int nonblocking,
const struct timespec *timeout)
{
struct rwlock_held *rh;
pthread_rwlock_t prwlock;
int ret;
rh = NULL;
if (rwlock == NULL)
return(EINVAL);
/*
* Check the timeout value for validity.
*/
if (timeout != NULL &&
(timeout->tv_nsec < 0 || timeout->tv_nsec >= 1000000000))
return (EINVAL);
if ((ret = rwlock_init_static(rwlock)) !=0 )
return (ret);
prwlock = *rwlock;
/* check for static initialization */
if (prwlock == NULL) {
if ((ret = init_static(curthread, rwlock)) != 0)
return (ret);
prwlock = *rwlock;
}
/* grab the monitor lock */
if ((ret = pthread_mutex_lock(&prwlock->lock)) != 0)
return(ret);
if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
return (ret);
if (prwlock->state != 0)
ret = EBUSY;
else
/* indicate we are locked for writing */
prwlock->state = -1;
/* see the comment on this in pthread_rwlock_rdlock */
_pthread_mutex_unlock(&prwlock->lock);
return (ret);
}
/*
 * Release a read-write lock held by the calling thread.
 *
 * prwlock->state encodes the lock: > 0 is the number of readers,
 * < 0 means locked for writing, 0 is unlocked.  Returns 0 on success
 * or an errno value (EINVAL for a NULL/unlocked lock).
 */
int
_pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
	struct pthread *curthread;
	pthread_rwlock_t prwlock;
	int ret;

	if (rwlock == NULL)
		return (EINVAL);

	prwlock = *rwlock;
	if (prwlock == NULL)
		return (EINVAL);

	/* grab the monitor lock */
	if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
		return (ret);

	curthread = _get_curthread();
	if (prwlock->state > 0) {
		/* Dropping one read lock. */
		curthread->rdlock_count--;
		prwlock->state--;
		/* Last reader out wakes one waiting writer, if any. */
		if (prwlock->state == 0 && prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
	} else if (prwlock->state < 0) {
		/* Dropping the write lock; writers get preference. */
		prwlock->state = 0;
		if (prwlock->blocked_writers)
			ret = _pthread_cond_signal(&prwlock->write_signal);
		else
			ret = _pthread_cond_broadcast(&prwlock->read_signal);
	} else
		/* state == 0: the lock is not held at all. */
		ret = EINVAL;

	/* see the comment on this in pthread_rwlock_rdlock */
	_pthread_mutex_unlock(&prwlock->lock);
	return (ret);
}
static int
rwlock_wrlock_common (pthread_rwlock_t *rwlock, const struct timespec *abstime)
{
struct pthread *curthread = _get_curthread();
pthread_rwlock_t prwlock;
int ret;
if (rwlock == NULL)
return (EINVAL);
prwlock = *rwlock;
/* check for static initialization */
if (prwlock == NULL) {
if ((ret = init_static(curthread, rwlock)) != 0)
return (ret);
prwlock = *rwlock;
}
/* grab the monitor lock */
if ((ret = _pthread_mutex_lock(&prwlock->lock)) != 0)
return (ret);
while (prwlock->state != 0) {
if (nonblocking) {
pthread_mutex_unlock(&prwlock->lock);
return (EBUSY);
}
prwlock->blocked_writers++;
/*
* If this thread already holds the lock for reading
* or writing we have a deadlock situation.
*/
if (curthread->rwlockList != NULL) {
LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
if (rh->rh_rwlock == prwlock) {
PTHREAD_ASSERT((rh->rh_rdcount > 0 ||
rh->rh_wrcount > 0),
"Invalid 0 R/RW count!");
pthread_mutex_unlock(&prwlock->lock);
return (EDEADLK);
break;
}
}
}
++prwlock->blocked_writers;
if (timeout == NULL)
ret = pthread_cond_wait(&prwlock->write_signal,
&prwlock->lock);
if (abstime != NULL)
ret = _pthread_cond_timedwait(&prwlock->write_signal,
&prwlock->lock, abstime);
else
ret = pthread_cond_timedwait(&prwlock->write_signal,
&prwlock->lock, timeout);
if (ret != 0 && ret != EINTR) {
--prwlock->blocked_writers;
pthread_mutex_unlock(&prwlock->lock);
return(ret);
ret = _pthread_cond_wait(&prwlock->write_signal,
&prwlock->lock);
if (ret != 0) {
prwlock->blocked_writers--;
_pthread_mutex_unlock(&prwlock->lock);
return (ret);
}
--prwlock->blocked_writers;
prwlock->blocked_writers--;
}
/* indicate we are locked for writing */
prwlock->state = -1;
ret = insert_rwlock(prwlock, RWT_WRITE);
if (ret != 0) {
pthread_mutex_unlock(&prwlock->lock);
return (ret);
}
/* see the comment on this in pthread_rwlock_rdlock */
pthread_mutex_unlock(&prwlock->lock);
_pthread_mutex_unlock(&prwlock->lock);
return(0);
return (ret);
}
static int
insert_rwlock(struct pthread_rwlock *prwlock, enum rwlock_type rwt)
int
_pthread_rwlock_wrlock (pthread_rwlock_t *rwlock)
{
struct rwlock_held *rh;
/*
* Initialize the rwlock list in the thread. Although this function
* may be called for many read-write locks, the initialization
* of the the head happens only once during the lifetime of
* the thread.
*/
if (curthread->rwlockList == NULL) {
curthread->rwlockList =
(struct rwlock_listhead *)malloc(sizeof(struct rwlock_listhead));
if (curthread->rwlockList == NULL) {
return (ENOMEM);
}
LIST_INIT(curthread->rwlockList);
}
LIST_FOREACH(rh, curthread->rwlockList, rh_link) {
if (rh->rh_rwlock == prwlock) {
if (rwt == RWT_READ)
rh->rh_rdcount++;
else if (rwt == RWT_WRITE)
rh->rh_wrcount++;
return (0);
}
}
/*
* This is the first time we're holding this lock,
* create a new entry.
*/
rh = (struct rwlock_held *)malloc(sizeof(struct rwlock_held));
if (rh == NULL)
return (ENOMEM);
rh->rh_rwlock = prwlock;
rh->rh_rdcount = 0;
rh->rh_wrcount = 0;
if (rwt == RWT_READ)
rh->rh_rdcount = 1;
else if (rwt == RWT_WRITE)
rh->rh_wrcount = 1;
LIST_INSERT_HEAD(curthread->rwlockList, rh, rh_link);
return (0);
return (rwlock_wrlock_common (rwlock, NULL));
}
/*
* There are consumers of rwlocks, inluding our own libc, that depend on
* a PTHREAD_RWLOCK_INITIALIZER to do for rwlocks what
* a similarly named symbol does for statically initialized mutexes.
* This symbol was dropped in The Open Group Base Specifications Issue 6
* and does not exist in IEEE Std 1003.1, 2003, but it should still be
* supported for backwards compatibility.
*/
static int
rwlock_init_static(struct pthread_rwlock **rwlock)
int
_pthread_rwlock_timedwrlock (pthread_rwlock_t *rwlock,
const struct timespec *abstime)
{
int error;
error = 0;
UMTX_LOCK(&init_lock);
if (*rwlock == PTHREAD_RWLOCK_INITIALIZER)
error = _pthread_rwlock_init(rwlock, NULL);
UMTX_UNLOCK(&init_lock);
return (error);
return (rwlock_wrlock_common (rwlock, abstime));
}

View File

@ -31,7 +31,9 @@
*
* $FreeBSD$
*/
#include <pthread.h>
#include "thr_private.h"
__weak_reference(_pthread_self, pthread_self);
@ -39,6 +41,8 @@ __weak_reference(_pthread_self, pthread_self);
pthread_t
_pthread_self(void)
{
_thr_check_init();
/* Return the running thread pointer: */
return (curthread);
return (_get_curthread());
}

View File

@ -1,4 +1,5 @@
/*
* Copyright (C) 2005 David Xu <davidxu@freebsd.org>.
* Copyright (C) 2000 Jason Evans <jasone@freebsd.org>.
* All rights reserved.
*
@ -29,227 +30,240 @@
* $FreeBSD$
*/
#include <stdlib.h>
#include "namespace.h"
#include <sys/queue.h>
#include <errno.h>
#include <semaphore.h>
#include <fcntl.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdlib.h>
#include <time.h>
#include <_semaphore.h>
#include "un-namespace.h"
#include "thr_private.h"
#define _SEM_CHECK_VALIDITY(sem) \
if ((*(sem))->magic != SEM_MAGIC) { \
errno = EINVAL; \
retval = -1; \
goto RETURN; \
}
__weak_reference(_sem_init, sem_init);
__weak_reference(_sem_destroy, sem_destroy);
__weak_reference(_sem_open, sem_open);
__weak_reference(_sem_close, sem_close);
__weak_reference(_sem_unlink, sem_unlink);
__weak_reference(_sem_wait, sem_wait);
__weak_reference(_sem_trywait, sem_trywait);
__weak_reference(_sem_post, sem_post);
__weak_reference(_sem_getvalue, sem_getvalue);
__weak_reference(_sem_trywait, sem_trywait);
__weak_reference(_sem_wait, sem_wait);
__weak_reference(_sem_timedwait, sem_timedwait);
__weak_reference(_sem_post, sem_post);
/*
 * Check that 'sem' points at an initialized semaphore.
 *
 * Returns 0 if the semaphore looks valid; otherwise sets errno to
 * EINVAL and returns -1, matching the sem_*() error convention.
 */
static inline int
sem_check_validity(sem_t *sem)
{
	/*
	 * Also reject a NULL *sem: the previous test dereferenced *sem
	 * before checking it, so sem_wait(&nullsem) would fault instead
	 * of failing with EINVAL.
	 */
	if ((sem != NULL) && (*sem != NULL) &&
	    ((*sem)->magic == SEM_MAGIC))
		return (0);
	else {
		errno = EINVAL;
		return (-1);
	}
}
/*
 * Allocate and initialize a struct sem.
 *
 * 'value' is the initial count, 'semid' the kernel semaphore id (used
 * for process-shared semaphores) and 'system_sem' is non-zero when the
 * kernel manages the semaphore.  Returns the new semaphore, or NULL
 * with errno set: EINVAL for an out-of-range initial value, ENOSPC if
 * allocation fails.
 */
static sem_t
sem_alloc(unsigned int value, semid_t semid, int system_sem)
{
	sem_t sem;

	if (value > SEM_VALUE_MAX) {
		errno = EINVAL;
		return (NULL);
	}
	sem = (sem_t)malloc(sizeof(struct sem));
	if (sem == NULL) {
		errno = ENOSPC;
		return (NULL);
	}
	_thr_umtx_init((umtx_t *)&sem->lock);
	/*
	 * Fortunately 'count' and 'nwaiters' are adjacent, so we can use
	 * umtx_wait to wait on them; umtx_wait needs an address that can
	 * be accessed as a long integer.
	 */
	sem->count = (u_int32_t)value;
	sem->nwaiters = 0;
	sem->magic = SEM_MAGIC;
	sem->semid = semid;
	sem->syssem = system_sem;
	return (sem);
}
int
_sem_init(sem_t *sem, int pshared, unsigned int value)
{
int retval;
semid_t semid;
/*
* Range check the arguments.
*/
if (pshared != 0) {
/*
* The user wants a semaphore that can be shared among
* processes, which this implementation can't do. Sounds like a
* permissions problem to me (yeah right).
*/
errno = EPERM;
retval = -1;
goto RETURN;
semid = (semid_t)SEM_USER;
if ((pshared != 0) && (ksem_init(&semid, value) != 0))
return (-1);
(*sem) = sem_alloc(value, semid, pshared);
if ((*sem) == NULL) {
if (pshared != 0)
ksem_destroy(semid);
return (-1);
}
if (value > SEM_VALUE_MAX) {
errno = EINVAL;
retval = -1;
goto RETURN;
}
*sem = (sem_t)malloc(sizeof(struct sem));
if (*sem == NULL) {
errno = ENOSPC;
retval = -1;
goto RETURN;
}
/*
* Initialize the semaphore.
*/
if (pthread_mutex_init(&(*sem)->lock, NULL) != 0) {
free(*sem);
errno = ENOSPC;
retval = -1;
goto RETURN;
}
if (pthread_cond_init(&(*sem)->gtzero, NULL) != 0) {
pthread_mutex_destroy(&(*sem)->lock);
free(*sem);
errno = ENOSPC;
retval = -1;
goto RETURN;
}
(*sem)->count = (u_int32_t)value;
(*sem)->nwaiters = 0;
(*sem)->magic = SEM_MAGIC;
retval = 0;
RETURN:
return retval;
return (0);
}
int
_sem_destroy(sem_t *sem)
{
int retval;
_SEM_CHECK_VALIDITY(sem);
int retval;
/* Make sure there are no waiters. */
pthread_mutex_lock(&(*sem)->lock);
if ((*sem)->nwaiters > 0) {
pthread_mutex_unlock(&(*sem)->lock);
errno = EBUSY;
retval = -1;
goto RETURN;
if (sem_check_validity(sem) != 0)
return (-1);
/*
* If this is a system semaphore let the kernel track it otherwise
* make sure there are no waiters.
*/
if ((*sem)->syssem != 0)
retval = ksem_destroy((*sem)->semid);
else {
retval = 0;
(*sem)->magic = 0;
}
pthread_mutex_unlock(&(*sem)->lock);
pthread_mutex_destroy(&(*sem)->lock);
pthread_cond_destroy(&(*sem)->gtzero);
(*sem)->magic = 0;
free(*sem);
retval = 0;
RETURN:
return retval;
}
sem_t *
_sem_open(const char *name, int oflag, ...)
{
errno = ENOSYS;
return SEM_FAILED;
if (retval == 0)
free(*sem);
return (retval);
}
int
_sem_close(sem_t *sem)
_sem_getvalue(sem_t * __restrict sem, int * __restrict sval)
{
errno = ENOSYS;
return -1;
}
int retval;
int
_sem_unlink(const char *name)
{
errno = ENOSYS;
return -1;
}
if (sem_check_validity(sem) != 0)
return (-1);
int
_sem_wait(sem_t *sem)
{
int retval;
_thread_enter_cancellation_point();
_SEM_CHECK_VALIDITY(sem);
pthread_mutex_lock(&(*sem)->lock);
while ((*sem)->count == 0) {
(*sem)->nwaiters++;
pthread_cond_wait(&(*sem)->gtzero, &(*sem)->lock);
(*sem)->nwaiters--;
if ((*sem)->syssem != 0)
retval = ksem_getvalue((*sem)->semid, sval);
else {
*sval = (int)(*sem)->count;
retval = 0;
}
(*sem)->count--;
pthread_mutex_unlock(&(*sem)->lock);
retval = 0;
RETURN:
_thread_leave_cancellation_point();
return retval;
return (retval);
}
int
_sem_trywait(sem_t *sem)
{
int retval;
int val;
_SEM_CHECK_VALIDITY(sem);
if (sem_check_validity(sem) != 0)
return (-1);
pthread_mutex_lock(&(*sem)->lock);
if ((*sem)->syssem != 0)
return (ksem_trywait((*sem)->semid));
if ((*sem)->count > 0) {
(*sem)->count--;
retval = 0;
} else {
errno = EAGAIN;
retval = -1;
while ((val = (*sem)->count) > 0) {
if (atomic_cmpset_acq_int(&(*sem)->count, val, val - 1))
return (0);
}
pthread_mutex_unlock(&(*sem)->lock);
errno = EAGAIN;
return (-1);
}
RETURN:
return retval;
/*
 * sem_wait(3): decrement the semaphore, blocking while its value is
 * zero.  Returns 0 on success, or -1 with errno set.  This is a
 * cancellation point.
 */
int
_sem_wait(sem_t *sem)
{
	struct pthread *curthread;
	int val, oldcancel, retval;

	if (sem_check_validity(sem) != 0)
		return (-1);

	curthread = _get_curthread();
	if ((*sem)->syssem != 0) {
		/* Kernel-managed semaphore: let the kernel do the wait. */
		oldcancel = _thr_cancel_enter(curthread);
		retval = ksem_wait((*sem)->semid);
		_thr_cancel_leave(curthread, oldcancel);
		return (retval);
	}

	_pthread_testcancel();
	do {
		/* Fast path: grab a token with a lock-free CAS. */
		while ((val = (*sem)->count) > 0) {
			if (atomic_cmpset_acq_int(&(*sem)->count, val, val - 1))
				return (0);
		}
		/*
		 * Count was zero: sleep on the count word via umtx, with a
		 * cancellation window open around the sleep.  A zero return
		 * means we were woken (or raced) and must retry the CAS loop.
		 */
		oldcancel = _thr_cancel_enter(curthread);
		retval = _thr_umtx_wait((umtx_t *)&(*sem)->count, 0, NULL);
		_thr_cancel_leave(curthread, oldcancel);
	} while (retval == 0);
	errno = retval;
	return (-1);
}
/*
 * sem_timedwait(3): like sem_wait() but gives up with ETIMEDOUT once
 * the absolute deadline 'abstime' (CLOCK_REALTIME) passes.  Returns 0
 * on success, or -1 with errno set.  This is a cancellation point.
 */
int
_sem_timedwait(sem_t * __restrict sem, struct timespec * __restrict abstime)
{
	struct timespec ts, ts2;
	struct pthread *curthread;
	int val, oldcancel, retval;

	if (sem_check_validity(sem) != 0)
		return (-1);

	curthread = _get_curthread();
	if ((*sem)->syssem != 0) {
		/* Kernel-managed semaphore: the kernel handles the timeout. */
		oldcancel = _thr_cancel_enter(curthread);
		retval = ksem_timedwait((*sem)->semid, abstime);
		_thr_cancel_leave(curthread, oldcancel);
		return (retval);
	}

	/*
	 * The timeout argument is only supposed to
	 * be checked if the thread would have blocked.
	 */
	_pthread_testcancel();
	do {
		/* Fast path: grab a token with a lock-free CAS. */
		while ((val = (*sem)->count) > 0) {
			if (atomic_cmpset_acq_int(&(*sem)->count, val, val - 1))
				return (0);
		}
		if (abstime == NULL) {
			errno = EINVAL;
			return (-1);
		}
		/*
		 * Convert the absolute deadline to a relative interval for
		 * umtx, recomputed on every iteration so spurious wakeups do
		 * not extend the total wait.
		 */
		clock_gettime(CLOCK_REALTIME, &ts);
		TIMESPEC_SUB(&ts2, abstime, &ts);
		oldcancel = _thr_cancel_enter(curthread);
		retval = _thr_umtx_wait((umtx_t *)&(*sem)->count, 0, &ts2);
		_thr_cancel_leave(curthread, oldcancel);
	} while (retval == 0);
	errno = retval;
	return (-1);
}
int
_sem_post(sem_t *sem)
{
int retval;
int val, retval;
if (sem_check_validity(sem) != 0)
return (-1);
_SEM_CHECK_VALIDITY(sem);
if ((*sem)->syssem != 0)
return (ksem_post((*sem)->semid));
/*
* sem_post() is required to be safe to call from within signal
* handlers. Thus, we must defer signals.
* sem_post() is required to be safe to call from within
* signal handlers, these code should work as that.
*/
pthread_mutex_lock(&(*sem)->lock);
/* GIANT_LOCK(curthread); */
(*sem)->count++;
if ((*sem)->nwaiters > 0)
pthread_cond_signal(&(*sem)->gtzero);
/* GIANT_UNLOCK(curthread); */
pthread_mutex_unlock(&(*sem)->lock);
retval = 0;
RETURN:
return retval;
}
int
_sem_getvalue(sem_t *sem, int *sval)
{
int retval;
_SEM_CHECK_VALIDITY(sem);
pthread_mutex_lock(&(*sem)->lock);
*sval = (int)(*sem)->count;
pthread_mutex_unlock(&(*sem)->lock);
retval = 0;
RETURN:
return retval;
do {
val = (*sem)->count;
} while (!atomic_cmpset_acq_int(&(*sem)->count, val, val + 1));
retval = _thr_umtx_wake((umtx_t *)&(*sem)->count, val + 1);
if (retval > 0)
retval = 0;
return (retval);
}

View File

@ -31,23 +31,23 @@
*
* $FreeBSD$
*/
#include <pthread.h>
#include "thr_private.h"
/*
* This function needs to reference the global error variable which is
* normally hidden from the user.
*/
#ifdef errno
#undef errno
#endif
extern int errno;
void
_thread_seterrno(pthread_t thread, int error)
{
/* Check for the initial thread: */
if (thread == _thread_initial)
if (thread == NULL || thread == _thr_initial)
/* The initial thread always uses the global error variable: */
errno = error;
else

View File

@ -31,93 +31,106 @@
*
* $FreeBSD$
*/
#include <errno.h>
#include <sys/param.h>
#include <pthread.h>
#include <stdlib.h>
#include "thr_private.h"
__weak_reference(_pthread_getschedparam, pthread_getschedparam);
__weak_reference(_pthread_setschedparam, pthread_setschedparam);
/*
 * Report the scheduling policy and priority of the given thread.
 *
 * Returns 0 on success, EINVAL if either output pointer is NULL, or
 * ESRCH if the thread cannot be found.
 */
int
_pthread_getschedparam(pthread_t pthread, int *policy,
	struct sched_param *param)
{
	if (param == NULL || policy == NULL)
		return (EINVAL);
	/* Validate that the thread still exists before touching it. */
	if (_find_thread(pthread) == ESRCH)
		return (ESRCH);
	param->sched_priority = pthread->base_priority;
	*policy = pthread->attr.sched_policy;
	return(0);
}
int
_pthread_setschedparam(pthread_t pthread, int policy,
const struct sched_param *param)
{
struct pthread_mutex *mtx;
int old_prio;
struct pthread *curthread = _get_curthread();
int in_syncq;
int in_readyq = 0;
int old_prio;
int ret = 0;
mtx = NULL;
old_prio = 0;
if ((param == NULL) || (policy < SCHED_FIFO) || (policy > SCHED_RR))
return (EINVAL);
if ((param->sched_priority < PTHREAD_MIN_PRIORITY) ||
(param->sched_priority > PTHREAD_MAX_PRIORITY))
return (ENOTSUP);
if (_find_thread(pthread) != 0)
return (ESRCH);
if ((param == NULL) || (policy < SCHED_FIFO) || (policy > SCHED_RR)) {
/* Return an invalid argument error: */
ret = EINVAL;
} else if ((param->sched_priority < THR_MIN_PRIORITY) ||
(param->sched_priority > THR_MAX_PRIORITY)) {
/* Return an unsupported value error. */
ret = ENOTSUP;
/*
* If the pthread is waiting on a mutex grab it now. Doing it now
* even though we do not need it immediately greatly simplifies the
* LOR avoidance code.
*/
do {
PTHREAD_LOCK(pthread);
if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
mtx = pthread->data.mutex;
if (_spintrylock(&mtx->lock) == EBUSY)
PTHREAD_UNLOCK(pthread);
else
break;
} else {
mtx = NULL;
break;
}
} while (1);
PTHREAD_ASSERT(pthread->active_priority >= pthread->inherited_priority,
"active priority cannot be less than inherited priority");
old_prio = pthread->base_priority;
pthread->base_priority = param->sched_priority;
if (param->sched_priority <= pthread->active_priority) {
/* Find the thread in the list of active threads: */
} else if ((ret = _thr_ref_add(curthread, pthread, /*include dead*/0))
== 0) {
/*
* Active priority is affected only if it was the
* base priority and the new base priority is lower.
* Lock the threads scheduling queue while we change
* its priority:
*/
if (pthread->active_priority == old_prio &&
pthread->active_priority != pthread->inherited_priority) {
pthread->active_priority = param->sched_priority;
readjust_priorities(pthread, mtx);
THR_THREAD_LOCK(curthread, pthread);
if (pthread->state == PS_DEAD) {
THR_THREAD_UNLOCK(curthread, pthread);
_thr_ref_delete(curthread, pthread);
return (ESRCH);
}
in_syncq = pthread->sflags & THR_FLAGS_IN_SYNCQ;
} else {
/*
* New base priority is greater than active priority. This
* only affects threads that are holding priority inheritance
* mutexes this thread is waiting on and its position in the
* queue.
*/
pthread->active_priority = param->sched_priority;
readjust_priorities(pthread, mtx);
/* Set the scheduling policy: */
pthread->attr.sched_policy = policy;
if (param->sched_priority ==
THR_BASE_PRIORITY(pthread->base_priority))
/*
* There is nothing to do; unlock the threads
* scheduling queue.
*/
THR_THREAD_UNLOCK(curthread, pthread);
else {
/*
* Remove the thread from its current priority
* queue before any adjustments are made to its
* active priority:
*/
old_prio = pthread->active_priority;
/* if ((pthread->flags & THR_FLAGS_IN_RUNQ) != 0) */ {
in_readyq = 1;
/* THR_RUNQ_REMOVE(pthread); */
}
/* Set the thread base priority: */
pthread->base_priority &=
(THR_SIGNAL_PRIORITY | THR_RT_PRIORITY);
pthread->base_priority = param->sched_priority;
/* Recalculate the active priority: */
pthread->active_priority = MAX(pthread->base_priority,
pthread->inherited_priority);
if (in_readyq) {
if ((pthread->priority_mutex_count > 0) &&
(old_prio > pthread->active_priority)) {
/*
* POSIX states that if the priority is
* being lowered, the thread must be
* inserted at the head of the queue for
* its priority if it owns any priority
* protection or inheritence mutexes.
*/
/* THR_RUNQ_INSERT_HEAD(pthread); */
}
else
/* THR_RUNQ_INSERT_TAIL(pthread)*/ ;
}
/* Unlock the threads scheduling queue: */
THR_THREAD_UNLOCK(curthread, pthread);
/*
* Check for any mutex priority adjustments. This
* includes checking for a priority mutex on which
* this thread is waiting.
*/
_mutex_notify_priochange(curthread, pthread, in_syncq);
}
_thr_ref_delete(curthread, pthread);
}
pthread->attr.sched_policy = policy;
PTHREAD_UNLOCK(pthread);
if (mtx != NULL)
_SPINUNLOCK(&mtx->lock);
return(0);
return (ret);
}

View File

@ -1,28 +1,33 @@
/*
* Copyright (c) 2003 Jeffrey Roberson <jeff@freebsd.org>
* Copyright (c) 2003 Jonathan Mini <mini@freebsd.org>
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by John Birrell.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
@ -31,12 +36,11 @@
#include <sys/types.h>
#include <sys/signalvar.h>
#include <signal.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <setjmp.h>
#include <errno.h>
#include <string.h>
#include <pthread.h>
#include <stdlib.h>
#include "thr_private.h"
@ -47,49 +51,195 @@
#define DBG_MSG(x...)
#endif
/*
 * Handler for SIGCANCEL, the signal the thread library reserves for
 * delivering asynchronous cancellation and suspend requests.
 */
static void
sigcancel_handler(int sig, siginfo_t *info, ucontext_t *ucp)
{
	struct pthread *curthread = _get_curthread();

	/* If interrupted inside a cancellation point, act on the request. */
	if (curthread->cancelflags & THR_CANCEL_AT_POINT)
		pthread_testcancel();

	if (curthread->flags & THR_FLAGS_NEED_SUSPEND) {
		/*
		 * Restore the interrupted context's signal mask before
		 * parking in the suspend check, so the thread does not
		 * sleep with the handler's mask in effect.
		 */
		__sys_sigprocmask(SIG_SETMASK, &ucp->uc_sigmask, NULL);
		_thr_suspend_check(curthread);
	}
}
/*
 * Park the calling thread while some other thread has asked it to
 * suspend (THR_FLAGS_NEED_SUSPEND), returning once the request is
 * cleared and a wakeup is delivered via the thread's cycle counter.
 */
void
_thr_suspend_check(struct pthread *curthread)
{
	long cycle;

	/* Async suspend. */
	_thr_signal_block(curthread);
	THR_LOCK(curthread);
	if ((curthread->flags & (THR_FLAGS_NEED_SUSPEND | THR_FLAGS_SUSPENDED))
	    == THR_FLAGS_NEED_SUSPEND) {
		curthread->flags |= THR_FLAGS_SUSPENDED;
		while (curthread->flags & THR_FLAGS_NEED_SUSPEND) {
			/*
			 * Snapshot the cycle counter, then drop the lock and
			 * unblock signals before sleeping.  umtx_wait only
			 * sleeps if the counter is still 'cycle', so a wakeup
			 * that bumps it between unlock and wait is not lost.
			 */
			cycle = curthread->cycle;
			THR_UNLOCK(curthread);
			_thr_signal_unblock(curthread);
			_thr_umtx_wait(&curthread->cycle, cycle, NULL);
			_thr_signal_block(curthread);
			THR_LOCK(curthread);
		}
		curthread->flags &= ~THR_FLAGS_SUSPENDED;
	}
	THR_UNLOCK(curthread);
	_thr_signal_unblock(curthread);
}
/*
 * Install the library's internal SIGCANCEL handler, used to deliver
 * asynchronous cancellation and suspend requests to threads.
 */
void
_thr_signal_init(void)
{
	struct sigaction act;

	/* Install cancel handler. */
	SIGEMPTYSET(act.sa_mask);
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	act.sa_sigaction = (__siginfohandler_t *)&sigcancel_handler;
	__sys_sigaction(SIGCANCEL, &act, NULL);
}
/*
 * Tear-down counterpart of _thr_signal_init().  Intentionally empty:
 * the SIGCANCEL handler is left installed for the process lifetime.
 */
void
_thr_signal_deinit(void)
{
}
__weak_reference(_sigaction, sigaction);
/*
 * sigaction(2) wrapper: behaves exactly like the system call except
 * that it refuses to let the application install a handler for
 * SIGCANCEL, which the thread library reserves for internal use.
 */
int
_sigaction(int sig, const struct sigaction * act, struct sigaction * oact)
{
	/* Only forward in-range signals that are not the reserved one. */
	if (sig >= 1 && sig <= _SIG_MAXSIG && sig != SIGCANCEL)
		return __sys_sigaction(sig, act, oact);

	/* Out-of-range signal number, or an attempt to grab SIGCANCEL. */
	errno = EINVAL;
	return (-1);
}
__weak_reference(_sigprocmask, sigprocmask);
/*
 * sigprocmask(2) wrapper: forwards to the system call, but never lets
 * the application block SIGCANCEL, which the library needs deliverable
 * for cancellation/suspension.  Unblocking requests pass through
 * untouched.
 */
int
_sigprocmask(int how, const sigset_t *set, sigset_t *oset)
{
	sigset_t tmpset;
	const sigset_t *inset = set;

	/* Strip SIGCANCEL from any mask that could end up blocking it. */
	if (set != NULL && how != SIG_UNBLOCK) {
		tmpset = *set;
		SIGDELSET(tmpset, SIGCANCEL);
		inset = &tmpset;
	}
	return (__sys_sigprocmask(how, inset, oset));
}
__weak_reference(_pthread_sigmask, pthread_sigmask);
int
_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
int error;
/*
* This always sets the mask on the current thread.
*/
error = sigprocmask(how, set, oset);
/*
* pthread_sigmask returns errno or success while sigprocmask returns
* -1 and sets errno.
*/
if (error == -1)
error = errno;
return (error);
if (_sigprocmask(how, set, oset))
return (errno);
return (0);
}
__weak_reference(_pthread_kill, pthread_kill);
__weak_reference(_sigsuspend, sigsuspend);
int
_pthread_kill(pthread_t pthread, int sig)
_sigsuspend(const sigset_t * set)
{
int error;
struct pthread *curthread = _get_curthread();
sigset_t newset;
const sigset_t *pset;
int oldcancel;
int ret;
if (sig < 0 || sig > NSIG)
return (EINVAL);
if (_thread_initial == NULL)
_thread_init();
error = _find_thread(pthread);
if (error != 0)
return (error);
if (SIGISMEMBER(*set, SIGCANCEL)) {
newset = *set;
SIGDELSET(newset, SIGCANCEL);
pset = &newset;
} else
pset = set;
/*
* A 0 signal means do error-checking but don't send signal.
*/
if (sig == 0)
return (0);
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_sigsuspend(pset);
_thr_cancel_leave(curthread, oldcancel);
return (thr_kill(pthread->thr_id, sig));
return (ret);
}
__weak_reference(__sigwait, sigwait);
__weak_reference(__sigtimedwait, sigtimedwait);
__weak_reference(__sigwaitinfo, sigwaitinfo);
/*
 * sigtimedwait(2) wrapper: same as the system call, but SIGCANCEL is
 * silently removed from the wait set (it belongs to the library), and
 * a cancellation window is opened around the kernel wait.
 */
int
__sigtimedwait(const sigset_t *set, siginfo_t *info,
	const struct timespec * timeout)
{
	struct pthread *curthread = _get_curthread();
	sigset_t maskcopy;
	const sigset_t *waitset;
	int oldcancel;
	int ret;

	waitset = set;
	if (SIGISMEMBER(*set, SIGCANCEL)) {
		maskcopy = *set;
		SIGDELSET(maskcopy, SIGCANCEL);
		waitset = &maskcopy;
	}
	oldcancel = _thr_cancel_enter(curthread);
	ret = __sys_sigtimedwait(waitset, info, timeout);
	_thr_cancel_leave(curthread, oldcancel);
	return (ret);
}
/*
 * sigwaitinfo(2) wrapper: same as the system call, but SIGCANCEL is
 * silently removed from the wait set (it belongs to the library), and
 * a cancellation window is opened around the kernel wait.
 */
int
__sigwaitinfo(const sigset_t *set, siginfo_t *info)
{
	struct pthread *curthread = _get_curthread();
	sigset_t maskcopy;
	const sigset_t *waitset;
	int oldcancel;
	int ret;

	waitset = set;
	if (SIGISMEMBER(*set, SIGCANCEL)) {
		maskcopy = *set;
		SIGDELSET(maskcopy, SIGCANCEL);
		waitset = &maskcopy;
	}
	oldcancel = _thr_cancel_enter(curthread);
	ret = __sys_sigwaitinfo(waitset, info);
	_thr_cancel_leave(curthread, oldcancel);
	return (ret);
}
/*
 * sigwait(2) wrapper: same as the system call, but SIGCANCEL is
 * silently removed from the wait set (it belongs to the library), and
 * a cancellation window is opened around the kernel wait.
 */
int
__sigwait(const sigset_t *set, int *sig)
{
	struct pthread *curthread = _get_curthread();
	sigset_t maskcopy;
	const sigset_t *waitset;
	int oldcancel;
	int ret;

	waitset = set;
	if (SIGISMEMBER(*set, SIGCANCEL)) {
		maskcopy = *set;
		SIGDELSET(maskcopy, SIGCANCEL);
		waitset = &maskcopy;
	}
	oldcancel = _thr_cancel_enter(curthread);
	ret = __sys_sigwait(waitset, sig);
	_thr_cancel_leave(curthread, oldcancel);
	return (ret);
}

View File

@ -31,23 +31,22 @@
*
* $FreeBSD$
*/
#include <stdlib.h>
#include <errno.h>
#include <signal.h>
#include <pthread.h>
#include "thr_private.h"
__weak_reference(_pthread_condattr_destroy, pthread_condattr_destroy);
__weak_reference(_pthread_sigmask, pthread_sigmask);
extern int
_sigprocmask(int how, const sigset_t *set, sigset_t *oset);
int
_pthread_condattr_destroy(pthread_condattr_t *attr)
_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
int ret;
if (attr == NULL || *attr == NULL) {
ret = EINVAL;
} else {
free(*attr);
*attr = NULL;
ret = 0;
}
return(ret);
/* use our overridden verion of _sigprocmask */
if (_sigprocmask(how, set, oset))
return (errno);
return (0);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
* Copyright (c) 1996 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,23 +31,20 @@
*
* $FreeBSD$
*/
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>
#include "thr_private.h"
#include <pthread_np.h>
__weak_reference(_pthread_mutexattr_destroy, pthread_mutexattr_destroy);
__weak_reference(_pthread_single_np, pthread_single_np);
int
_pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
int _pthread_single_np()
{
int ret;
if (attr == NULL || *attr == NULL) {
ret = EINVAL;
} else {
free(*attr);
*attr = NULL;
ret = 0;
}
return(ret);
/* Enter single-threaded (non-POSIX) scheduling mode: */
pthread_suspend_all_np();
/*
* XXX - Do we want to do this?
* __is_threaded = 0;
*/
return (0);
}

View File

@ -31,23 +31,17 @@
*
* $FreeBSD$
*/
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include "thr_private.h"
struct pthread_key {
spinlock_t lock;
volatile int allocated;
volatile int count;
int seqno;
void (*destructor) ();
};
/* Static variables: */
static struct pthread_key key_table[PTHREAD_KEYS_MAX];
struct pthread_key _thread_keytable[PTHREAD_KEYS_MAX];
__weak_reference(_pthread_key_create, pthread_key_create);
__weak_reference(_pthread_key_delete, pthread_key_delete);
@ -56,44 +50,49 @@ __weak_reference(_pthread_setspecific, pthread_setspecific);
int
_pthread_key_create(pthread_key_t * key, void (*destructor) (void *))
_pthread_key_create(pthread_key_t *key, void (*destructor) (void *))
{
for ((*key) = 0; (*key) < PTHREAD_KEYS_MAX; (*key)++) {
/* Lock the key table entry: */
_SPINLOCK(&key_table[*key].lock);
struct pthread *curthread = _get_curthread();
int i;
if (key_table[(*key)].allocated == 0) {
key_table[(*key)].allocated = 1;
key_table[(*key)].destructor = destructor;
key_table[(*key)].seqno++;
/* Lock the key table: */
THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
for (i = 0; i < PTHREAD_KEYS_MAX; i++) {
/* Unlock the key table entry: */
_SPINUNLOCK(&key_table[*key].lock);
if (_thread_keytable[i].allocated == 0) {
_thread_keytable[i].allocated = 1;
_thread_keytable[i].destructor = destructor;
_thread_keytable[i].seqno++;
/* Unlock the key table: */
THR_LOCK_RELEASE(curthread, &_keytable_lock);
*key = i;
return (0);
}
/* Unlock the key table entry: */
_SPINUNLOCK(&key_table[*key].lock);
}
/* Unlock the key table: */
THR_LOCK_RELEASE(curthread, &_keytable_lock);
return (EAGAIN);
}
int
_pthread_key_delete(pthread_key_t key)
{
struct pthread *curthread = _get_curthread();
int ret = 0;
if (key < PTHREAD_KEYS_MAX) {
/* Lock the key table entry: */
_SPINLOCK(&key_table[key].lock);
if ((unsigned int)key < PTHREAD_KEYS_MAX) {
/* Lock the key table: */
THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
if (key_table[key].allocated)
key_table[key].allocated = 0;
if (_thread_keytable[key].allocated)
_thread_keytable[key].allocated = 0;
else
ret = EINVAL;
/* Unlock the key table entry: */
_SPINUNLOCK(&key_table[key].lock);
/* Unlock the key table: */
THR_LOCK_RELEASE(curthread, &_keytable_lock);
} else
ret = EINVAL;
return (ret);
@ -102,49 +101,57 @@ _pthread_key_delete(pthread_key_t key)
void
_thread_cleanupspecific(void)
{
struct pthread *curthread = _get_curthread();
void (*destructor)( void *);
void *data = NULL;
int key;
int itr;
void (*destructor)( void *);
int i;
for (itr = 0; itr < PTHREAD_DESTRUCTOR_ITERATIONS; itr++) {
for (key = 0; key < PTHREAD_KEYS_MAX; key++) {
if (curthread->specific_data_count > 0) {
/* Lock the key table entry: */
_SPINLOCK(&key_table[key].lock);
destructor = NULL;
if (curthread->specific == NULL)
return;
if (key_table[key].allocated &&
(curthread->specific[key].data != NULL)) {
if (curthread->specific[key].seqno ==
key_table[key].seqno) {
data = (void *) curthread->specific[key].data;
destructor = key_table[key].destructor;
}
curthread->specific[key].data = NULL;
curthread->specific_data_count--;
/* Lock the key table: */
THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
for (i = 0; (i < PTHREAD_DESTRUCTOR_ITERATIONS) &&
(curthread->specific_data_count > 0); i++) {
for (key = 0; (key < PTHREAD_KEYS_MAX) &&
(curthread->specific_data_count > 0); key++) {
destructor = NULL;
if (_thread_keytable[key].allocated &&
(curthread->specific[key].data != NULL)) {
if (curthread->specific[key].seqno ==
_thread_keytable[key].seqno) {
data = (void *)
curthread->specific[key].data;
destructor = _thread_keytable[key].destructor;
}
curthread->specific[key].data = NULL;
curthread->specific_data_count--;
}
/* Unlock the key table entry: */
_SPINUNLOCK(&key_table[key].lock);
/*
* If there is a destructore, call it
* with the key table entry unlocked:
*/
if (destructor != NULL) {
/*
* If there is a destructore, call it
* with the key table entry unlocked:
* Don't hold the lock while calling the
* destructor:
*/
if (destructor)
destructor(data);
} else {
free(curthread->specific);
curthread->specific = NULL;
return;
THR_LOCK_RELEASE(curthread, &_keytable_lock);
destructor(data);
THR_LOCK_ACQUIRE(curthread, &_keytable_lock);
}
}
}
if (curthread->specific != NULL) {
free(curthread->specific);
curthread->specific = NULL;
}
THR_LOCK_RELEASE(curthread, &_keytable_lock);
free(curthread->specific);
curthread->specific = NULL;
if (curthread->specific_data_count > 0)
stderr_debug("Thread %p has exited with leftover "
"thread-specific data after %d destructor iterations\n",
curthread, PTHREAD_DESTRUCTOR_ITERATIONS);
}
static inline struct pthread_specific_elem *
@ -164,23 +171,24 @@ pthread_key_allocate_data(void)
int
_pthread_setspecific(pthread_key_t key, const void *value)
{
struct pthread *pthread;
int ret = 0;
pthread_t pthread = curthread;
/* Point to the running thread: */
pthread = _get_curthread();
if ((pthread->specific) ||
(pthread->specific = pthread_key_allocate_data())) {
if (key < PTHREAD_KEYS_MAX) {
if (key_table[key].allocated) {
if ((unsigned int)key < PTHREAD_KEYS_MAX) {
if (_thread_keytable[key].allocated) {
if (pthread->specific[key].data == NULL) {
if (value != NULL)
pthread->specific_data_count++;
} else {
if (value == NULL)
pthread->specific_data_count--;
}
} else if (value == NULL)
pthread->specific_data_count--;
pthread->specific[key].data = value;
pthread->specific[key].seqno =
key_table[key].seqno;
_thread_keytable[key].seqno;
ret = 0;
} else
ret = EINVAL;
@ -194,14 +202,17 @@ _pthread_setspecific(pthread_key_t key, const void *value)
void *
_pthread_getspecific(pthread_key_t key)
{
pthread_t pthread = curthread;
struct pthread *pthread;
void *data;
/* Point to the running thread: */
pthread = _get_curthread();
/* Check if there is specific data: */
if (pthread->specific != NULL && key < PTHREAD_KEYS_MAX) {
if (pthread->specific != NULL && (unsigned int)key < PTHREAD_KEYS_MAX) {
/* Check if this key has been used before: */
if (key_table[key].allocated &&
(pthread->specific[key].seqno == key_table[key].seqno)) {
if (_thread_keytable[key].allocated &&
(pthread->specific[key].seqno == _thread_keytable[key].seqno)) {
/* Return the value: */
data = (void *) pthread->specific[key].data;
} else {

View File

@ -1,5 +1,4 @@
/*
* Copyright (c) 2004 Michael Telahun Makonnen <mtm@FreeBSD.Org>
* Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
@ -35,141 +34,92 @@
*/
#include <sys/types.h>
#include <machine/atomic.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sched.h>
#include <pthread.h>
#include <unistd.h>
#include <libc_private.h>
#include <spinlock.h>
#include "thr_private.h"
#define THR_SPIN_MAGIC 0xdadadada
#define THR_SPIN_UNOWNED (void *)0
#define MAGIC_TEST_RETURN_ON_FAIL(l) \
do { \
if ((l) == NULL || (l)->s_magic != THR_SPIN_MAGIC) \
return (EINVAL); \
} while(0)
__weak_reference(_pthread_spin_destroy, pthread_spin_destroy);
__weak_reference(_pthread_spin_init, pthread_spin_init);
__weak_reference(_pthread_spin_lock, pthread_spin_lock);
__weak_reference(_pthread_spin_trylock, pthread_spin_trylock);
__weak_reference(_pthread_spin_unlock, pthread_spin_unlock);
int
_pthread_spin_destroy(pthread_spinlock_t *lock)
{
MAGIC_TEST_RETURN_ON_FAIL((*lock));
if ((*lock)->s_owner == THR_SPIN_UNOWNED) {
(*lock)->s_magic = 0;
free((*lock));
*lock = NULL;
return (0);
}
return (EBUSY);
}
int
_pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{
struct pthread_spinlock *s;
s = (struct pthread_spinlock *)malloc(sizeof(struct pthread_spinlock));
if (s == NULL)
return (ENOMEM);
s->s_magic = THR_SPIN_MAGIC;
s->s_owner = THR_SPIN_UNOWNED;
*lock = s;
return (0);
}
#define MAX_SPINLOCKS 20
/*
* If the caller sets nonblocking to 1, this function will return
* immediately without acquiring the lock it is owned by another thread.
* If set to 0, it will keep spinning until it acquires the lock.
* These data structures are used to trace all spinlocks
* in libc.
*/
int
_pthread_spin_lock(pthread_spinlock_t *lock)
{
MAGIC_TEST_RETURN_ON_FAIL(*lock);
if ((*lock)->s_owner == curthread)
return (EDEADLK);
while (atomic_cmpset_acq_ptr(&(*lock)->s_owner, THR_SPIN_UNOWNED,
(void *)curthread) != 1)
; /* SPIN */
return (0);
}
struct spinlock_extra {
spinlock_t *owner;
};
int
_pthread_spin_trylock(pthread_spinlock_t *lock)
{
MAGIC_TEST_RETURN_ON_FAIL(*lock);
if (atomic_cmpset_acq_ptr(&(*lock)->s_owner, THR_SPIN_UNOWNED,
(void *)curthread) == 1)
return (0);
return (EBUSY);
}
static umtx_t spinlock_static_lock;
static struct spinlock_extra extra[MAX_SPINLOCKS];
static int spinlock_count;
static int initialized;
int
_pthread_spin_unlock(pthread_spinlock_t *lock)
{
MAGIC_TEST_RETURN_ON_FAIL(*lock);
if (atomic_cmpset_rel_ptr(&(*lock)->s_owner, (void *)curthread,
THR_SPIN_UNOWNED) == 1)
return (0);
return (EPERM);
}
static void init_spinlock(spinlock_t *lck);
/*
* These are for compatability only. Spinlocks of this type
* are deprecated.
*/
void
_spinunlock(spinlock_t *lck)
{
if (umtx_unlock((struct umtx *)lck, curthread->thr_id))
abort();
THR_UMTX_UNLOCK(_get_curthread(), (umtx_t *)&lck->access_lock);
}
/*
* Lock a location for the running thread. Yield to allow other
* threads to run if this thread is blocked because the lock is
* not available. Note that this function does not sleep. It
* assumes that the lock will be available very soon.
*/
void
_spinlock(spinlock_t *lck)
{
if (umtx_lock((struct umtx *)lck, curthread->thr_id))
abort();
if (!__isthreaded)
PANIC("Spinlock called when not threaded.");
if (!initialized)
PANIC("Spinlocks not initialized.");
if (lck->fname == NULL)
init_spinlock(lck);
THR_UMTX_LOCK(_get_curthread(), (umtx_t *)&lck->access_lock);
}
int
_spintrylock(spinlock_t *lck)
{
int error;
error = umtx_lock((struct umtx *)lck, curthread->thr_id);
if (error != 0 && error != EBUSY)
abort();
return (error);
}
/*
* Lock a location for the running thread. Yield to allow other
* threads to run if this thread is blocked because the lock is
* not available. Note that this function does not sleep. It
* assumes that the lock will be available very soon.
*
* This function checks if the running thread has already locked the
* location, warns if this occurs and creates a thread dump before
* returning.
*/
void
_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
if (umtx_lock((struct umtx *)lck, curthread->thr_id))
abort();
_spinlock(lck);
}
static void
init_spinlock(spinlock_t *lck)
{
static int count = 0;
THR_UMTX_LOCK(_get_curthread(), &spinlock_static_lock);
if ((lck->fname == NULL) && (spinlock_count < MAX_SPINLOCKS)) {
lck->fname = (char *)&extra[spinlock_count];
extra[spinlock_count].owner = lck;
spinlock_count++;
}
THR_UMTX_UNLOCK(_get_curthread(), &spinlock_static_lock);
if (lck->fname == NULL && ++count < 5)
stderr_debug("Warning: exceeded max spinlocks");
}
void
_thr_spinlock_init(void)
{
int i;
_thr_umtx_init(&spinlock_static_lock);
if (initialized != 0) {
/*
* called after fork() to reset state of libc spin locks,
* it is not quite right since libc may be in inconsistent
* state, resetting the locks to allow current thread to be
* able to hold them may not help things too much, but
* anyway, we do our best.
* it is better to do pthread_atfork in libc.
*/
for (i = 0; i < spinlock_count; i++)
_thr_umtx_init((umtx_t *)&extra[i].owner->access_lock);
} else {
initialized = 1;
}
}

View File

@ -26,12 +26,13 @@
*
* $FreeBSD$
*/
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <pthread.h>
#include "thr_private.h"
/* Spare thread stack. */
@ -43,31 +44,32 @@ struct stack {
};
/*
* Default sized (stack and guard) spare stack queue. Stacks are cached to
* avoid additional complexity managing mmap()ed stack regions. Spare stacks
* are used in LIFO order to increase cache locality.
* Default sized (stack and guard) spare stack queue. Stacks are cached
* to avoid additional complexity managing mmap()ed stack regions. Spare
* stacks are used in LIFO order to increase cache locality.
*/
static LIST_HEAD(, stack) _dstackq = LIST_HEAD_INITIALIZER(_dstackq);
static LIST_HEAD(, stack) dstackq = LIST_HEAD_INITIALIZER(dstackq);
/*
* Miscellaneous sized (non-default stack and/or guard) spare stack queue.
* Stacks are cached to avoid additional complexity managing mmap()ed stack
* regions. This list is unordered, since ordering on both stack size and guard
* size would be more trouble than it's worth. Stacks are allocated from this
* cache on a first size match basis.
* Stacks are cached to avoid additional complexity managing mmap()ed
* stack regions. This list is unordered, since ordering on both stack
* size and guard size would be more trouble than it's worth. Stacks are
* allocated from this cache on a first size match basis.
*/
static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
static LIST_HEAD(, stack) mstackq = LIST_HEAD_INITIALIZER(mstackq);
/**
* Base address of the last stack allocated (including its red zone, if there is
* one). Stacks are allocated contiguously, starting beyond the top of the main
* stack. When a new stack is created, a red zone is typically created
* (actually, the red zone is simply left unmapped) above the top of the stack,
* such that the stack will not be able to grow all the way to the bottom of the
* next stack. This isn't fool-proof. It is possible for a stack to grow by a
* large amount, such that it grows into the next stack, and as long as the
* memory within the red zone is never accessed, nothing will prevent one thread
* stack from trouncing all over the next.
* Base address of the last stack allocated (including its red zone, if
* there is one). Stacks are allocated contiguously, starting beyond the
* top of the main stack. When a new stack is created, a red zone is
* typically created (actually, the red zone is mapped with PROT_NONE) above
* the top of the stack, such that the stack will not be able to grow all
* the way to the bottom of the next stack. This isn't fool-proof. It is
* possible for a stack to grow by a large amount, such that it grows into
* the next stack, and as long as the memory within the red zone is never
* accessed, nothing will prevent one thread stack from trouncing all over
* the next.
*
* low memory
* . . . . . . . . . . . . . . . . . .
@ -78,7 +80,7 @@ static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
* | Red Zone (guard page) | red zone for 2nd thread
* | |
* +-----------------------------------+
* | stack 2 - _pthread_stack_default | top of 2nd thread stack
* | stack 2 - _thr_stack_default | top of 2nd thread stack
* | |
* | |
* | |
@ -89,7 +91,7 @@ static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
* | Red Zone | red zone for 1st thread
* | |
* +-----------------------------------+
* | stack 1 - _pthread_stack_default | top of 1st thread stack
* | stack 1 - _thr_stack_default | top of 1st thread stack
* | |
* | |
* | |
@ -100,7 +102,7 @@ static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
* | Red Zone |
* | | red zone for main thread
* +-----------------------------------+
* | USRSTACK - _pthread_stack_initial | top of main thread stack
* | USRSTACK - _thr_stack_initial | top of main thread stack
* | | ^
* | | |
* | | |
@ -111,48 +113,59 @@ static LIST_HEAD(, stack) _mstackq = LIST_HEAD_INITIALIZER(_mstackq);
* high memory
*
*/
static void * last_stack;
static void *last_stack = NULL;
void *
_thread_stack_alloc(size_t stacksize, size_t guardsize)
/*
* Round size up to the nearest multiple of
* _thr_page_size.
*/
static inline size_t
round_up(size_t size)
{
void *stack = NULL;
struct stack *spare_stack;
size_t stack_size;
if (size % _thr_page_size != 0)
size = ((size / _thr_page_size) + 1) *
_thr_page_size;
return size;
}
int
_thr_stack_alloc(struct pthread_attr *attr)
{
struct pthread *curthread = _get_curthread();
struct stack *spare_stack;
size_t stacksize;
size_t guardsize;
char *stackaddr;
/*
* Round up stack size to nearest multiple of _pthread_page_size,
* so that mmap() * will work. If the stack size is not an even
* multiple, we end up initializing things such that there is unused
* space above the beginning of the stack, so the stack sits snugly
* against its guard.
* Round up stack size to nearest multiple of _thr_page_size so
* that mmap() * will work. If the stack size is not an even
* multiple, we end up initializing things such that there is
* unused space above the beginning of the stack, so the stack
* sits snugly against its guard.
*/
if (stacksize % _pthread_page_size != 0)
stack_size = ((stacksize / _pthread_page_size) + 1) *
_pthread_page_size;
else
stack_size = stacksize;
stacksize = round_up(attr->stacksize_attr);
guardsize = round_up(attr->guardsize_attr);
attr->stackaddr_attr = NULL;
attr->flags &= ~THR_STACK_USER;
/*
* Use the garbage collector lock for synchronization of the
* spare stack lists and allocations from usrstack.
*/
THREAD_LIST_LOCK(curthread);
/*
* If the stack and guard sizes are default, try to allocate a stack
* from the default-size stack cache:
*/
if (stack_size == _pthread_stack_default &&
guardsize == _pthread_guard_default) {
/*
* Use the garbage collector mutex for synchronization of the
* spare stack list.
*/
STACK_LOCK;
if ((spare_stack = LIST_FIRST(&_dstackq)) != NULL) {
/* Use the spare stack. */
if ((stacksize == THR_STACK_DEFAULT) &&
(guardsize == _thr_guard_default)) {
if ((spare_stack = LIST_FIRST(&dstackq)) != NULL) {
/* Use the spare stack. */
LIST_REMOVE(spare_stack, qe);
stack = spare_stack->stackaddr;
attr->stackaddr_attr = spare_stack->stackaddr;
}
/* Unlock the garbage collector mutex. */
STACK_UNLOCK;
}
/*
* The user specified a non-default stack and/or guard size, so try to
@ -160,76 +173,83 @@ _thread_stack_alloc(size_t stacksize, size_t guardsize)
* rounded up stack size (stack_size) in the search:
*/
else {
/*
* Use the garbage collector mutex for synchronization of the
* spare stack list.
*/
STACK_LOCK;
LIST_FOREACH(spare_stack, &_mstackq, qe) {
if (spare_stack->stacksize == stack_size &&
LIST_FOREACH(spare_stack, &mstackq, qe) {
if (spare_stack->stacksize == stacksize &&
spare_stack->guardsize == guardsize) {
LIST_REMOVE(spare_stack, qe);
stack = spare_stack->stackaddr;
attr->stackaddr_attr = spare_stack->stackaddr;
break;
}
}
/* Unlock the garbage collector mutex. */
STACK_UNLOCK;
}
/* Check if a stack was not allocated from a stack cache: */
if (stack == NULL) {
if (attr->stackaddr_attr != NULL) {
/* A cached stack was found. Release the lock. */
THREAD_LIST_UNLOCK(curthread);
}
else {
/* Allocate a stack from usrstack. */
if (last_stack == NULL)
last_stack = _usrstack - _pthread_stack_initial -
_pthread_guard_default;
last_stack = _usrstack - _thr_stack_initial -
_thr_guard_default;
/* Allocate a new stack. */
stack = last_stack - stack_size;
stackaddr = last_stack - stacksize - guardsize;
/*
* Even if stack allocation fails, we don't want to try to use
* this location again, so unconditionally decrement
* Even if stack allocation fails, we don't want to try to
* use this location again, so unconditionally decrement
* last_stack. Under normal operating conditions, the most
* likely reason for an mmap() error is a stack overflow of the
* adjacent thread stack.
* likely reason for an mmap() error is a stack overflow of
* the adjacent thread stack.
*/
last_stack -= (stack_size + guardsize);
last_stack -= (stacksize + guardsize);
/* Stack: */
if (mmap(stack, stack_size, PROT_READ | PROT_WRITE, MAP_STACK,
-1, 0) == MAP_FAILED)
stack = NULL;
/* Release the lock before mmap'ing it. */
THREAD_LIST_UNLOCK(curthread);
/* Map the stack and guard page together, and split guard
page from allocated space: */
if ((stackaddr = mmap(stackaddr, stacksize+guardsize,
PROT_READ | PROT_WRITE, MAP_STACK,
-1, 0)) != MAP_FAILED &&
(guardsize == 0 ||
mprotect(stackaddr, guardsize, PROT_NONE) == 0)) {
stackaddr += guardsize;
} else {
if (stackaddr != MAP_FAILED)
munmap(stackaddr, stacksize + guardsize);
stackaddr = NULL;
}
attr->stackaddr_attr = stackaddr;
}
return (stack);
if (attr->stackaddr_attr != NULL)
return (0);
else
return (-1);
}
/* This function must be called with the 'dead thread list' lock held. */
/* This function must be called with _thread_list_lock held. */
void
_thread_stack_free(void *stack, size_t stacksize, size_t guardsize)
_thr_stack_free(struct pthread_attr *attr)
{
struct stack *spare_stack;
struct stack *spare_stack;
spare_stack = (stack + stacksize - sizeof(struct stack));
/* Round stacksize up to nearest multiple of _pthread_page_size. */
if (stacksize % _pthread_page_size != 0) {
spare_stack->stacksize =
((stacksize / _pthread_page_size) + 1) *
_pthread_page_size;
} else
spare_stack->stacksize = stacksize;
spare_stack->guardsize = guardsize;
spare_stack->stackaddr = stack;
if ((attr != NULL) && ((attr->flags & THR_STACK_USER) == 0)
&& (attr->stackaddr_attr != NULL)) {
spare_stack = (attr->stackaddr_attr + attr->stacksize_attr
- sizeof(struct stack));
spare_stack->stacksize = round_up(attr->stacksize_attr);
spare_stack->guardsize = round_up(attr->guardsize_attr);
spare_stack->stackaddr = attr->stackaddr_attr;
if (spare_stack->stacksize == _pthread_stack_default &&
spare_stack->guardsize == _pthread_guard_default) {
/* Default stack/guard size. */
LIST_INSERT_HEAD(&_dstackq, spare_stack, qe);
} else {
/* Non-default stack/guard size. */
LIST_INSERT_HEAD(&_mstackq, spare_stack, qe);
if (spare_stack->stacksize == THR_STACK_DEFAULT &&
spare_stack->guardsize == _thr_guard_default) {
/* Default stack/guard size. */
LIST_INSERT_HEAD(&dstackq, spare_stack, qe);
} else {
/* Non-default stack/guard size. */
LIST_INSERT_HEAD(&mstackq, spare_stack, qe);
}
attr->stackaddr_attr = NULL;
}
}

View File

@ -1,91 +0,0 @@
/*-
* Copyright (c) 2003 Michael Telahun Makonnen
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Problems/Questions to: Mike Makonnen <mtm@FreeBSD.Org>
*
* $FreeBSD$
*/
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/param.h>
#include <sys/queue.h>
#include <pthread.h>
#include "thr_private.h"
/*
* Lock for the process global signal actions list.
* This lock does NOT insure up-to-date-ness, only integrity.
*/
struct umtx sigactList_lock = UMTX_INITIALIZER;
/*
* proc_sigact_copyin(sig, actp)
* Copy the contents of actp into the process global
* action for signal sig.
*/
void
proc_sigact_copyin(int sig, const struct sigaction *actp)
{
UMTX_LOCK(&sigactList_lock);
bcopy((const void *)actp, (void *)&_thread_sigact[sig - 1],
sizeof(struct sigaction));
UMTX_UNLOCK(&sigactList_lock);
}
/*
* proc_sigact_copyout(sig, sigact)
* Copy the contents of the process global action for
* signal sig into sigact.
*/
void
proc_sigact_copyout(int sig, struct sigaction *actp)
{
UMTX_LOCK(&sigactList_lock);
bcopy((const void *)&_thread_sigact[sig - 1], (void *)actp,
sizeof(struct sigaction));
UMTX_UNLOCK(&sigactList_lock);
}
/*
* proc_sigact_sigaction(sig)
* Obtains the struct sigaction associated with signal sig.
* The address of the structure is the return value. It is
* upto the caller to check the value of the structure at
* that address against SIG_IGN and SIG_DFL before trying
* to dereference it.
*/
struct sigaction *
proc_sigact_sigaction(int sig)
{
struct sigaction *actp;
UMTX_LOCK(&sigactList_lock);
actp = &_thread_sigact[sig - 1];
UMTX_UNLOCK(&sigactList_lock);
return (actp);
}

View File

@ -31,10 +31,14 @@
*
* $FreeBSD$
*/
#include <errno.h>
#include <pthread.h>
#include "thr_private.h"
static void suspend_common(struct pthread *thread);
__weak_reference(_pthread_suspend_np, pthread_suspend_np);
__weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np);
@ -42,12 +46,54 @@ __weak_reference(_pthread_suspend_all_np, pthread_suspend_all_np);
int
_pthread_suspend_np(pthread_t thread)
{
/* XXXTHR */
return (ENOTSUP);
struct pthread *curthread = _get_curthread();
int ret;
/* Suspending the current thread doesn't make sense. */
if (thread == _get_curthread())
ret = EDEADLK;
/* Add a reference to the thread: */
else if ((ret = _thr_ref_add(curthread, thread, /*include dead*/0))
== 0) {
/* Lock the threads scheduling queue: */
THR_THREAD_LOCK(curthread, thread);
suspend_common(thread);
/* Unlock the threads scheduling queue: */
THR_THREAD_UNLOCK(curthread, thread);
/* Don't forget to remove the reference: */
_thr_ref_delete(curthread, thread);
}
return (ret);
}
void
_pthread_suspend_all_np(void)
{
/* XXXTHR */
struct pthread *curthread = _get_curthread();
struct pthread *thread;
/* Take the thread list lock: */
THREAD_LIST_LOCK(curthread);
TAILQ_FOREACH(thread, &_thread_list, tle) {
if (thread != curthread) {
THR_THREAD_LOCK(curthread, thread);
suspend_common(thread);
THR_THREAD_UNLOCK(curthread, thread);
}
}
/* Release the thread list lock: */
THREAD_LIST_UNLOCK(curthread);
}
static void
suspend_common(struct pthread *thread)
{
if (thread->state != PS_DEAD) {
thread->flags |= THR_FLAGS_NEED_SUSPEND;
_thr_send_sig(thread, SIGCANCEL);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1996 Jeffrey Hsu <hsu@freebsd.org>.
* Copyright (c) 1998 Daniel Eischen <eischen@vigrid.com>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -12,12 +12,12 @@
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by John Birrell.
* This product includes software developed by Daniel Eischen.
* 4. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* THIS SOFTWARE IS PROVIDED BY DANIEL EISCHEN AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
@ -31,26 +31,25 @@
*
* $FreeBSD$
*/
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>
#include <pthread_np.h>
#include "thr_private.h"
__weak_reference(_pthread_mutexattr_init, pthread_mutexattr_init);
__weak_reference(_pthread_switch_add_np, pthread_switch_add_np);
__weak_reference(_pthread_switch_delete_np, pthread_switch_delete_np);
int
_pthread_mutexattr_init(pthread_mutexattr_t *attr)
_pthread_switch_add_np(pthread_switch_routine_t routine)
{
pthread_mutexattr_t pattr;
if ((pattr = (pthread_mutexattr_t)
malloc(sizeof(struct pthread_mutex_attr))) == NULL)
return (ENOMEM);
memcpy(pattr, &pthread_mutexattr_default,
sizeof(struct pthread_mutex_attr));
*attr = pattr;
return (0);
return (ENOTSUP);
}
int
_pthread_switch_delete_np(pthread_switch_routine_t routine)
{
return (ENOTSUP);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998 John Birrell <jb@cimlogic.com.au>.
* Copyright (c) 2004 David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -31,32 +31,29 @@
*
* $FreeBSD$
*/
#include <errno.h>
#include <stddef.h>
#include <pthread.h>
#include <rtld.h>
#include "thr_private.h"
/* Find a thread in the linked list of active threads: */
int
_find_thread(pthread_t pthread)
{
pthread_t pthread1;
/* A collection of symbols needed by debugger */
if (pthread == NULL)
return(EINVAL);
THREAD_LIST_LOCK;
/* Search for the specified thread: */
pthread1 = NULL;
TAILQ_FOREACH(pthread1, &_thread_list, tle) {
if (pthread == pthread1)
break;
}
THREAD_LIST_UNLOCK;
if (pthread1 != NULL && pthread1->magic != PTHREAD_MAGIC)
return (EINVAL);
/* Return zero if the thread exists: */
return ((pthread1 != NULL) ? 0:ESRCH);
}
/* int _libthr_debug */
int _thread_off_tcb = offsetof(struct pthread, tcb);
int _thread_off_tid = offsetof(struct pthread, tid);
int _thread_off_next = offsetof(struct pthread, tle.tqe_next);
int _thread_off_attr_flags = offsetof(struct pthread, attr.flags);
int _thread_off_thr_locklevel = offsetof(struct pthread, locklevel);
int _thread_off_linkmap = offsetof(Obj_Entry, linkmap);
int _thread_off_tlsindex = offsetof(Obj_Entry, tlsindex);
int _thread_off_isdead = offsetof(struct pthread, terminated);
int _thread_size_key = sizeof(struct pthread_key);
int _thread_off_key_allocated = offsetof(struct pthread_key, allocated);
int _thread_off_key_destructor = offsetof(struct pthread_key, destructor);
int _thread_max_keys = PTHREAD_KEYS_MAX;
int _thread_off_dtv = DTV_OFFSET;
int _thread_off_state = offsetof(struct pthread, state);
int _thread_state_running = PS_RUNNING;
int _thread_state_zoombie = PS_DEAD;

View File

@ -1,7 +1,7 @@
/*
* Copyright (c) 2000 Jason Evans <jasone@freebsd.org>.
* Copyright (c) 2002 Daniel M. Eischen <deischen@freebsd.org>
* Copyright (c) 2003 Jeff Roberson <jeff@freebsd.org>
* Copyright (C) 2005 David Xu <davidxu@freebsd.org>.
* Copyright (c) 2003 Daniel Eischen <deischen@freebsd.org>.
* Copyright (C) 2000 Jason Evans <jasone@freebsd.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@ -64,24 +64,21 @@
*
*/
#include <sys/cdefs.h>
#include <sys/fcntl.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/select.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <aio.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
@ -89,33 +86,35 @@
#include <string.h>
#include <termios.h>
#include <unistd.h>
#include <pthread.h>
#include "thr_private.h"
extern spinlock_t *__malloc_lock;
extern int __creat(const char *, mode_t);
extern int __sleep(unsigned int);
extern int __sys_nanosleep(const struct timespec *, struct timespec *);
extern int __sys_select(int, fd_set *, fd_set *, fd_set *, struct timeval *);
extern int __sys_sigaction(int, const struct sigaction *, struct sigaction *);
extern int __pause(void);
extern int __pselect(int count, fd_set *rfds, fd_set *wfds, fd_set *efds,
const struct timespec *timo, const sigset_t *mask);
extern unsigned int __sleep(unsigned int);
extern int __system(const char *);
extern int __tcdrain(int);
extern pid_t __wait(int *);
extern pid_t __sys_wait4(pid_t, int *, int, struct rusage *);
extern pid_t __waitpid(pid_t, int *, int);
__weak_reference(_accept, accept);
__weak_reference(__accept, accept);
int
_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
__accept(int s, struct sockaddr *addr, socklen_t *addrlen)
{
struct pthread *curthread;
int oldcancel;
int ret;
_thread_enter_cancellation_point();
curthread = _get_curthread();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_accept(s, addr, addrlen);
_thread_leave_cancellation_point();
return (ret);
_thr_cancel_leave(curthread, oldcancel);
return (ret);
}
__weak_reference(_aio_suspend, aio_suspend);
@ -124,222 +123,162 @@ int
_aio_suspend(const struct aiocb * const iocbs[], int niocb, const struct
timespec *timeout)
{
int ret;
_thread_enter_cancellation_point();
ret = __sys_aio_suspend(iocbs, niocb, timeout);
_thread_leave_cancellation_point();
return ret;
}
__weak_reference(_close, close);
/*
 * Cancellation-point wrapper for close(2).
 *
 * Brackets the syscall with enter/leave cancellation-point calls so a
 * pending pthread cancellation can take effect while the thread is in
 * (or around) the kernel, as POSIX requires for close().
 */
int
_close(int fd)
{
	int ret;
	_thread_enter_cancellation_point();
	ret = __sys_close(fd);
	_thread_leave_cancellation_point();
	return ret;
}
__weak_reference(_connect, connect);
int
_connect(int s, const struct sockaddr *n, socklen_t l)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
int ret;
_thread_enter_cancellation_point();
ret = __sys_connect(s, n, l);
_thread_leave_cancellation_point();
return ret;
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_aio_suspend(iocbs, niocb, timeout);
_thr_cancel_leave(curthread, oldcancel);
return (ret);
}
__weak_reference(_creat, creat);
__weak_reference(__close, close);
int
_creat(const char *path, mode_t mode)
__close(int fd)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
int ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_close(fd);
_thr_cancel_leave(curthread, oldcancel);
return (ret);
}
__weak_reference(__connect, connect);
int
__connect(int fd, const struct sockaddr *name, socklen_t namelen)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
int ret;
curthread = _get_curthread();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_connect(fd, name, namelen);
_thr_cancel_leave(curthread, oldcancel);
return (ret);
}
__weak_reference(___creat, creat);
int
___creat(const char *path, mode_t mode)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
int ret;
oldcancel = _thr_cancel_enter(curthread);
ret = __creat(path, mode);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
}
__weak_reference(_fcntl, fcntl);
__weak_reference(__fcntl, fcntl);
int
_fcntl(int fd, int cmd,...)
__fcntl(int fd, int cmd,...)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
int ret;
va_list ap;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
va_start(ap, cmd);
switch (cmd) {
case F_DUPFD:
case F_SETFD:
case F_SETFL:
ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
break;
case F_GETFD:
case F_GETFL:
ret = __sys_fcntl(fd, cmd);
break;
default:
ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
case F_DUPFD:
ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
break;
case F_SETFD:
case F_SETFL:
ret = __sys_fcntl(fd, cmd, va_arg(ap, int));
break;
case F_GETFD:
case F_GETFL:
ret = __sys_fcntl(fd, cmd);
break;
default:
ret = __sys_fcntl(fd, cmd, va_arg(ap, void *));
}
va_end(ap);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
return (ret);
}
__weak_reference(_fork, fork);
__weak_reference(__fsync, fsync);
int
_fork(int fd)
{
int ret;
struct pthread_atfork *af;
_pthread_mutex_lock(&_atfork_mutex);
/* Run down atfork prepare handlers. */
TAILQ_FOREACH_REVERSE(af, &_atfork_list, atfork_head, qe) {
if (af->prepare != NULL)
af->prepare();
}
/*
* Fork a new process.
* XXX - The correct way to handle __malloc_lock is to have
* the threads libraries (or libc) install fork handlers for it
* in their initialization routine. We should probably
* do that for all the locks in libc.
*/
if (__isthreaded && __malloc_lock != NULL)
_SPINLOCK(__malloc_lock);
ret = __sys_fork();
if (ret == 0) {
__isthreaded = 0;
if (__malloc_lock != NULL)
memset(__malloc_lock, 0, sizeof(spinlock_t));
init_tdlist(curthread, 1);
init_td_common(curthread, NULL, 1);
_mutex_reinit(&_atfork_mutex);
/* Run down atfork child handlers. */
TAILQ_FOREACH(af, &_atfork_list, qe) {
if (af->child != NULL)
af->child();
}
} else if (ret != -1) {
/* Run down atfork parent handlers. */
TAILQ_FOREACH(af, &_atfork_list, qe) {
if (af->parent != NULL)
af->parent();
}
}
if (ret != 0) {
if (__isthreaded && __malloc_lock != NULL)
_SPINUNLOCK(__malloc_lock);
_pthread_mutex_unlock(&_atfork_mutex);
}
return ret;
}
__weak_reference(_fsync, fsync);
int
_fsync(int fd)
__fsync(int fd)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
int ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_fsync(fd);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
return (ret);
}
__weak_reference(_msgrcv, msgrcv);
__weak_reference(__msync, msync);
/*
 * Cancellation-point wrapper for msgrcv(2) (SysV message receive).
 * The receive may block indefinitely, so the syscall is bracketed by
 * cancellation-point enter/leave calls.
 */
int
_msgrcv(int id, void *p, size_t sz, long t, int f)
{
	int ret;
	_thread_enter_cancellation_point();
	ret = __sys_msgrcv(id, p, sz, t, f);
	_thread_leave_cancellation_point();
	return ret;
}
__weak_reference(_msgsnd, msgsnd);
/*
 * Cancellation-point wrapper for msgsnd(2) (SysV message send).
 * Sending can block when the queue is full, hence the cancellation
 * bracketing around the syscall.
 */
int
_msgsnd(int id, const void *p, size_t sz, int f)
{
	int ret;
	_thread_enter_cancellation_point();
	ret = __sys_msgsnd(id, p, sz, f);
	_thread_leave_cancellation_point();
	return ret;
}
__weak_reference(_msync, msync);
int
_msync(void *addr, size_t len, int flags)
__msync(void *addr, size_t len, int flags)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
int ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_msync(addr, len, flags);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
}
__weak_reference(_nanosleep, nanosleep);
__weak_reference(__nanosleep, nanosleep);
int
_nanosleep(const struct timespec * time_to_sleep, struct timespec *
time_remaining)
__nanosleep(const struct timespec *time_to_sleep,
struct timespec *time_remaining)
{
int ret;
struct pthread *curthread = _get_curthread();
int oldcancel;
int ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_nanosleep(time_to_sleep, time_remaining);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
return (ret);
}
__weak_reference(_open, open);
__weak_reference(__open, open);
int
_open(const char *path, int flags,...)
__open(const char *path, int flags,...)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
int ret;
int mode = 0;
va_list ap;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
/* Check if the file is being created: */
if (flags & O_CREAT) {
@ -350,325 +289,202 @@ _open(const char *path, int flags,...)
}
ret = __sys_open(path, flags, mode);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
}
/*
* The implementation in libc calls sigpause(), which is also
* a cancellation point.
*/
#if 0
__weak_reference(_pause, pause);
/*
 * Cancellation-point wrapper around libc's __pause().  Compiled out
 * (see the #if 0 above): per the preceding comment, libc implements
 * pause() via sigpause(), which is itself a cancellation point.
 * NOTE(review): declared int but returns no value -- harmless only
 * while the block stays disabled.
 */
int
_pause(void)
{
	_thread_enter_cancellation_point();
	__pause();
	_thread_leave_cancellation_point();
}
#endif
struct pthread *curthread = _get_curthread();
int oldcancel;
int ret;
__weak_reference(_poll, poll);
oldcancel = _thr_cancel_enter(curthread);
ret = __pause();
_thr_cancel_leave(curthread, oldcancel);
return ret;
}
__weak_reference(__poll, poll);
int
_poll(struct pollfd *fds, unsigned int nfds, int timeout)
__poll(struct pollfd *fds, unsigned int nfds, int timeout)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
int ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_poll(fds, nfds, timeout);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
}
/* XXXFix */
#if 0
__weak_reference(_pread, pread);
/*
 * Cancellation-point wrapper for pread(2).  Currently compiled out
 * (the #if 0 above is tagged only "XXXFix" -- the exact reason is not
 * recorded here; TODO confirm before re-enabling).
 */
ssize_t
_pread(int d, void *b, size_t n, off_t o)
{
	ssize_t ret;
	_thread_enter_cancellation_point();
	ret = __sys_pread(d, b, n, o);
	_thread_leave_cancellation_point();
	return (ret);
}
#endif
/* The libc version calls select(), which is also a cancellation point. */
#if 0
extern int __pselect(int count, fd_set *rfds, fd_set *wfds, fd_set *efds,
const struct timespec *timo, const sigset_t *mask);
__weak_reference(_pselect, pselect);
int
pselect(int count, fd_set *rfds, fd_set *wfds, fd_set *efds,
_pselect(int count, fd_set *rfds, fd_set *wfds, fd_set *efds,
const struct timespec *timo, const sigset_t *mask)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
int ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __pselect(count, rfds, wfds, efds, timo, mask);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return (ret);
}
#endif
/* XXXFix */
#if 0
__weak_reference(_pwrite, pwrite);
/*
 * Cancellation-point wrapper for pwrite(2).  Currently compiled out
 * (the #if 0 above is tagged only "XXXFix" -- the exact reason is not
 * recorded here; TODO confirm before re-enabling).
 */
ssize_t
_pwrite(int d, const void *b, size_t n, off_t o)
{
	ssize_t ret;
	_thread_enter_cancellation_point();
	ret = __sys_pwrite(d, b, n, o);
	_thread_leave_cancellation_point();
	return (ret);
}
#endif
__weak_reference(_raise, raise);
int
_raise(int sig)
{
int error;
int ret;
error = pthread_kill(pthread_self(), sig);
if (error != 0) {
errno = error;
error = -1;
if (!_thr_isthreaded())
ret = kill(getpid(), sig);
else {
ret = pthread_kill(pthread_self(), sig);
if (ret != 0) {
errno = ret;
ret = -1;
}
}
return (error);
return (ret);
}
__weak_reference(_read, read);
__weak_reference(__read, read);
ssize_t
_read(int fd, void *buf, size_t nbytes)
__read(int fd, void *buf, size_t nbytes)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
ssize_t ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_read(fd, buf, nbytes);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
}
__weak_reference(_readv, readv);
__weak_reference(__readv, readv);
ssize_t
_readv(int fd, const struct iovec *iov, int iovcnt)
__readv(int fd, const struct iovec *iov, int iovcnt)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
ssize_t ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_readv(fd, iov, iovcnt);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
}
/*
* The libc implementation of recv() calls recvfrom, which
* is also a cancellation point.
*/
#if 0
__weak_reference(_recv, recv);
__weak_reference(__recvfrom, recvfrom);
ssize_t
_recv(int s, void *b, size_t l, int f)
{
ssize_t ret;
_thread_enter_cancellation_point();
ret = __sys_recv(s, b, l, f);
_thread_leave_cancellation_point();
return (ret);
}
#endif
__weak_reference(_recvfrom, recvfrom);
ssize_t
_recvfrom(int s, void *b, size_t l, int f, struct sockaddr *from,
__recvfrom(int s, void *b, size_t l, int f, struct sockaddr *from,
socklen_t *fl)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
ssize_t ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_recvfrom(s, b, l, f, from, fl);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return (ret);
}
__weak_reference(_recvmsg, recvmsg);
__weak_reference(__recvmsg, recvmsg);
ssize_t
_recvmsg(int s, struct msghdr *m, int f)
__recvmsg(int s, struct msghdr *m, int f)
{
struct pthread *curthread = _get_curthread();
ssize_t ret;
int oldcancel;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_recvmsg(s, m, f);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return (ret);
}
__weak_reference(_select, select);
__weak_reference(__select, select);
int
_select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
__select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
struct timeval *timeout)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
int ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_select(numfds, readfds, writefds, exceptfds, timeout);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
}
/*
* Libc implements this by calling _sendto(), which is also a
* cancellation point.
*/
#if 0
__weak_reference(_send, send);
__weak_reference(__sendmsg, sendmsg);
ssize_t
_send(int s, const void *m, size_t l, int f)
__sendmsg(int s, const struct msghdr *m, int f)
{
struct pthread *curthread = _get_curthread();
ssize_t ret;
int oldcancel;
_thread_enter_cancellation_point();
ret = _sendto(s, m, l, f, NULL, 0);
_thread_leave_cancellation_point();
return (ret);
}
#endif
__weak_reference(_sendmsg, sendmsg);
ssize_t
_sendmsg(int s, const struct msghdr *m, int f)
{
ssize_t ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_sendmsg(s, m, f);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return (ret);
}
__weak_reference(_sendto, sendto);
__weak_reference(__sendto, sendto);
ssize_t
_sendto(int s, const void *m, size_t l, int f, const struct sockaddr *t,
__sendto(int s, const void *m, size_t l, int f, const struct sockaddr *t,
socklen_t tl)
{
struct pthread *curthread = _get_curthread();
ssize_t ret;
int oldcancel;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_sendto(s, m, l, f, t, tl);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return (ret);
}
/*
* The implementation in libc calls sigsuspend(), which is also
* a cancellation point.
*/
#if 0
__weak_reference(_sigpause, sigpause);
/*
 * Cancellation-point wrapper for sigpause().  Compiled out: per the
 * comment above, libc implements it via sigsuspend(), which is
 * already a cancellation point.
 */
int
_sigpause(int m)
{
	int ret;
	_thread_enter_cancellation_point();
	ret = __sys_sigpause(m);
	_thread_leave_cancellation_point();
	return (ret);
}
#endif
__weak_reference(_sigsuspend, sigsuspend);
/*
 * Cancellation-point wrapper for sigsuspend(2), which blocks until a
 * signal arrives and therefore must allow pending cancellation to be
 * acted on.
 */
int
_sigsuspend(const sigset_t *m)
{
	int ret;
	_thread_enter_cancellation_point();
	ret = __sys_sigsuspend(m);
	_thread_leave_cancellation_point();
	return (ret);
}
__weak_reference(_sigtimedwait, sigtimedwait);
/*
 * Cancellation-point wrapper for sigtimedwait(2): waits (up to the
 * given timeout) for one of the signals in set 's', with cancellation
 * enabled around the blocking syscall.
 */
int
_sigtimedwait(const sigset_t *s, siginfo_t *i, const struct timespec *t)
{
	int ret;
	_thread_enter_cancellation_point();
	ret = __sys_sigtimedwait(s, i, t);
	_thread_leave_cancellation_point();
	return (ret);
}
__weak_reference(_sigwait, sigwait);
/*
 * Cancellation-point wrapper for sigwait(2): blocks until a signal in
 * set 's' is pending, storing its number via 'i'.
 */
int
_sigwait(const sigset_t *s, int *i)
{
	int ret;
	_thread_enter_cancellation_point();
	ret = __sys_sigwait(s, i);
	_thread_leave_cancellation_point();
	return (ret);
}
__weak_reference(_sigwaitinfo, sigwaitinfo);
/*
 * Cancellation-point wrapper for sigwaitinfo(2): like sigwait() but
 * returns full siginfo for the delivered signal.
 */
int
_sigwaitinfo(const sigset_t *s, siginfo_t *i)
{
	int ret;
	_thread_enter_cancellation_point();
	ret = __sys_sigwaitinfo(s, i);
	_thread_leave_cancellation_point();
	return (ret);
}
__weak_reference(_sleep, sleep);
unsigned int
_sleep(unsigned int seconds)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
unsigned int ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sleep(seconds);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
return (ret);
}
__weak_reference(_system, system);
@ -676,120 +492,117 @@ __weak_reference(_system, system);
int
_system(const char *string)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
int ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __system(string);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
}
__weak_reference(_tcdrain, tcdrain);
int
_tcdrain(int fd)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
int ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __tcdrain(fd);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
}
/*
* The usleep() implementation calls nanosleep(), which is also
* a cancellation point.
*/
#if 0
__weak_reference(_usleep, usleep);
/*
 * Cancellation-point wrapper for usleep().  Compiled out: per the
 * comment above, usleep() is implemented with nanosleep(), which is
 * already a cancellation point.
 */
int
_usleep(useconds_t u)
{
	int ret;
	_thread_enter_cancellation_point();
	ret = __sys_usleep(u);
	_thread_leave_cancellation_point();
	return (ret);
}
#endif
__weak_reference(_vfork, vfork);
/*
 * vfork() is aliased to a plain fork() -- presumably because vfork's
 * shared-address-space semantics are unsafe in a threaded process;
 * TODO confirm the original rationale.
 */
int
_vfork(void)
{
	return (fork());
}
__weak_reference(_wait, wait);
pid_t
_wait(int *istat)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
pid_t ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __wait(istat);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
}
__weak_reference(_wait4, wait4);
__weak_reference(__wait4, wait4);
pid_t
_wait4(pid_t pid, int *istat, int options, struct rusage *rusage)
__wait4(pid_t pid, int *istat, int options, struct rusage *rusage)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
pid_t ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_wait4(pid, istat, options, rusage);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
}
/*
* The libc implementation of waitpid calls wait4().
*/
#if 0
__weak_reference(_waitpid, waitpid);
pid_t
_waitpid(pid_t wpid, int *status, int options)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
pid_t ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __waitpid(wpid, status, options);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
}
#endif
__weak_reference(_write, write);
__weak_reference(__write, write);
ssize_t
_write(int fd, const void *buf, size_t nbytes)
__write(int fd, const void *buf, size_t nbytes)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
ssize_t ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_write(fd, buf, nbytes);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
}
__weak_reference(_writev, writev);
__weak_reference(__writev, writev);
ssize_t
_writev(int fd, const struct iovec *iov, int iovcnt)
__writev(int fd, const struct iovec *iov, int iovcnt)
{
struct pthread *curthread = _get_curthread();
int oldcancel;
ssize_t ret;
_thread_enter_cancellation_point();
oldcancel = _thr_cancel_enter(curthread);
ret = __sys_writev(fd, iov, iovcnt);
_thread_leave_cancellation_point();
_thr_cancel_leave(curthread, oldcancel);
return ret;
}

View File

@ -0,0 +1,80 @@
/*
* Copyright (c) 2005 David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*
*/
#include "thr_private.h"
#include "thr_umtx.h"
/*
 * Kernel slow path for acquiring a umtx: keep retrying the
 * UMTX_OP_LOCK operation until it succeeds (the syscall may fail
 * transiently, e.g. on interruption).  Always reports success.
 */
int
__thr_umtx_lock(volatile umtx_t *mtx, long id)
{
	while (_umtx_op((struct umtx *)mtx, UMTX_OP_LOCK, id, 0, 0) != 0)
		continue;
	return (0);
}
/*
 * Kernel slow path for a timed umtx acquisition.
 *
 * Returns 0 on success, ETIMEDOUT immediately for a non-positive
 * timeout, or the syscall's errno on failure.
 */
int
__thr_umtx_timedlock(volatile umtx_t *mtx, long id,
	const struct timespec *timeout)
{
	int rc;

	/* A zero or negative timeout means "do not block at all". */
	if (timeout != NULL && (timeout->tv_sec < 0 ||
	    (timeout->tv_sec == 0 && timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	rc = _umtx_op((struct umtx *)mtx, UMTX_OP_LOCK, id, 0,
	    (void *)timeout);
	return (rc == 0 ? 0 : errno);
}
/*
 * Kernel slow path for releasing a umtx: hand the lock word to the
 * kernel so it can wake a contending waiter.  Returns 0 on success,
 * the syscall's errno otherwise.
 */
int
__thr_umtx_unlock(volatile umtx_t *mtx, long id)
{
	return (_umtx_op((struct umtx *)mtx, UMTX_OP_UNLOCK, id, 0, 0) == 0 ?
	    0 : errno);
}
/*
 * Sleep in the kernel until the umtx word no longer equals 'id' (or
 * the timeout expires).  Returns 0 on wakeup, ETIMEDOUT immediately
 * for a non-positive timeout, or the syscall's errno on failure.
 */
int
_thr_umtx_wait(volatile umtx_t *mtx, long id, const struct timespec *timeout)
{
	int rc;

	/* A zero or negative timeout means "do not block at all". */
	if (timeout != NULL && (timeout->tv_sec < 0 ||
	    (timeout->tv_sec == 0 && timeout->tv_nsec <= 0)))
		return (ETIMEDOUT);
	rc = _umtx_op((struct umtx *)mtx, UMTX_OP_WAIT, id, 0,
	    (void *)timeout);
	return (rc == 0 ? 0 : errno);
}
/*
 * Wake up to 'nr_wakeup' threads blocked in _thr_umtx_wait() on this
 * umtx word.  Returns 0 on success, the syscall's errno otherwise.
 */
int
_thr_umtx_wake(volatile umtx_t *mtx, int nr_wakeup)
{
	int rc;

	rc = _umtx_op((struct umtx *)mtx, UMTX_OP_WAKE, nr_wakeup, 0, 0);
	return (rc == 0 ? 0 : errno);
}

View File

@ -0,0 +1,81 @@
/*-
* Copyright (c) 2005 David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _THR_FBSD_UMTX_H_
#define _THR_FBSD_UMTX_H_
#include <sys/umtx.h>
typedef long umtx_t;
int __thr_umtx_lock(volatile umtx_t *mtx, long id);
int __thr_umtx_timedlock(volatile umtx_t *mtx, long id,
const struct timespec *timeout);
int __thr_umtx_unlock(volatile umtx_t *mtx, long id);
static inline void
_thr_umtx_init(volatile umtx_t *mtx)
{
*mtx = 0;
}
/*
 * Non-blocking acquire: delegates to the umtx_trylock() helper from
 * <sys/umtx.h> and returns its result unchanged (0 on success; on
 * failure presumably an errno value -- confirm against <sys/umtx.h>).
 */
static inline int
_thr_umtx_trylock(volatile umtx_t *mtx, long id)
{
	return umtx_trylock((struct umtx *)mtx, id);
}
/*
 * Lock fast path: a single acquire-ordered compare-and-set claims an
 * uncontested umtx (UMTX_UNOWNED -> id) without entering the kernel.
 * On contention, fall back to the syscall-based slow path.
 */
static inline int
_thr_umtx_lock(volatile umtx_t *mtx, long id)
{
	if (atomic_cmpset_acq_ptr(mtx, (void *)UMTX_UNOWNED, (void *)id) != 0)
		return (0);
	return (__thr_umtx_lock(mtx, id));
}
/*
 * Timed-lock fast path: try the uncontested acquire-CAS first; only
 * on contention does the timeout matter, via the kernel slow path.
 */
static inline int
_thr_umtx_timedlock(volatile umtx_t *mtx, long id,
	const struct timespec *timeout)
{
	if (atomic_cmpset_acq_ptr(mtx, (void *)UMTX_UNOWNED, (void *)id) != 0)
		return (0);
	return (__thr_umtx_timedlock(mtx, id, timeout));
}
/*
 * Unlock fast path: a release-ordered compare-and-set (id ->
 * UMTX_UNOWNED) drops an uncontested lock without a syscall; if the
 * word no longer holds our plain id, let the kernel release it and
 * wake any waiter.
 */
static inline int
_thr_umtx_unlock(volatile umtx_t *mtx, long id)
{
	if (atomic_cmpset_rel_ptr(mtx, (void *)id, (void *)UMTX_UNOWNED) != 0)
		return (0);
	return (__thr_umtx_unlock(mtx, id));
}
int _thr_umtx_wait(volatile umtx_t *mtx, umtx_t exp,
const struct timespec *timeout);
int _thr_umtx_wake(volatile umtx_t *mtx, int count);
#endif