diff --git a/lib/libkse/arch/amd64/Makefile.inc b/lib/libkse/arch/amd64/Makefile.inc index c0e4c47941bd..c8b0362bca85 100644 --- a/lib/libkse/arch/amd64/Makefile.inc +++ b/lib/libkse/arch/amd64/Makefile.inc @@ -2,4 +2,4 @@ .PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH} -SRCS+= context.S enter_uts.S +SRCS+= context.S enter_uts.S pthread_md.c diff --git a/lib/libkse/arch/amd64/amd64/pthread_md.c b/lib/libkse/arch/amd64/amd64/pthread_md.c new file mode 100644 index 000000000000..374b6d8785b6 --- /dev/null +++ b/lib/libkse/arch/amd64/amd64/pthread_md.c @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2003 Daniel Eischen + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Neither the name of the author nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include +#include +#include "pthread_md.h" + +/* + * The constructors. + */ +struct tcb * +_tcb_ctor(struct pthread *thread) +{ + struct tcb *tcb; + + if ((tcb = malloc(sizeof(struct tcb))) != NULL) { + bzero(tcb, sizeof(struct tcb)); + tcb->tcb_thread = thread; + /* Allocate TDV */ + } + return (tcb); +} + +void +_tcb_dtor(struct tcb *tcb) +{ + /* Free TDV */ + free(tcb); +} + +struct kcb * +_kcb_ctor(struct kse *kse) +{ + struct kcb *kcb; + + if ((kcb = malloc(sizeof(struct kcb))) != NULL) { + bzero(kcb, sizeof(struct kcb)); + kcb->kcb_kse = kse; + } + return (kcb); +} + +void +_kcb_dtor(struct kcb *kcb) +{ + free(kcb); +} diff --git a/lib/libkse/arch/amd64/include/pthread_md.h b/lib/libkse/arch/amd64/include/pthread_md.h index 3c7cd03f7aae..27a07cc2cbea 100644 --- a/lib/libkse/arch/amd64/include/pthread_md.h +++ b/lib/libkse/arch/amd64/include/pthread_md.h @@ -1,27 +1,28 @@ -/* - * Copyright (c) 2003 Marcel Moolenaar +/*- + * Copyright (C) 2003 David Xu + * Copyright (c) 2001 Daniel Eischen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. + * 2. 
Neither the name of the author nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. * * $FreeBSD$ */ @@ -31,52 +32,224 @@ #ifndef _PTHREAD_MD_H_ #define _PTHREAD_MD_H_ +#include +#include +#include #include +/* should define this, but doesn't. */ +extern int sysarch(int, void *); + #define THR_GETCONTEXT(ucp) \ (void)_amd64_save_context(&(ucp)->uc_mcontext) #define THR_SETCONTEXT(ucp) \ - (void)_amd64_restore_context(&(ucp)->uc_mcontext, NULL, NULL) + (void)_amd64_restore_context(&(ucp)->uc_mcontext, 0, NULL) -#define THR_ALIGNBYTES 15 -#define THR_ALIGN(td) (((uintptr_t)(td) + THR_ALIGNBYTES) & ~THR_ALIGNBYTES) + +#define PER_KSE +#undef PER_THREAD + +struct kse; +struct pthread; +struct tdv; /* - * KSE Specific Data. + * %fs points to a struct kcb. */ -struct ksd { - void *base; - long size; +struct kcb { + struct tcb *kcb_curtcb; + struct kcb *kcb_self; /* self reference */ + struct kse *kcb_kse; + struct kse_mailbox kcb_kmbx; }; +struct tcb { + struct tdv *tcb_tdv; + struct pthread *tcb_thread; + void *tcb_spare[2]; /* align tcb_tmbx to 16 bytes */ + struct kse_thr_mailbox tcb_tmbx; +}; + +/* + * Evaluates to the byte offset of the per-kse variable name. + */ +#define __kcb_offset(name) __offsetof(struct kcb, name) + +/* + * Evaluates to the type of the per-kse variable name. + */ +#define __kcb_type(name) __typeof(((struct kcb *)0)->name) + +/* + * Evaluates to the value of the per-kse variable name. + */ +#define KCB_GET64(name) ({ \ + __kcb_type(name) __result; \ + \ + u_long __i; \ + __asm __volatile("movq %%fs:%1, %0" \ + : "=r" (__i) \ + : "m" (*(u_long *)(__kcb_offset(name)))); \ + __result = *(__kcb_type(name) *)&__i; \ + \ + __result; \ +}) + +/* + * Sets the value of the per-kse variable name to value val. 
+ */ +#define KCB_SET64(name, val) ({ \ + __kcb_type(name) __val = (val); \ + \ + u_long __i; \ + __i = *(u_long *)&__val; \ + __asm __volatile("movq %1,%%fs:%0" \ + : "=m" (*(u_long *)(__kcb_offset(name))) \ + : "r" (__i)); \ +}) + +static __inline u_long +__kcb_readandclear64(volatile u_long *addr) +{ + u_long result; + + __asm __volatile ( + " xorq %0, %0;" + " xchgq %%fs:%1, %0;" + "# __kcb_readandclear64" + : "=&r" (result) + : "m" (*addr)); + return (result); +} + +#define KCB_READANDCLEAR64(name) ({ \ + __kcb_type(name) __result; \ + \ + __result = (__kcb_type(name)) \ + __kcb_readandclear64((u_long *)__kcb_offset(name)); \ + __result; \ +}) + + +#define _kcb_curkcb() KCB_GET64(kcb_self) +#define _kcb_curtcb() KCB_GET64(kcb_curtcb) +#define _kcb_curkse() ((struct kse *)KCB_GET64(kcb_kmbx.km_udata)) +#define _kcb_get_tmbx() KCB_GET64(kcb_kmbx.km_curthread) +#define _kcb_set_tmbx(value) KCB_SET64(kcb_kmbx.km_curthread, (void *)value) +#define _kcb_readandclear_tmbx() KCB_READANDCLEAR64(kcb_kmbx.km_curthread) + +/* + * The constructors. + */ +struct tcb *_tcb_ctor(struct pthread *); +void _tcb_dtor(struct tcb *tcb); +struct kcb *_kcb_ctor(struct kse *); +void _kcb_dtor(struct kcb *); + +/* Called from the KSE to set its private data. */ +static __inline void +_kcb_set(struct kcb *kcb) +{ + void *addr = kcb; + + sysarch(AMD64_SET_FSBASE, &addr); +} + +/* Get the current kcb. */ +static __inline struct kcb * +_kcb_get(void) +{ + return (_kcb_curkcb()); +} + +static __inline struct kse_thr_mailbox * +_kcb_critical_enter(void) +{ + struct kse_thr_mailbox *crit; + + crit = _kcb_readandclear_tmbx(); + return (crit); +} + +static __inline void +_kcb_critical_leave(struct kse_thr_mailbox *crit) +{ + _kcb_set_tmbx(crit); +} + +static __inline int +_kcb_in_critical(void) +{ + return (_kcb_get_tmbx() == NULL); +} + +static __inline void +_tcb_set(struct kcb *kcb, struct tcb *tcb) +{ + kcb->kcb_curtcb = tcb; +} + +static __inline struct tcb * +_tcb_get(void) +{ + return (_kcb_curtcb()); +} + +static __inline struct pthread * +_get_curthread(void) +{ + struct tcb *tcb; + + tcb = _kcb_curtcb(); + if (tcb != NULL) + return (tcb->tcb_thread); + else + return (NULL); +} + +static __inline struct kse * +_get_curkse(void) +{ + return ((struct kse *)_kcb_curkse()); +} + void _amd64_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack, size_t stacksz); int _amd64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc); int _amd64_save_context(mcontext_t *mc); static __inline int -_thread_enter_uts(struct kse_thr_mailbox *tm, struct kse_mailbox *km) +_thread_enter_uts(struct tcb *tcb, struct kcb *kcb) { - if (tm == NULL) - return (-1); - if (!_amd64_save_context(&tm->tm_context.uc_mcontext)) { - _amd64_enter_uts(km, km->km_func, km->km_stack.ss_sp, - km->km_stack.ss_size); + int ret; + + ret = _amd64_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext); + if (ret == 0) { + _amd64_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func, + kcb->kcb_kmbx.km_stack.ss_sp, + kcb->kcb_kmbx.km_stack.ss_size); /* We should not reach here. 
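A note on the mechanism behind KCB_GET64/KCB_SET64 above: _kcb_set() asks the kernel (via sysarch(AMD64_SET_FSBASE, ...)) to point the %fs segment base at the current kcb, so a memory operand whose "address" is merely a field offset within struct kcb resolves to that field of whichever kcb the running KSE owns. A minimal standalone sketch of the same addressing trick, assuming GCC-style inline asm; the helper name is hypothetical and not part of the patch:

```c
#include <stdint.h>

/*
 * Read the 64-bit word located `offset` bytes past the %fs base.
 * The CPU adds the (per-KSE) segment base for us, so the same code
 * finds the right kcb no matter which KSE executes it.
 */
static inline uint64_t
fs_read64(uint64_t offset)
{
    uint64_t val;

    __asm __volatile("movq %%fs:(%1), %0" : "=r" (val) : "r" (offset));
    return (val);
}
```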
*/ return (-1); } + else if (ret < 0) + return (-1); return (0); } static __inline int -_thread_switch(struct kse_thr_mailbox *tm, struct kse_thr_mailbox **thrp) +_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox) { - if (tm == NULL) + if ((kcb == NULL) || (tcb == NULL)) return (-1); - _amd64_restore_context(&tm->tm_context.uc_mcontext, (intptr_t)tm, - (intptr_t*)thrp); + kcb->kcb_curtcb = tcb; + if (setmbox != 0) + _amd64_restore_context(&tcb->tcb_tmbx.tm_context.uc_mcontext, + (intptr_t)&tcb->tcb_tmbx, + (intptr_t *)&kcb->kcb_kmbx.km_curthread); + else + _amd64_restore_context(&tcb->tcb_tmbx.tm_context.uc_mcontext, + 0, NULL); /* We should not reach here. */ return (-1); } - #endif diff --git a/lib/libkse/arch/i386/Makefile.inc b/lib/libkse/arch/i386/Makefile.inc index c5cbe18b4b2b..73a9a8acde45 100644 --- a/lib/libkse/arch/i386/Makefile.inc +++ b/lib/libkse/arch/i386/Makefile.inc @@ -2,4 +2,4 @@ .PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH} -SRCS+= ksd.c thr_enter_uts.S thr_getcontext.S +SRCS+= thr_enter_uts.S thr_getcontext.S pthread_md.c diff --git a/lib/libpthread/arch/i386/i386/ksd.c b/lib/libkse/arch/i386/i386/pthread_md.c similarity index 67% rename from lib/libpthread/arch/i386/i386/ksd.c rename to lib/libkse/arch/i386/i386/pthread_md.c index 4e959594eb27..aeaa6495efc6 100644 --- a/lib/libpthread/arch/i386/i386/ksd.c +++ b/lib/libkse/arch/i386/i386/pthread_md.c @@ -40,7 +40,6 @@ __FBSDID("$FreeBSD$"); #include #include "pthread_md.h" -#include "ksd.h" #define LDT_ENTRIES 8192 #define LDT_WORDS (8192/sizeof(unsigned int)) @@ -94,76 +93,79 @@ free_ldt_entry(u_int index) ldt_mask[i] |= (1 << j); } -/* - * Initialize KSD. This also includes setting up the LDT. - */ -int -_ksd_create(struct ksd *ksd, void *base, int size) +struct tcb * +_tcb_ctor(struct pthread *thread) { - union descriptor ldt; + struct tcb *tcb; + void *addr; - if (initialized == 0) - initialize(); - ksd->ldt = alloc_ldt_entry(); - if (ksd->ldt == 0) - return (-1); - ksd->base = base; - ksd->size = size; - ldt.sd.sd_hibase = (unsigned int)ksd->base >> 24; - ldt.sd.sd_lobase = (unsigned int)ksd->base & 0xFFFFFF; - ldt.sd.sd_hilimit = (size >> 16) & 0xF; - ldt.sd.sd_lolimit = ksd->size & 0xFFFF; - ldt.sd.sd_type = SDT_MEMRWA; - ldt.sd.sd_dpl = SEL_UPL; - ldt.sd.sd_p = 1; - ldt.sd.sd_xx = 0; - ldt.sd.sd_def32 = 1; - ldt.sd.sd_gran = 0; /* no more than 1M */ - if (i386_set_ldt(ksd->ldt, &ldt, 1) < 0) { - free_ldt_entry(ksd->ldt); - return (-1); + addr = malloc(sizeof(struct tcb) + 15); + if (addr == NULL) + tcb = NULL; + else { + tcb = (struct tcb *)(((uintptr_t)(addr) + 15) & ~15); + bzero(tcb, sizeof(struct tcb)); + tcb->tcb_addr = addr; + tcb->tcb_thread = thread; + /* XXX - Allocate tdv/tls */ } - ksd->flags = KSDF_INITIALIZED; - return (0); + return (tcb); } void -_ksd_destroy(struct ksd *ksd) +_tcb_dtor(struct tcb *tcb) { - if ((ksd->flags & KSDF_INITIALIZED) != 0) { - free_ldt_entry(ksd->ldt); - } -} + void *addr; -int -_ksd_getprivate(struct ksd *ksd, void **base, int *size) -{ - - if ((ksd == NULL) || ((ksd->flags & KSDF_INITIALIZED) == 0)) - return (-1); - else { - *base = ksd->base; - *size = ksd->size; - return (0); - } + addr = tcb->tcb_addr; + tcb->tcb_addr = NULL; + free(addr); } /* - * This assumes that the LDT is already setup. Just set %gs to - * reference it. + * Initialize KSD. This also includes setting up the LDT. 
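The i386 _tcb_ctor above cannot assume malloc() returns 16-byte-aligned storage (the tcb wants tcb_tmbx aligned to 16 bytes), so it over-allocates by 15 bytes, rounds the pointer up, and stashes the raw pointer in tcb_addr so that _tcb_dtor can free() it later. The idiom in isolation, with a hypothetical helper name:

```c
#include <stdint.h>
#include <stdlib.h>

/*
 * Return a 16-byte-aligned block of at least `size` bytes.
 * *rawp receives the pointer that must later be passed to free().
 */
static void *
malloc_aligned16(size_t size, void **rawp)
{
    void *raw;

    if ((raw = malloc(size + 15)) == NULL)
        return (NULL);
    *rawp = raw;
    return ((void *)(((uintptr_t)raw + 15) & ~(uintptr_t)15));
}
```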
*/ -int -_ksd_setprivate(struct ksd *ksd) +struct kcb * +_kcb_ctor(struct kse *kse) { - int val; - int ret; + union descriptor ldt; + struct kcb *kcb; - if ((ksd->flags & KSDF_INITIALIZED) == 0) - ret = -1; - else { - val = (ksd->ldt << 3) | 7; - __asm __volatile("movl %0, %%gs" : : "r" (val)); - ret = 0; + if (initialized == 0) + initialize(); + kcb = malloc(sizeof(struct kcb)); + if (kcb != NULL) { + bzero(kcb, sizeof(struct kcb)); + kcb->kcb_self = kcb; + kcb->kcb_kse = kse; + kcb->kcb_ldt = alloc_ldt_entry(); + if (kcb->kcb_ldt == 0) { + free(kcb); + return (NULL); + } + ldt.sd.sd_hibase = (unsigned int)kcb >> 24; + ldt.sd.sd_lobase = (unsigned int)kcb & 0xFFFFFF; + ldt.sd.sd_hilimit = (sizeof(struct kcb) >> 16) & 0xF; + ldt.sd.sd_lolimit = sizeof(struct kcb) & 0xFFFF; + ldt.sd.sd_type = SDT_MEMRWA; + ldt.sd.sd_dpl = SEL_UPL; + ldt.sd.sd_p = 1; + ldt.sd.sd_xx = 0; + ldt.sd.sd_def32 = 1; + ldt.sd.sd_gran = 0; /* no more than 1M */ + if (i386_set_ldt(kcb->kcb_ldt, &ldt, 1) < 0) { + free_ldt_entry(kcb->kcb_ldt); + free(kcb); + return (NULL); + } } - return (ret); + return (kcb); +} + +void +_kcb_dtor(struct kcb *kcb) +{ + if (kcb->kcb_ldt != -1) + free_ldt_entry(kcb->kcb_ldt); + free(kcb); } diff --git a/lib/libkse/arch/i386/i386/thr_getcontext.S b/lib/libkse/arch/i386/i386/thr_getcontext.S index 19afd061f638..7f58dce4248f 100644 --- a/lib/libkse/arch/i386/i386/thr_getcontext.S +++ b/lib/libkse/arch/i386/i386/thr_getcontext.S @@ -74,7 +74,7 @@ ENTRY(__thr_setcontext) movl 72(%edx), %esp /* switch to context defined stack */ pushl 60(%edx) /* push return address on stack */ pushl 44(%edx) /* push ecx on stack */ - push 48(%edx) /* push eax on stack */ + pushl 48(%edx) /* push eax on stack */ /* * if (mc_fpowned == MC_OWNEDFP_FPU || mc_fpowned == MC_OWNEDFP_PCB) { * if (mc_fpformat == MC_FPFMT_387) diff --git a/lib/libkse/arch/i386/include/pthread_md.h b/lib/libkse/arch/i386/include/pthread_md.h index 980680a281ed..971856244f71 100644 --- a/lib/libkse/arch/i386/include/pthread_md.h +++ b/lib/libkse/arch/i386/include/pthread_md.h @@ -37,34 +37,193 @@ extern int _thr_setcontext(mcontext_t *, intptr_t, intptr_t *); extern int _thr_getcontext(mcontext_t *); -#define THR_GETCONTEXT(ucp) _thr_getcontext(&(ucp)->uc_mcontext); -#define THR_SETCONTEXT(ucp) _thr_setcontext(&(ucp)->uc_mcontext, NULL, NULL); +#define THR_GETCONTEXT(ucp) _thr_getcontext(&(ucp)->uc_mcontext) +#define THR_SETCONTEXT(ucp) _thr_setcontext(&(ucp)->uc_mcontext, 0, NULL) -#define THR_ALIGNBYTES 15 -#define THR_ALIGN(td) (((unsigned)(td) + THR_ALIGNBYTES) & ~THR_ALIGNBYTES) +#define PER_KSE +#undef PER_THREAD + +struct kse; +struct pthread; +struct tdv; /* - * KSE Specific Data. + * %gs points to a struct kcb. */ -struct ksd { - int ldt; -#define KSDF_INITIALIZED 0x01 - long flags; - void *base; - long size; +struct kcb { + struct tcb *kcb_curtcb; + struct kcb *kcb_self; /* self reference */ + int kcb_ldt; + struct kse *kcb_kse; + struct kse_mailbox kcb_kmbx; }; -extern void _i386_enter_uts(struct kse_mailbox *, kse_func_t, void *, long); +struct tcb { + struct tdv *tcb_tdv; + struct pthread *tcb_thread; + void *tcb_addr; /* allocated tcb address */ + void *tcb_spare; /* align tcb_tmbx to 16 bytes */ + struct kse_thr_mailbox tcb_tmbx; +}; + +/* + * Evaluates to the byte offset of the per-kse variable name. + */ +#define __kcb_offset(name) __offsetof(struct kcb, name) + +/* + * Evaluates to the type of the per-kse variable name. 
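For readers new to x86 segment descriptors, the _kcb_ctor above splits the kcb's linear address and the structure's size across the descriptor's scattered base/limit fields. A worked example with an illustrative address (the values are hypothetical, not from the patch):

```c
/* Descriptor base/limit packing, as done by _kcb_ctor above. */
unsigned int base = 0x0804f000;         /* hypothetical kcb address */
unsigned int hibase = base >> 24;       /* 0x08     -> sd_hibase */
unsigned int lobase = base & 0xFFFFFF;  /* 0x04f000 -> sd_lobase */
unsigned int limit = 0x200;             /* hypothetical sizeof(struct kcb);
                                           byte-granular since sd_gran = 0 */
```

With sd_gran = 0 the 20-bit limit counts bytes, which is why the comment notes the segment can cover no more than 1M.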
+ */ +#define __kcb_type(name) __typeof(((struct kcb *)0)->name) + +/* + * Evaluates to the value of the per-kse variable name. + */ +#define KCB_GET32(name) ({ \ + __kcb_type(name) __result; \ + \ + u_int __i; \ + __asm __volatile("movl %%gs:%1, %0" \ + : "=r" (__i) \ + : "m" (*(u_int *)(__kcb_offset(name)))); \ + __result = *(__kcb_type(name) *)&__i; \ + \ + __result; \ +}) + +/* + * Sets the value of the per-kse variable name to value val. + */ +#define KCB_SET32(name, val) ({ \ + __kcb_type(name) __val = (val); \ + \ + u_int __i; \ + __i = *(u_int *)&__val; \ + __asm __volatile("movl %1,%%gs:%0" \ + : "=m" (*(u_int *)(__kcb_offset(name))) \ + : "r" (__i)); \ +}) + +static __inline u_long +__kcb_readandclear32(volatile u_long *addr) +{ + u_long result; + + __asm __volatile ( + " xorl %0, %0;" + " xchgl %%gs:%1, %0;" + "# __kcb_readandclear32" + : "=&r" (result) + : "m" (*addr)); + return (result); +} + +#define KCB_READANDCLEAR32(name) ({ \ + __kcb_type(name) __result; \ + \ + __result = (__kcb_type(name)) \ + __kcb_readandclear32((u_long *)__kcb_offset(name)); \ + __result; \ +}) + + +#define _kcb_curkcb() KCB_GET32(kcb_self) +#define _kcb_curtcb() KCB_GET32(kcb_curtcb) +#define _kcb_curkse() ((struct kse *)KCB_GET32(kcb_kmbx.km_udata)) +#define _kcb_get_tmbx() KCB_GET32(kcb_kmbx.km_curthread) +#define _kcb_set_tmbx(value) KCB_SET32(kcb_kmbx.km_curthread, (void *)value) +#define _kcb_readandclear_tmbx() KCB_READANDCLEAR32(kcb_kmbx.km_curthread) + + +/* + * The constructors. + */ +struct tcb *_tcb_ctor(struct pthread *); +void _tcb_dtor(struct tcb *tcb); +struct kcb *_kcb_ctor(struct kse *); +void _kcb_dtor(struct kcb *); + +/* Called from the KSE to set its private data. */ +static __inline void +_kcb_set(struct kcb *kcb) +{ + int val; + + val = (kcb->kcb_ldt << 3) | 7; + __asm __volatile("movl %0, %%gs" : : "r" (val)); +} + +/* Get the current kcb. */ +static __inline struct kcb * +_kcb_get(void) +{ + return (_kcb_curkcb()); +} + +static __inline struct kse_thr_mailbox * +_kcb_critical_enter(void) +{ + struct kse_thr_mailbox *crit; + + crit = _kcb_readandclear_tmbx(); + return (crit); +} + +static __inline void +_kcb_critical_leave(struct kse_thr_mailbox *crit) +{ + _kcb_set_tmbx(crit); +} static __inline int -_thread_enter_uts(struct kse_thr_mailbox *tmbx, struct kse_mailbox *kmbx) +_kcb_in_critical(void) +{ + return (_kcb_get_tmbx() == NULL); +} + +static __inline void +_tcb_set(struct kcb *kcb, struct tcb *tcb) +{ + kcb->kcb_curtcb = tcb; +} + +static __inline struct tcb * +_tcb_get(void) +{ + return (_kcb_curtcb()); +} + +static __inline struct pthread * +_get_curthread(void) +{ + struct tcb *tcb; + + tcb = _kcb_curtcb(); + if (tcb != NULL) + return (tcb->tcb_thread); + else + return (NULL); +} + +static __inline struct kse * +_get_curkse(void) +{ + return ((struct kse *)_kcb_curkse()); +} + +void _i386_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack, + size_t stacksz); + +static __inline int +_thread_enter_uts(struct tcb *tcb, struct kcb *kcb) { int ret; - ret = _thr_getcontext(&tmbx->tm_context.uc_mcontext); + ret = _thr_getcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext); if (ret == 0) { - _i386_enter_uts(kmbx, kmbx->km_func, - kmbx->km_stack.ss_sp, kmbx->km_stack.ss_size); + _i386_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func, + kcb->kcb_kmbx.km_stack.ss_sp, + kcb->kcb_kmbx.km_stack.ss_size); /* We should not reach here. 
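Two i386 details above deserve a note. First, _kcb_set builds the %gs selector as (kcb_ldt << 3) | 7: bits 3..15 index the descriptor, bit 2 (TI = 1) selects the LDT rather than the GDT, and the low two bits request RPL 3. Second, __kcb_readandclear32 is what makes _kcb_critical_enter atomic with respect to an upcall: xchg with a memory operand is implicitly locked on x86, so km_curthread is fetched and zeroed in one indivisible step. A plain C11 rendering of that read-and-clear, minus the %gs-relative addressing (illustrative only):

```c
#include <stdatomic.h>

/* Atomically fetch the old pointer and store NULL in its place. */
static inline void *
read_and_clear(_Atomic(void *) *slot)
{
    return (atomic_exchange(slot, NULL));
}
```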
*/ return (-1); } @@ -74,10 +233,17 @@ _thread_enter_uts(struct kse_thr_mailbox *tmbx, struct kse_mailbox *kmbx) } static __inline int -_thread_switch(struct kse_thr_mailbox *tmbx, struct kse_thr_mailbox **loc) +_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox) { - _thr_setcontext(&tmbx->tm_context.uc_mcontext, - (intptr_t)tmbx, (intptr_t *)loc); + if ((kcb == NULL) || (tcb == NULL)) + return (-1); + kcb->kcb_curtcb = tcb; + if (setmbox != 0) + _thr_setcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext, + (intptr_t)&tcb->tcb_tmbx, + (intptr_t *)&kcb->kcb_kmbx.km_curthread); + else + _thr_setcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext, 0, NULL); /* We should not reach here. */ return (-1); } diff --git a/lib/libkse/arch/ia64/Makefile.inc b/lib/libkse/arch/ia64/Makefile.inc index c0e4c47941bd..c8b0362bca85 100644 --- a/lib/libkse/arch/ia64/Makefile.inc +++ b/lib/libkse/arch/ia64/Makefile.inc @@ -2,4 +2,4 @@ .PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH} -SRCS+= context.S enter_uts.S +SRCS+= context.S enter_uts.S pthread_md.c diff --git a/lib/libkse/arch/ia64/ia64/pthread_md.c b/lib/libkse/arch/ia64/ia64/pthread_md.c new file mode 100644 index 000000000000..e8fd64e741a0 --- /dev/null +++ b/lib/libkse/arch/ia64/ia64/pthread_md.c @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2003 Daniel Eischen + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Neither the name of the author nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include +#include +#include "pthread_md.h" + +/* + * The constructors. 
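As on amd64 and i386 above, the ia64 machine-dependent file only has to supply a small constructor/destructor contract; everything else the machine-independent code needs is inline in the per-arch pthread_md.h. A hypothetical skeleton for a new port would start from exactly these four entry points (sketch, mirroring the declarations in each header of this patch):

```c
/* The per-arch contract consumed by the MI code. */
struct tcb  *_tcb_ctor(struct pthread *);  /* per-thread data */
void         _tcb_dtor(struct tcb *);
struct kcb  *_kcb_ctor(struct kse *);      /* per-KSE data */
void         _kcb_dtor(struct kcb *);
```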
+ */ +struct tcb * +_tcb_ctor(struct pthread *thread) +{ + struct tcb *tcb; + + if ((tcb = malloc(sizeof(struct tcb))) != NULL) { + bzero(tcb, sizeof(struct tcb)); + tcb->tcb_thread = thread; + tcb->tcb_tp.tp_self = tcb; + /* Allocate TDV */ + } + return (tcb); +} + +void +_tcb_dtor(struct tcb *tcb) +{ + /* Free TDV */ + free(tcb); +} + +struct kcb * +_kcb_ctor(struct kse *kse) +{ + struct kcb *kcb; + + if ((kcb = malloc(sizeof(struct kcb))) != NULL) { + bzero(kcb, sizeof(struct kcb)); + kcb->kcb_faketcb.tcb_isfake = 1; + kcb->kcb_faketcb.tcb_tmbx.tm_flags = TMF_NOUPCALL; + kcb->kcb_faketcb.tcb_tp.tp_self = &kcb->kcb_faketcb; + kcb->kcb_curtcb = &kcb->kcb_faketcb; + kcb->kcb_kse = kse; + } + return (kcb); +} + +void +_kcb_dtor(struct kcb *kcb) +{ + free(kcb); +} diff --git a/lib/libkse/arch/ia64/include/pthread_md.h b/lib/libkse/arch/ia64/include/pthread_md.h index 6d30025c5c1e..f7076d0a65d2 100644 --- a/lib/libkse/arch/ia64/include/pthread_md.h +++ b/lib/libkse/arch/ia64/include/pthread_md.h @@ -29,32 +29,190 @@ #ifndef _PTHREAD_MD_H_ #define _PTHREAD_MD_H_ +#include +#include + #define THR_GETCONTEXT(ucp) _ia64_save_context(&(ucp)->uc_mcontext) #define THR_SETCONTEXT(ucp) _ia64_restore_context(&(ucp)->uc_mcontext, \ 0, NULL) -#define THR_ALIGNBYTES 0 -#define THR_ALIGN(td) (td) +#define PER_THREAD -/* KSE Specific Data. */ -struct ksd { - void *ksd_base; - int ksd_size; +struct kcb; +struct kse; +struct pthread; +struct tcb; +struct tdv; /* We don't know what this is yet? */ + +/* + * tp points to one of these. + */ +struct ia64_tp { + struct tdv *tp_tdv; /* dynamic TLS */ + struct tcb *tp_self; + char tp_tls[0]; /* static TLS */ }; +struct tcb { + struct kse_thr_mailbox tcb_tmbx; + struct pthread *tcb_thread; + struct kcb *tcb_curkcb; + long tcb_isfake; + struct ia64_tp tcb_tp; +}; + +struct kcb { + struct kse_mailbox kcb_kmbx; + struct tcb kcb_faketcb; + struct tcb *kcb_curtcb; + struct kse *kcb_kse; +}; + +register struct ia64_tp *_tp __asm("%r13"); + +/* + * The kcb and tcb constructors. + */ +struct tcb *_tcb_ctor(struct pthread *); +void _tcb_dtor(struct tcb *); +struct kcb *_kcb_ctor(struct kse *kse); +void _kcb_dtor(struct kcb *); + +/* Called from the KSE to set its private data. */ +static __inline void +_kcb_set(struct kcb *kcb) +{ + /* There is no thread yet; use the fake tcb. */ + _tp = &kcb->kcb_faketcb.tcb_tp; +} + +/* + * Get the current kcb. + * + * This can only be called while in a critical region; don't + * worry about having the kcb changed out from under us. + */ +static __inline struct kcb * +_kcb_get(void) +{ + return (_tp->tp_self->tcb_curkcb); +} + +/* + * Enter a critical region. + * + * Read and clear km_curthread in the kse mailbox. + */ +static __inline struct kse_thr_mailbox * +_kcb_critical_enter(void) +{ + struct kse_thr_mailbox *crit; + struct tcb *tcb; + uint32_t flags; + + tcb = _tp->tp_self; + if (tcb->tcb_isfake != 0) { + /* + * We already are in a critical region since + * there is no current thread. + */ + crit = NULL; + } else { + flags = tcb->tcb_tmbx.tm_flags; + tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL; + crit = tcb->tcb_curkcb->kcb_kmbx.km_curthread; + tcb->tcb_curkcb->kcb_kmbx.km_curthread = NULL; + tcb->tcb_tmbx.tm_flags = flags; + } + return (crit); +} + +static __inline void +_kcb_critical_leave(struct kse_thr_mailbox *crit) +{ + struct tcb *tcb; + + tcb = _tp->tp_self; + /* No need to do anything if this is a fake tcb. 
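Unlike amd64/i386, ia64 reserves a general register for the thread pointer, so the header above simply pins _tp to r13 as a GCC global register variable; every accessor then becomes a couple of dependent loads with no sysarch() call when switching. The tp_self field exists so code holding only _tp can get back to the enclosing tcb. A reduced sketch of the declaration mechanism (compiles only when targeting ia64; the variable name is hypothetical):

```c
struct ia64_tp;

/*
 * GCC global register variable: the compiler treats r13 as
 * permanently owned by this pointer in any unit that sees this.
 */
register struct ia64_tp *my_tp __asm("r13");
```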
*/ + if (tcb->tcb_isfake == 0) + tcb->tcb_curkcb->kcb_kmbx.km_curthread = crit; +} + +static __inline int +_kcb_in_critical(void) +{ + struct tcb *tcb; + uint32_t flags; + int ret; + + tcb = _tp->tp_self; + if (tcb->tcb_isfake != 0) { + /* + * We are in a critical region since there is no + * current thread. + */ + ret = 1; + } else { + flags = tcb->tcb_tmbx.tm_flags; + tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL; + ret = (tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL); + tcb->tcb_tmbx.tm_flags = flags; + } + return (ret); +} + +static __inline void +_tcb_set(struct kcb *kcb, struct tcb *tcb) +{ + if (tcb == NULL) { + kcb->kcb_curtcb = &kcb->kcb_faketcb; + _tp = &kcb->kcb_faketcb.tcb_tp; + } + else { + kcb->kcb_curtcb = tcb; + tcb->tcb_curkcb = kcb; + _tp = &tcb->tcb_tp; + } +} + +static __inline struct tcb * +_tcb_get(void) +{ + return (_tp->tp_self); +} + +static __inline struct pthread * +_get_curthread(void) +{ + return (_tp->tp_self->tcb_thread); +} + +/* + * Get the current kse. + * + * Like _kcb_get(), this can only be called while in a critical region. + */ +static __inline struct kse * +_get_curkse(void) +{ + return (_tp->tp_self->tcb_curkcb->kcb_kse); +} + void _ia64_enter_uts(kse_func_t uts, struct kse_mailbox *km, void *stack, size_t stacksz); int _ia64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc); int _ia64_save_context(mcontext_t *mc); static __inline int -_thread_enter_uts(struct kse_thr_mailbox *tm, struct kse_mailbox *km) +_thread_enter_uts(struct tcb *tcb, struct kcb *kcb) { - if (tm == NULL) - return (-1); - if (!_ia64_save_context(&tm->tm_context.uc_mcontext)) { - _ia64_enter_uts(km->km_func, km, km->km_stack.ss_sp, - km->km_stack.ss_size); + if (_ia64_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext) == 0) { + /* Make the fake tcb the current thread. */ + kcb->kcb_curtcb = &kcb->kcb_faketcb; + _tp = &kcb->kcb_faketcb.tcb_tp; + _ia64_enter_uts(kcb->kcb_kmbx.km_func, &kcb->kcb_kmbx, + kcb->kcb_kmbx.km_stack.ss_sp, + kcb->kcb_kmbx.km_stack.ss_size); /* We should not reach here. */ return (-1); } @@ -62,12 +220,18 @@ static __inline int -_thread_switch(struct kse_thr_mailbox *tm, struct kse_thr_mailbox **thrp) +_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox) { - if (tm == NULL) - return (-1); - _ia64_restore_context(&tm->tm_context.uc_mcontext, (intptr_t)tm, - (intptr_t*)thrp); + kcb->kcb_curtcb = tcb; + tcb->tcb_curkcb = kcb; + _tp = &tcb->tcb_tp; + if (setmbox != 0) + _ia64_restore_context(&tcb->tcb_tmbx.tm_context.uc_mcontext, + (intptr_t)&tcb->tcb_tmbx, + (intptr_t *)&kcb->kcb_kmbx.km_curthread); + else + _ia64_restore_context(&tcb->tcb_tmbx.tm_context.uc_mcontext, + 0, NULL); /* We should not reach here. 
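The setmbox argument above (and in the amd64/i386 variants earlier) moves a former caller responsibility into the switch primitive itself: with setmbox nonzero, the restore also stores the thread's mailbox address into km_curthread as part of the switch, so the kernel learns which thread is running and upcalls are re-enabled atomically with the context change; with setmbox zero, km_curthread stays NULL and the resumed thread remains in a critical region. The two call shapes, mirroring the uses in thr_kern.c later in this patch (fragment; variables as in that file):

```c
/* Resume td and leave the critical region (publish the mailbox): */
ret = _thread_switch(curkse->k_kcb, td->tcb, 1);

/* Resume td but keep it critical, e.g. while it still holds the
 * scheduler lock; km_curthread stays NULL: */
ret = _thread_switch(curkse->k_kcb, td->tcb, 0);
```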
*/ return (-1); } diff --git a/lib/libkse/thread/thr_cancel.c b/lib/libkse/thread/thr_cancel.c index 581b71974eb6..8190af6437bc 100644 --- a/lib/libkse/thread/thr_cancel.c +++ b/lib/libkse/thread/thr_cancel.c @@ -111,7 +111,7 @@ _pthread_cancel(pthread_t pthread) if ((pthread->cancelflags & THR_AT_CANCEL_POINT) && (pthread->blocked != 0 || pthread->attr.flags & PTHREAD_SCOPE_SYSTEM)) - kse_thr_interrupt(&pthread->tmbx, + kse_thr_interrupt(&pthread->tcb->tcb_tmbx, KSE_INTR_INTERRUPT, 0); } diff --git a/lib/libkse/thread/thr_concurrency.c b/lib/libkse/thread/thr_concurrency.c index 8eaa6c14fbec..694255ae8b24 100644 --- a/lib/libkse/thread/thr_concurrency.c +++ b/lib/libkse/thread/thr_concurrency.c @@ -105,7 +105,7 @@ _thr_setconcurrency(int new_level) newkse->k_kseg->kg_ksecount++; newkse->k_flags |= KF_STARTED; KSE_SCHED_UNLOCK(curthread->kse, newkse->k_kseg); - if (kse_create(&newkse->k_mbx, 0) != 0) { + if (kse_create(&newkse->k_kcb->kcb_kmbx, 0) != 0) { KSE_SCHED_LOCK(curthread->kse, newkse->k_kseg); TAILQ_REMOVE(&newkse->k_kseg->kg_kseq, newkse, k_kgqe); diff --git a/lib/libkse/thread/thr_create.c b/lib/libkse/thread/thr_create.c index 9f041368d9fb..fcc4ac5280da 100644 --- a/lib/libkse/thread/thr_create.c +++ b/lib/libkse/thread/thr_create.c @@ -49,7 +49,10 @@ int _thread_next_offset = OFF(tle.tqe_next); int _thread_uniqueid_offset = OFF(uniqueid); int _thread_state_offset = OFF(state); int _thread_name_offset = OFF(name); -int _thread_ctx_offset = OFF(tmbx.tm_context); +void *_thread_tcb_offset = OFF(tcb); +#undef OFF +#define OFF(f) offsetof(struct tcb, f) +int _thread_ctx_offset = OFF(tcb_tmbx.tm_context); #undef OFF int _thread_PS_RUNNING_value = PS_RUNNING; @@ -95,7 +98,6 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr, struct pthread *curthread, *new_thread; struct kse *kse = NULL; struct kse_group *kseg = NULL; - void *p; kse_critical_t crit; int i; int ret = 0; @@ -121,11 +123,6 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr, /* Insufficient memory to create a thread: */ ret = EAGAIN; } else { - /* Initialize the thread structure: */ - p = new_thread->alloc_addr; - memset(new_thread, 0, sizeof(struct pthread)); - new_thread->alloc_addr = p; - /* Check if default thread attributes are required: */ if (attr == NULL || *attr == NULL) /* Use the default thread attributes: */ @@ -146,7 +143,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr, /* Insufficient memory to create a new KSE/KSEG: */ ret = EAGAIN; if (kse != NULL) { - kse->k_mbx.km_flags |= KMF_DONE; + kse->k_kcb->kcb_kmbx.km_flags |= KMF_DONE; _kse_free(curthread, kse); } free_stack(&new_thread->attr); @@ -183,18 +180,19 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr, * Enter a critical region to get consistent context. 
*/ crit = _kse_critical_enter(); - THR_GETCONTEXT(&new_thread->tmbx.tm_context); + THR_GETCONTEXT(&new_thread->tcb->tcb_tmbx.tm_context); /* Initialize the thread for signals: */ new_thread->sigmask = curthread->sigmask; _kse_critical_leave(crit); - new_thread->tmbx.tm_udata = new_thread; - new_thread->tmbx.tm_context.uc_sigmask = + + new_thread->tcb->tcb_tmbx.tm_udata = new_thread; + new_thread->tcb->tcb_tmbx.tm_context.uc_sigmask = new_thread->sigmask; - new_thread->tmbx.tm_context.uc_stack.ss_size = + new_thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_size = new_thread->attr.stacksize_attr; - new_thread->tmbx.tm_context.uc_stack.ss_sp = + new_thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_sp = new_thread->attr.stackaddr_attr; - makecontext(&new_thread->tmbx.tm_context, + makecontext(&new_thread->tcb->tcb_tmbx.tm_context, (void (*)(void))thread_start, 4, new_thread, start_routine, arg); /* @@ -274,8 +272,8 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr, kse->k_kseg->kg_flags |= KGF_SINGLE_THREAD; new_thread->kse = kse; new_thread->kseg = kse->k_kseg; - kse->k_mbx.km_udata = kse; - kse->k_mbx.km_curthread = NULL; + kse->k_kcb->kcb_kmbx.km_udata = kse; + kse->k_kcb->kcb_kmbx.km_curthread = NULL; } /* diff --git a/lib/libkse/thread/thr_init.c b/lib/libkse/thread/thr_init.c index 5c31ae7b9b44..f9e0d9bc9767 100644 --- a/lib/libkse/thread/thr_init.c +++ b/lib/libkse/thread/thr_init.c @@ -68,7 +68,6 @@ #include "libc_private.h" #include "thr_private.h" -#include "ksd.h" int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *); int __pthread_mutex_lock(pthread_mutex_t *); @@ -306,12 +305,10 @@ _libpthread_init(struct pthread *curthread) KSEG_THRQ_ADD(_kse_initial->k_kseg, _thr_initial); /* Setup the KSE/thread specific data for the current KSE/thread. */ - if (_ksd_setprivate(&_thr_initial->kse->k_ksd) != 0) - PANIC("Can't set initial KSE specific data"); - _set_curkse(_thr_initial->kse); _thr_initial->kse->k_curthread = _thr_initial; + _kcb_set(_thr_initial->kse->k_kcb); + _tcb_set(_thr_initial->kse->k_kcb, _thr_initial->tcb); _thr_initial->kse->k_flags |= KF_INITIALIZED; - _kse_initial->k_curthread = _thr_initial; _thr_rtld_init(); } @@ -323,14 +320,8 @@ _libpthread_init(struct pthread *curthread) static void init_main_thread(struct pthread *thread) { - void *p; int i; - /* Zero the initial thread structure. */ - p = thread->alloc_addr; - memset(thread, 0, sizeof(struct pthread)); - thread->alloc_addr = p; - /* Setup the thread attributes. */ thread->attr = _pthread_attr_default; #ifdef SYSTEM_SCOPE_ONLY @@ -381,9 +372,11 @@ init_main_thread(struct pthread *thread) * Set up the thread mailbox. The threads saved context * is also in the mailbox. 
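Here and in _pthread_create above, a thread's initial machine state lives in tcb_tmbx.tm_context and is primed with the usual <ucontext.h> pattern: capture a valid context, aim its stack at the thread's stack, then retarget it at the entry point. Generic shape of that pattern (a sketch, not the patch's exact code; names are illustrative):

```c
#include <stddef.h>
#include <ucontext.h>

static void
prime_context(ucontext_t *uc, void *sp, size_t len, void (*entry)(void))
{
    getcontext(uc);                 /* start from a valid context */
    uc->uc_stack.ss_sp = sp;
    uc->uc_stack.ss_size = len;
    uc->uc_link = NULL;             /* entry() must never return */
    makecontext(uc, entry, 0);      /* retarget at entry() */
}
```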
*/ - thread->tmbx.tm_udata = thread; - thread->tmbx.tm_context.uc_stack.ss_size = thread->attr.stacksize_attr; - thread->tmbx.tm_context.uc_stack.ss_sp = thread->attr.stackaddr_attr; + thread->tcb->tcb_tmbx.tm_udata = thread; + thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_size = + thread->attr.stacksize_attr; + thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_sp = + thread->attr.stackaddr_attr; /* Default the priority of the initial thread: */ thread->base_priority = THR_DEFAULT_PRIORITY; diff --git a/lib/libkse/thread/thr_kern.c b/lib/libkse/thread/thr_kern.c index a08c677bb599..085c079c89c4 100644 --- a/lib/libkse/thread/thr_kern.c +++ b/lib/libkse/thread/thr_kern.c @@ -54,7 +54,6 @@ __FBSDID("$FreeBSD$"); #include "atomic_ops.h" #include "thr_private.h" #include "libc_private.h" -#include "ksd.h" /*#define DEBUG_THREAD_KERN */ #ifdef DEBUG_THREAD_KERN @@ -79,7 +78,7 @@ __FBSDID("$FreeBSD$"); #define KSE_STACKSIZE 16384 #define KSE_SET_MBOX(kse, thrd) \ - (kse)->k_mbx.km_curthread = &(thrd)->tmbx + (kse)->k_kcb->kcb_kmbx.km_curthread = &(thrd)->tcb->tcb_tmbx #define KSE_SET_EXITED(kse) (kse)->k_flags |= KF_EXITED @@ -132,8 +131,8 @@ static void kse_check_completed(struct kse *kse); static void kse_check_waitq(struct kse *kse); static void kse_fini(struct kse *curkse); static void kse_reinit(struct kse *kse, int sys_scope); -static void kse_sched_multi(struct kse *curkse); -static void kse_sched_single(struct kse *curkse); +static void kse_sched_multi(struct kse_mailbox *kmbx); +static void kse_sched_single(struct kse_mailbox *kmbx); static void kse_switchout_thread(struct kse *kse, struct pthread *thread); static void kse_wait(struct kse *kse, struct pthread *td_wait, int sigseq); static void kse_free_unlocked(struct kse *kse); @@ -152,20 +151,30 @@ static int thr_timedout(struct pthread *thread, struct timespec *curtime); static void thr_unlink(struct pthread *thread); +static __inline void +kse_set_curthread(struct kse *kse, struct pthread *td) +{ + kse->k_curthread = td; + if (td != NULL) + _tcb_set(kse->k_kcb, td->tcb); + else + _tcb_set(kse->k_kcb, NULL); +} + static void __inline thr_accounting(struct pthread *thread) { if ((thread->slice_usec != -1) && (thread->slice_usec <= TIMESLICE_USEC) && (thread->attr.sched_policy != SCHED_FIFO)) { - thread->slice_usec += (thread->tmbx.tm_uticks - + thread->tmbx.tm_sticks) * _clock_res_usec; + thread->slice_usec += (thread->tcb->tcb_tmbx.tm_uticks + + thread->tcb->tcb_tmbx.tm_sticks) * _clock_res_usec; /* Check for time quantum exceeded: */ if (thread->slice_usec > TIMESLICE_USEC) thread->slice_usec = -1; } - thread->tmbx.tm_uticks = 0; - thread->tmbx.tm_sticks = 0; + thread->tcb->tcb_tmbx.tm_uticks = 0; + thread->tcb->tcb_tmbx.tm_sticks = 0; } /* @@ -246,7 +255,7 @@ _kse_single_thread(struct pthread *curthread) _lockuser_destroy(&kse->k_lockusers[i]); } _lock_destroy(&kse->k_lock); - _ksd_destroy(&kse->k_ksd); + _kcb_dtor(kse->k_kcb); if (kse->k_stack.ss_sp != NULL) free(kse->k_stack.ss_sp); free(kse); @@ -341,7 +350,7 @@ _kse_single_thread(struct pthread *curthread) #else if (__isthreaded) _thr_signal_deinit(); - _ksd_set_tmbx(NULL); + curthread->kse->k_kcb->kcb_kmbx.km_curthread = NULL; __isthreaded = 0; active_threads = 0; #endif @@ -409,11 +418,12 @@ _kse_setthreaded(int threaded) * For bound thread, kernel reads mailbox pointer once, * we'd set it here before calling kse_create */ + _tcb_set(_kse_initial->k_kcb, _thr_initial->tcb); KSE_SET_MBOX(_kse_initial, _thr_initial); - _kse_initial->k_mbx.km_flags |= KMF_BOUND; + 
_kse_initial->k_kcb->kcb_kmbx.km_flags |= KMF_BOUND; #endif - if (kse_create(&_kse_initial->k_mbx, 0) != 0) { + if (kse_create(&_kse_initial->k_kcb->kcb_kmbx, 0) != 0) { _kse_initial->k_flags &= ~KF_STARTED; __isthreaded = 0; PANIC("kse_create() failed\n"); @@ -422,6 +432,7 @@ _kse_setthreaded(int threaded) #ifndef SYSTEM_SCOPE_ONLY /* Set current thread to initial thread */ + _tcb_set(_kse_initial->k_kcb, _thr_initial->tcb); KSE_SET_MBOX(_kse_initial, _thr_initial); _thr_start_sig_daemon(); _thr_setmaxconcurrency(); @@ -450,7 +461,7 @@ _kse_lock_wait(struct lock *lock, struct lockuser *lu) struct timespec ts; int saved_flags; - if (curkse->k_mbx.km_curthread != NULL) + if (curkse->k_kcb->kcb_kmbx.km_curthread != NULL) PANIC("kse_lock_wait does not disable upcall.\n"); /* * Enter a loop to wait until we get the lock. @@ -462,10 +473,11 @@ _kse_lock_wait(struct lock *lock, struct lockuser *lu) * Yield the kse and wait to be notified when the lock * is granted. */ - saved_flags = curkse->k_mbx.km_flags; - curkse->k_mbx.km_flags |= KMF_NOUPCALL | KMF_NOCOMPLETED; + saved_flags = curkse->k_kcb->kcb_kmbx.km_flags; + curkse->k_kcb->kcb_kmbx.km_flags |= KMF_NOUPCALL | + KMF_NOCOMPLETED; kse_release(&ts); - curkse->k_mbx.km_flags = saved_flags; + curkse->k_kcb->kcb_kmbx.km_flags = saved_flags; } } @@ -482,7 +494,7 @@ _kse_lock_wakeup(struct lock *lock, struct lockuser *lu) if (kse == curkse) PANIC("KSE trying to wake itself up in lock"); else { - mbx = &kse->k_mbx; + mbx = &kse->k_kcb->kcb_kmbx; _lock_grant(lock, lu); /* * Notify the owning kse that it has the lock. @@ -534,8 +546,7 @@ _kse_critical_enter(void) { kse_critical_t crit; - crit = _ksd_get_tmbx(); - _ksd_set_tmbx(NULL); + crit = (kse_critical_t)_kcb_critical_enter(); return (crit); } @@ -544,7 +555,7 @@ _kse_critical_leave(kse_critical_t crit) { struct pthread *curthread; - _ksd_set_tmbx(crit); + _kcb_critical_leave((struct kse_thr_mailbox *)crit); if ((crit != NULL) && ((curthread = _get_curthread()) != NULL)) THR_YIELD_CHECK(curthread); } @@ -552,7 +563,7 @@ _kse_critical_leave(kse_critical_t crit) int _kse_in_critical(void) { - return (_ksd_get_tmbx() == NULL); + return (_kcb_in_critical()); } void @@ -629,17 +640,17 @@ _thr_sched_switch_unlocked(struct pthread *curthread) * we don't bother checking for that. */ if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) - kse_sched_single(curkse); + kse_sched_single(&curkse->k_kcb->kcb_kmbx); else if ((curthread->state == PS_DEAD) || (((td = KSE_RUNQ_FIRST(curkse)) == NULL) && (curthread->state != PS_RUNNING)) || ((td != NULL) && (td->lock_switch == 0))) { curkse->k_switch = 1; - _thread_enter_uts(&curthread->tmbx, &curkse->k_mbx); + _thread_enter_uts(curthread->tcb, curkse->k_kcb); } else { uts_once = 0; - THR_GETCONTEXT(&curthread->tmbx.tm_context); + THR_GETCONTEXT(&curthread->tcb->tcb_tmbx.tm_context); if (uts_once == 0) { uts_once = 1; @@ -649,7 +660,7 @@ _thr_sched_switch_unlocked(struct pthread *curthread) /* Choose another thread to run. */ td = KSE_RUNQ_FIRST(curkse); KSE_RUNQ_REMOVE(curkse, td); - curkse->k_curthread = td; + kse_set_curthread(curkse, td); /* * Make sure the current thread's kse points to @@ -674,7 +685,7 @@ _thr_sched_switch_unlocked(struct pthread *curthread) /* * Continue the thread at its current frame: */ - ret = _thread_switch(&td->tmbx, NULL); + ret = _thread_switch(curkse->k_kcb, td->tcb, 0); /* This point should not be reached. 
*/ if (ret != 0) PANIC("Bad return from _thread_switch"); @@ -701,7 +712,7 @@ _thr_sched_switch_unlocked(struct pthread *curthread) curthread->lock_switch = 0; KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); - _kse_critical_leave(&curthread->tmbx); + _kse_critical_leave(&curthread->tcb->tcb_tmbx); } /* * This thread is being resumed; check for cancellations. @@ -728,17 +739,21 @@ _thr_sched_switch_unlocked(struct pthread *curthread) */ static void -kse_sched_single(struct kse *curkse) +kse_sched_single(struct kse_mailbox *kmbx) { - struct pthread *curthread = curkse->k_curthread; + struct kse *curkse; + struct pthread *curthread; struct timespec ts; sigset_t sigmask; int i, sigseqno, level, first = 0; + curkse = (struct kse *)kmbx->km_udata; + curthread = curkse->k_curthread; + if ((curkse->k_flags & KF_INITIALIZED) == 0) { /* Setup this KSEs specific data. */ - _ksd_setprivate(&curkse->k_ksd); - _set_curkse(curkse); + _kcb_set(curkse->k_kcb); + _tcb_set(curkse->k_kcb, curthread->tcb); curkse->k_flags |= KF_INITIALIZED; first = 1; curthread->active = 1; @@ -750,7 +765,7 @@ kse_sched_single(struct kse *curkse) * It is used to let other code work, those code want mailbox * to be cleared. */ - _kse_critical_enter(); + (void)_kse_critical_enter(); } curthread->critical_yield = 0; @@ -875,7 +890,7 @@ kse_sched_single(struct kse *curkse) DBG_MSG("Continuing bound thread %p\n", curthread); if (first) { - _kse_critical_leave(&curthread->tmbx); + _kse_critical_leave(&curthread->tcb->tcb_tmbx); pthread_exit(curthread->start_routine(curthread->arg)); } } @@ -898,20 +913,21 @@ dump_queues(struct kse *curkse) * This is the scheduler for a KSE which runs multiple threads. */ static void -kse_sched_multi(struct kse *curkse) +kse_sched_multi(struct kse_mailbox *kmbx) { + struct kse *curkse; struct pthread *curthread, *td_wait; struct pthread_sigframe *curframe; int ret; - THR_ASSERT(curkse->k_mbx.km_curthread == NULL, + curkse = (struct kse *)kmbx->km_udata; + THR_ASSERT(curkse->k_kcb->kcb_kmbx.km_curthread == NULL, "Mailbox not null in kse_sched_multi"); /* Check for first time initialization: */ if ((curkse->k_flags & KF_INITIALIZED) == 0) { /* Setup this KSEs specific data. */ - _ksd_setprivate(&curkse->k_ksd); - _set_curkse(curkse); + _kcb_set(curkse->k_kcb); /* Set this before grabbing the context. */ curkse->k_flags |= KF_INITIALIZED; @@ -928,6 +944,12 @@ kse_sched_multi(struct kse *curkse) KSE_SCHED_LOCK(curkse, curkse->k_kseg); curkse->k_switch = 0; + /* + * Now that the scheduler lock is held, get the current + * thread. The KSE's current thread cannot be safely + * examined without the lock because it could have returned + * as completed on another KSE. See kse_check_completed(). 
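The comment being added here is worth unpacking; the window it guards against looks roughly like this (illustrative interleaving):

```c
/*
 * KSE A: thread T blocks in the kernel; A upcalls into
 *        kse_sched_multi() with A->k_curthread still == T.
 * KSE B: the kernel reports T on B's completed list; B's
 *        kse_check_completed() requeues T and, because T->kse
 *        (still A) thinks T is current, calls
 *        kse_set_curthread(A, NULL).
 * KSE A: must take its scheduler lock before re-reading
 *        k_curthread, or it can miss B's update.
 */
```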
+ */ curthread = curkse->k_curthread; if (KSE_IS_IDLE(curkse)) { @@ -975,20 +997,19 @@ kse_sched_multi(struct kse *curkse) curthread->active = 1; if ((curthread->flags & THR_FLAGS_IN_RUNQ) != 0) KSE_RUNQ_REMOVE(curkse, curthread); - curkse->k_curthread = curthread; + kse_set_curthread(curkse, curthread); curthread->kse = curkse; DBG_MSG("Continuing thread %p in critical region\n", curthread); kse_wakeup_multi(curkse); KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); - ret = _thread_switch(&curthread->tmbx, - &curkse->k_mbx.km_curthread); + ret = _thread_switch(curkse->k_kcb, curthread->tcb, 1); if (ret != 0) PANIC("Can't resume thread in critical region\n"); } else if ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) kse_switchout_thread(curkse, curthread); - curkse->k_curthread = NULL; + kse_set_curthread(curkse, NULL); kse_wakeup_multi(curkse); @@ -1034,7 +1055,7 @@ kse_sched_multi(struct kse *curkse) /* * Make the selected thread the current thread. */ - curkse->k_curthread = curthread; + kse_set_curthread(curkse, curthread); /* * Make sure the current thread's kse points to this kse. @@ -1069,13 +1090,13 @@ kse_sched_multi(struct kse *curkse) (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) && ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))) && !THR_IN_CRITICAL(curthread)) - signalcontext(&curthread->tmbx.tm_context, 0, + signalcontext(&curthread->tcb->tcb_tmbx.tm_context, 0, (__sighandler_t *)thr_resume_wrapper); #else if ((curframe == NULL) && (curthread->state == PS_RUNNING) && (curthread->check_pending != 0) && !THR_IN_CRITICAL(curthread)) { curthread->check_pending = 0; - signalcontext(&curthread->tmbx.tm_context, 0, + signalcontext(&curthread->tcb->tcb_tmbx.tm_context, 0, (__sighandler_t *)thr_resume_wrapper); } #endif @@ -1087,12 +1108,11 @@ kse_sched_multi(struct kse *curkse) * This thread came from a scheduler switch; it will * unlock the scheduler lock and set the mailbox. */ - ret = _thread_switch(&curthread->tmbx, NULL); + ret = _thread_switch(curkse->k_kcb, curthread->tcb, 0); } else { /* This thread won't unlock the scheduler lock. */ KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); - ret = _thread_switch(&curthread->tmbx, - &curkse->k_mbx.km_curthread); + ret = _thread_switch(curkse->k_kcb, curthread->tcb, 1); } if (ret != 0) PANIC("Thread has returned from _thread_switch"); @@ -1114,9 +1134,9 @@ thr_resume_wrapper(int sig, siginfo_t *siginfo, ucontext_t *ucp) thr_resume_check(curthread, ucp, NULL); _kse_critical_enter(); curkse = _get_curkse(); - curthread->tmbx.tm_context = *ucp; + curthread->tcb->tcb_tmbx.tm_context = *ucp; curthread->error = err_save; - ret = _thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread); + ret = _thread_switch(curkse->k_kcb, curthread->tcb, 1); if (ret != 0) PANIC("thr_resume_wrapper: thread has returned " "from _thread_switch"); @@ -1242,7 +1262,7 @@ _thr_gc(struct pthread *curthread) if ((td->flags & THR_FLAGS_GC_SAFE) == 0) continue; else if (((td->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) && - ((td->kse->k_mbx.km_flags & KMF_DONE) == 0)) { + ((td->kse->k_kcb->kcb_kmbx.km_flags & KMF_DONE) == 0)) { /* * The thread and KSE are operating on the same * stack. Wait for the KSE to exit before freeing @@ -1319,9 +1339,9 @@ _thr_schedule_add(struct pthread *curthread, struct pthread *newthread) */ if ((newthread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) { /* We use the thread's stack as the KSE's stack. 
*/ - newthread->kse->k_mbx.km_stack.ss_sp = + newthread->kse->k_kcb->kcb_kmbx.km_stack.ss_sp = newthread->attr.stackaddr_attr; - newthread->kse->k_mbx.km_stack.ss_size = + newthread->kse->k_kcb->kcb_kmbx.km_stack.ss_size = newthread->attr.stacksize_attr; /* @@ -1331,10 +1351,10 @@ _thr_schedule_add(struct pthread *curthread, struct pthread *newthread) KSEG_THRQ_ADD(newthread->kseg, newthread); /* this thread never gives up kse */ newthread->active = 1; - newthread->kse->k_curthread = newthread; - newthread->kse->k_mbx.km_flags = KMF_BOUND; - newthread->kse->k_mbx.km_func = (kse_func_t *)kse_sched_single; - newthread->kse->k_mbx.km_quantum = 0; + kse_set_curthread(newthread->kse, newthread); + newthread->kse->k_kcb->kcb_kmbx.km_flags = KMF_BOUND; + newthread->kse->k_kcb->kcb_kmbx.km_func = (kse_func_t *)kse_sched_single; + newthread->kse->k_kcb->kcb_kmbx.km_quantum = 0; KSE_SET_MBOX(newthread->kse, newthread); /* * This thread needs a new KSE and KSEG. @@ -1342,7 +1362,7 @@ _thr_schedule_add(struct pthread *curthread, struct pthread *newthread) newthread->kse->k_flags &= ~KF_INITIALIZED; newthread->kse->k_flags |= KF_STARTED; /* Fire up! */ - ret = kse_create(&newthread->kse->k_mbx, 1); + ret = kse_create(&newthread->kse->k_kcb->kcb_kmbx, 1); if (ret != 0) ret = errno; } @@ -1363,10 +1383,10 @@ _thr_schedule_add(struct pthread *curthread, struct pthread *newthread) * outside of holding the lock. */ newthread->kse->k_flags |= KF_STARTED; - newthread->kse->k_mbx.km_func = + newthread->kse->k_kcb->kcb_kmbx.km_func = (kse_func_t *)kse_sched_multi; - newthread->kse->k_mbx.km_flags = 0; - kse_create(&newthread->kse->k_mbx, 0); + newthread->kse->k_kcb->kcb_kmbx.km_flags = 0; + kse_create(&newthread->kse->k_kcb->kcb_kmbx, 0); } else if ((newthread->state == PS_RUNNING) && KSE_IS_IDLE(newthread->kse)) { /* @@ -1418,8 +1438,8 @@ kse_check_completed(struct kse *kse) struct kse_thr_mailbox *completed; int sig; - if ((completed = kse->k_mbx.km_completed) != NULL) { - kse->k_mbx.km_completed = NULL; + if ((completed = kse->k_kcb->kcb_kmbx.km_completed) != NULL) { + kse->k_kcb->kcb_kmbx.km_completed = NULL; while (completed != NULL) { thread = completed->tm_udata; DBG_MSG("Found completed thread %p, name %s\n", @@ -1434,17 +1454,23 @@ kse_check_completed(struct kse *kse) KSE_RUNQ_INSERT_TAIL(kse, thread); if ((thread->kse != kse) && (thread->kse->k_curthread == thread)) { - thread->kse->k_curthread = NULL; + /* + * Remove this thread from its + * previous KSE so that it (the KSE) + * doesn't think it is still active. + */ + kse_set_curthread(thread->kse, NULL); thread->active = 0; } } - if ((sig = thread->tmbx.tm_syncsig.si_signo) != 0) { + if ((sig = thread->tcb->tcb_tmbx.tm_syncsig.si_signo) + != 0) { if (SIGISMEMBER(thread->sigmask, sig)) SIGADDSET(thread->sigpend, sig); else (void)_thr_sig_add(thread, sig, - &thread->tmbx.tm_syncsig); - thread->tmbx.tm_syncsig.si_signo = 0; + &thread->tcb->tcb_tmbx.tm_syncsig); + thread->tcb->tcb_tmbx.tm_syncsig.si_signo = 0; } completed = completed->tm_next; } @@ -1567,7 +1593,7 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread) if (SIGISMEMBER(thread->sigpend, i) && !SIGISMEMBER(thread->sigmask, i)) { restart = _thread_sigact[1 - 1].sa_flags & SA_RESTART; - kse_thr_interrupt(&thread->tmbx, + kse_thr_interrupt(&thread->tcb->tcb_tmbx, restart ? 
KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0); break; } @@ -1584,6 +1610,7 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread) */ thread->active = 0; thread->need_switchout = 0; + thread->lock_switch = 0; thr_cleanup(kse, thread); return; break; @@ -1705,10 +1732,10 @@ kse_wait(struct kse *kse, struct pthread *td_wait, int sigseqno) (kse->k_sigseqno != sigseqno)) ; /* don't sleep */ else { - saved_flags = kse->k_mbx.km_flags; - kse->k_mbx.km_flags |= KMF_NOUPCALL; + saved_flags = kse->k_kcb->kcb_kmbx.km_flags; + kse->k_kcb->kcb_kmbx.km_flags |= KMF_NOUPCALL; kse_release(&ts_sleep); - kse->k_mbx.km_flags = saved_flags; + kse->k_kcb->kcb_kmbx.km_flags = saved_flags; } KSE_SCHED_LOCK(kse, kse->k_kseg); if (KSE_IS_IDLE(kse)) { @@ -1781,7 +1808,7 @@ kse_fini(struct kse *kse) KSE_SCHED_UNLOCK(kse, kse->k_kseg); ts.tv_sec = 120; ts.tv_nsec = 0; - kse->k_mbx.km_flags = 0; + kse->k_kcb->kcb_kmbx.km_flags = 0; kse_release(&ts); /* Never reach */ } @@ -1898,13 +1925,13 @@ kse_wakeup_one(struct pthread *thread) if (KSE_IS_IDLE(thread->kse)) { KSE_CLEAR_IDLE(thread->kse); thread->kseg->kg_idle_kses--; - return (&thread->kse->k_mbx); + return (&thread->kse->k_kcb->kcb_kmbx); } else { TAILQ_FOREACH(ke, &thread->kseg->kg_kseq, k_kgqe) { if (KSE_IS_IDLE(ke)) { KSE_CLEAR_IDLE(ke); ke->k_kseg->kg_idle_kses--; - return (&ke->k_mbx); + return (&ke->k_kcb->kcb_kmbx); } } } @@ -1930,25 +1957,6 @@ kse_wakeup_multi(struct kse *curkse) } } -struct pthread * -_get_curthread(void) -{ - return (_ksd_curthread()); -} - -/* This assumes the caller has disabled upcalls. */ -struct kse * -_get_curkse(void) -{ - return (_ksd_curkse()); -} - -void -_set_curkse(struct kse *kse) -{ - _ksd_setprivate(&kse->k_ksd); -} - /* * Allocate a new KSEG. * @@ -2048,8 +2056,8 @@ struct kse * _kse_alloc(struct pthread *curthread, int sys_scope) { struct kse *kse = NULL; + char *stack; kse_critical_t crit; - int need_ksd = 0; int i; if ((curthread != NULL) && (free_kse_count > 0)) { @@ -2058,7 +2066,7 @@ _kse_alloc(struct pthread *curthread, int sys_scope) /* Search for a finished KSE. */ kse = TAILQ_FIRST(&free_kseq); while ((kse != NULL) && - ((kse->k_mbx.km_flags & KMF_DONE) == 0)) { + ((kse->k_kcb->kcb_kmbx.km_flags & KMF_DONE) == 0)) { kse = TAILQ_NEXT(kse, k_qe); } if (kse != NULL) { @@ -2075,8 +2083,22 @@ _kse_alloc(struct pthread *curthread, int sys_scope) } if ((kse == NULL) && ((kse = (struct kse *)malloc(sizeof(*kse))) != NULL)) { + if (sys_scope != 0) + stack = NULL; + else if ((stack = malloc(KSE_STACKSIZE)) == NULL) { + free(kse); + return (NULL); + } bzero(kse, sizeof(*kse)); + /* Initialize KCB without the lock. */ + if ((kse->k_kcb = _kcb_ctor(kse)) == NULL) { + if (stack != NULL) + free(stack); + free(kse); + return (NULL); + } + /* Initialize the lockusers. */ for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) { _lockuser_init(&kse->k_lockusers[i], (void *)kse); @@ -2084,58 +2106,10 @@ _kse_alloc(struct pthread *curthread, int sys_scope) } /* _lock_init(kse->k_lock, ...) */ - /* We had to malloc a kse; mark it as needing a new ID.*/ - need_ksd = 1; - - /* - * Create the KSE context. - * Scope system threads (one thread per KSE) are not required - * to have a stack for an unneeded kse upcall. 
- */ - if (!sys_scope) { - kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi; - kse->k_stack.ss_sp = (char *) malloc(KSE_STACKSIZE); - kse->k_stack.ss_size = KSE_STACKSIZE; - } else { - kse->k_mbx.km_func = (kse_func_t *)kse_sched_single; - } - kse->k_mbx.km_udata = (void *)kse; - kse->k_mbx.km_quantum = 20000; - /* - * We need to keep a copy of the stack in case it - * doesn't get used; a KSE running a scope system - * thread will use that thread's stack. - */ - kse->k_mbx.km_stack = kse->k_stack; - if (!sys_scope && kse->k_stack.ss_sp == NULL) { - for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) { - _lockuser_destroy(&kse->k_lockusers[i]); - } - /* _lock_destroy(&kse->k_lock); */ - free(kse); - kse = NULL; - } - } - if ((kse != NULL) && (need_ksd != 0)) { - /* This KSE needs initialization. */ if (curthread != NULL) { crit = _kse_critical_enter(); KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock); } - /* Initialize KSD inside of the lock. */ - if (_ksd_create(&kse->k_ksd, (void *)kse, sizeof(*kse)) != 0) { - if (curthread != NULL) { - KSE_LOCK_RELEASE(curthread->kse, &kse_lock); - _kse_critical_leave(crit); - } - if (kse->k_stack.ss_sp) - free(kse->k_stack.ss_sp); - for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) { - _lockuser_destroy(&kse->k_lockusers[i]); - } - free(kse); - return (NULL); - } kse->k_flags = 0; TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe); active_kse_count++; @@ -2143,6 +2117,28 @@ _kse_alloc(struct pthread *curthread, int sys_scope) KSE_LOCK_RELEASE(curthread->kse, &kse_lock); _kse_critical_leave(crit); } + /* + * Create the KSE context. + * Scope system threads (one thread per KSE) are not required + * to have a stack for an unneeded kse upcall. + */ + if (!sys_scope) { + kse->k_kcb->kcb_kmbx.km_func = (kse_func_t *)kse_sched_multi; + kse->k_stack.ss_sp = stack; + kse->k_stack.ss_size = KSE_STACKSIZE; + } else { + kse->k_kcb->kcb_kmbx.km_func = (kse_func_t *)kse_sched_single; + kse->k_stack.ss_sp = NULL; + kse->k_stack.ss_size = 0; + } + kse->k_kcb->kcb_kmbx.km_udata = (void *)kse; + kse->k_kcb->kcb_kmbx.km_quantum = 20000; + /* + * We need to keep a copy of the stack in case it + * doesn't get used; a KSE running a scope system + * thread will use that thread's stack. 
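Note the restructuring of _kse_alloc here: the KSE stack and the kcb are now allocated up front, where failure is cheap, only a fully constructed KSE is linked onto active_kseq under kse_lock, and the mailbox fields are filled in afterwards; the removed code instead had to create the KSD and unwind failures from inside the locked section. Condensed shape of the new flow (a hypothetical condensation, error handling elided):

```c
stack = sys_scope ? NULL : malloc(KSE_STACKSIZE);  /* may fail early */
kse = malloc(sizeof(*kse));
bzero(kse, sizeof(*kse));
kse->k_kcb = _kcb_ctor(kse);        /* no locks held yet */
/* then: lock kse_lock, insert into active_kseq, unlock */
/* finally: set km_func, km_stack, km_quantum, km_udata */
```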
+ */ + kse->k_kcb->kcb_kmbx.km_stack = kse->k_stack; } return (kse); } @@ -2151,26 +2147,26 @@ static void kse_reinit(struct kse *kse, int sys_scope) { if (!sys_scope) { - kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi; + kse->k_kcb->kcb_kmbx.km_func = (kse_func_t *)kse_sched_multi; if (kse->k_stack.ss_sp == NULL) { /* XXX check allocation failure */ kse->k_stack.ss_sp = (char *) malloc(KSE_STACKSIZE); kse->k_stack.ss_size = KSE_STACKSIZE; } - kse->k_mbx.km_quantum = 20000; + kse->k_kcb->kcb_kmbx.km_quantum = 20000; } else { - kse->k_mbx.km_func = (kse_func_t *)kse_sched_single; + kse->k_kcb->kcb_kmbx.km_func = (kse_func_t *)kse_sched_single; if (kse->k_stack.ss_sp) free(kse->k_stack.ss_sp); kse->k_stack.ss_sp = NULL; kse->k_stack.ss_size = 0; - kse->k_mbx.km_quantum = 0; + kse->k_kcb->kcb_kmbx.km_quantum = 0; } - kse->k_mbx.km_stack = kse->k_stack; - kse->k_mbx.km_udata = (void *)kse; - kse->k_mbx.km_curthread = NULL; - kse->k_mbx.km_flags = 0; - kse->k_curthread = 0; + kse->k_kcb->kcb_kmbx.km_stack = kse->k_stack; + kse->k_kcb->kcb_kmbx.km_udata = (void *)kse; + kse->k_kcb->kcb_kmbx.km_curthread = NULL; + kse->k_kcb->kcb_kmbx.km_flags = 0; + kse->k_curthread = NULL; kse->k_kseg = 0; kse->k_schedq = 0; kse->k_locklevel = 0; @@ -2193,9 +2189,10 @@ kse_free_unlocked(struct kse *kse) TAILQ_REMOVE(&active_kseq, kse, k_qe); active_kse_count--; kse->k_kseg = NULL; - kse->k_mbx.km_quantum = 20000; + kse->k_kcb->kcb_kmbx.km_quantum = 20000; kse->k_flags = 0; TAILQ_INSERT_HEAD(&free_kseq, kse, k_qe); + _kcb_dtor(kse->k_kcb); free_kse_count++; } @@ -2239,7 +2236,6 @@ struct pthread * _thr_alloc(struct pthread *curthread) { kse_critical_t crit; - void *p; struct pthread *thread = NULL; if (curthread != NULL) { @@ -2256,11 +2252,12 @@ _thr_alloc(struct pthread *curthread) _kse_critical_leave(crit); } } - if (thread == NULL) { - p = malloc(sizeof(struct pthread) + THR_ALIGNBYTES); - if (p != NULL) { - thread = (struct pthread *)THR_ALIGN(p); - thread->alloc_addr = p; + if ((thread == NULL) && + ((thread = malloc(sizeof(struct pthread))) != NULL)) { + bzero(thread, sizeof(struct pthread)); + if ((thread->tcb = _tcb_ctor(thread)) == NULL) { + free(thread); + thread = NULL; } } return (thread); @@ -2278,9 +2275,16 @@ _thr_free(struct pthread *curthread, struct pthread *thread) _lockuser_destroy(&thread->lockusers[i]); } _lock_destroy(&thread->lock); - free(thread->alloc_addr); + _tcb_dtor(thread->tcb); + free(thread); } else { + /* Reinitialize any important fields here. */ + thread->lock_switch = 0; + sigemptyset(&thread->sigpend); + thread->check_pending = 0; + + /* Add the thread to the free thread list. 
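With the tcb split out of struct pthread, allocation and teardown now pair up as shown above: _thr_alloc zeroes the pthread and delegates alignment and any TLS setup to the per-arch _tcb_ctor, and _thr_free must run _tcb_dtor before freeing the pthread itself. In brief (condensed from the surrounding code):

```c
/* allocate */
thread = malloc(sizeof(struct pthread));
bzero(thread, sizeof(struct pthread));
thread->tcb = _tcb_ctor(thread);    /* arch code aligns/allocates */

/* release */
_tcb_dtor(thread->tcb);
free(thread);
```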
*/ crit = _kse_critical_enter(); KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock); TAILQ_INSERT_TAIL(&free_threadq, thread, tle); diff --git a/lib/libkse/thread/thr_private.h b/lib/libkse/thread/thr_private.h index 25a47a77488b..0664aca4b24c 100644 --- a/lib/libkse/thread/thr_private.h +++ b/lib/libkse/thread/thr_private.h @@ -173,15 +173,14 @@ struct kse_group; #define MAX_KSE_LOCKLEVEL 5 struct kse { - struct kse_mailbox k_mbx; /* kernel kse mailbox */ /* -- location and order specific items for gdb -- */ + struct kcb *k_kcb; struct pthread *k_curthread; /* current thread */ struct kse_group *k_kseg; /* parent KSEG */ struct sched_queue *k_schedq; /* scheduling queue */ /* -- end of location and order specific items -- */ TAILQ_ENTRY(kse) k_qe; /* KSE list link entry */ TAILQ_ENTRY(kse) k_kgqe; /* KSEG's KSE list entry */ - struct ksd k_ksd; /* KSE specific data */ /* * Items that are only modified by the kse, or that otherwise * don't need to be locked when accessed @@ -300,7 +299,7 @@ do { \ #define KSE_CLEAR_WAIT(kse) atomic_store_rel_int(&(kse)->k_waiting, 0) #define KSE_WAITING(kse) (kse)->k_waiting != 0 -#define KSE_WAKEUP(kse) kse_wakeup(&(kse)->k_mbx) +#define KSE_WAKEUP(kse) kse_wakeup(&(kse)->k_kcb->kcb_kmbx) #define KSE_SET_IDLE(kse) ((kse)->k_idle = 1) #define KSE_CLEAR_IDLE(kse) ((kse)->k_idle = 0) @@ -509,7 +508,7 @@ struct pthread_attr { */ #define KSE_GET_TOD(curkse, tsp) \ do { \ - *tsp = (curkse)->k_mbx.km_timeofday; \ + *tsp = (curkse)->k_kcb->kcb_kmbx.km_timeofday; \ if ((tsp)->tv_sec == 0) \ clock_gettime(CLOCK_REALTIME, tsp); \ } while (0) @@ -601,8 +600,7 @@ struct pthread { /* * Thread mailbox is first so it cal be aligned properly. */ - struct kse_thr_mailbox tmbx; - void *alloc_addr; /* real address (unaligned) */ + struct tcb *tcb; /* * Magic value to help recognize a valid thread structure @@ -1049,9 +1047,6 @@ SCLASS int _thr_debug_flags SCLASS_PRESET(0); __BEGIN_DECLS int _cond_reinit(pthread_cond_t *); void _cond_wait_backout(struct pthread *); -struct pthread *_get_curthread(void); -struct kse *_get_curkse(void); -void _set_curkse(struct kse *); struct kse *_kse_alloc(struct pthread *, int sys_scope); kse_critical_t _kse_critical_enter(void); void _kse_critical_leave(kse_critical_t); @@ -1098,8 +1093,6 @@ int _pthread_rwlock_destroy (pthread_rwlock_t *); struct pthread *_pthread_self(void); int _pthread_setspecific(pthread_key_t, const void *); struct pthread *_thr_alloc(struct pthread *); -int _thread_enter_uts(struct kse_thr_mailbox *, struct kse_mailbox *); -int _thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **); void _thr_exit(char *, int, char *); void _thr_exit_cleanup(void); void _thr_lock_wait(struct lock *lock, struct lockuser *lu); diff --git a/lib/libkse/thread/thr_sig.c b/lib/libkse/thread/thr_sig.c index 6f1631831ce0..77654ded330a 100644 --- a/lib/libkse/thread/thr_sig.c +++ b/lib/libkse/thread/thr_sig.c @@ -41,7 +41,6 @@ #include #include #include "thr_private.h" -#include "pthread_md.h" /* Prototypes: */ static void build_siginfo(siginfo_t *info, int signo); @@ -212,11 +211,11 @@ sig_daemon(void *arg /* Unused */) } ts.tv_sec = 30; ts.tv_nsec = 0; - curkse->k_mbx.km_flags = + curkse->k_kcb->kcb_kmbx.km_flags = KMF_NOUPCALL | KMF_NOCOMPLETED | KMF_WAITSIGEVENT; kse_release(&ts); - curkse->k_mbx.km_flags = 0; - set = curkse->k_mbx.km_sigscaught; + curkse->k_kcb->kcb_kmbx.km_flags = 0; + set = curkse->k_kcb->kcb_kmbx.km_sigscaught; } return (0); } @@ -355,7 +354,7 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp) * 
so kse_release will return from kernel immediately. */ if (KSE_IS_IDLE(curkse)) - kse_wakeup(&curkse->k_mbx); + kse_wakeup(&curkse->k_kcb->kcb_kmbx); return; } @@ -377,7 +376,7 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp) __sys_sigaction(sig, NULL, &_thread_sigact[sig - 1]); } KSE_LOCK_RELEASE(curkse, &_thread_signal_lock); - _kse_critical_leave(&curthread->tmbx); + _kse_critical_leave(&curthread->tcb->tcb_tmbx); /* Now invoke real handler */ if (((__sighandler_t *)sigfunc != SIG_DFL) && @@ -403,7 +402,7 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp) curthread->interrupted = intr_save; _kse_critical_enter(); curthread->sigmask = ucp->uc_sigmask; - _kse_critical_leave(&curthread->tmbx); + _kse_critical_leave(&curthread->tcb->tcb_tmbx); DBG_MSG("<<< _thr_sig_handler(%d)\n", sig); } @@ -446,7 +445,7 @@ thr_sig_invoke_handler(struct pthread *curthread, int sig, siginfo_t *info, } KSE_LOCK_RELEASE(curkse, &_thread_signal_lock); KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); - _kse_critical_leave(&curthread->tmbx); + _kse_critical_leave(&curthread->tcb->tcb_tmbx); /* * We are processing buffered signals, synchronize working * signal mask into kernel. @@ -737,7 +736,7 @@ _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp, KSE_SCHED_UNLOCK(curkse, curkse->k_kseg); if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL); - _kse_critical_leave(&curthread->tmbx); + _kse_critical_leave(&curthread->tcb->tcb_tmbx); curthread->interrupted = interrupted; curthread->timeout = timeout; @@ -860,7 +859,7 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info) if (!(pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) && (pthread->blocked != 0) && !THR_IN_CRITICAL(pthread)) - kse_thr_interrupt(&pthread->tmbx, + kse_thr_interrupt(&pthread->tcb->tcb_tmbx, restart ? KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0); } } @@ -983,7 +982,7 @@ _thr_sig_send(struct pthread *pthread, int sig) struct kse_mailbox *kmbx; if (pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) { - kse_thr_interrupt(&pthread->tmbx, KSE_INTR_SENDSIG, sig); + kse_thr_interrupt(&pthread->tcb->tcb_tmbx, KSE_INTR_SENDSIG, sig); return; } diff --git a/lib/libpthread/arch/amd64/Makefile.inc b/lib/libpthread/arch/amd64/Makefile.inc index c0e4c47941bd..c8b0362bca85 100644 --- a/lib/libpthread/arch/amd64/Makefile.inc +++ b/lib/libpthread/arch/amd64/Makefile.inc @@ -2,4 +2,4 @@ .PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH} -SRCS+= context.S enter_uts.S +SRCS+= context.S enter_uts.S pthread_md.c diff --git a/lib/libpthread/arch/amd64/amd64/pthread_md.c b/lib/libpthread/arch/amd64/amd64/pthread_md.c new file mode 100644 index 000000000000..374b6d8785b6 --- /dev/null +++ b/lib/libpthread/arch/amd64/amd64/pthread_md.c @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2003 Daniel Eischen + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Neither the name of the author nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include +#include +#include "pthread_md.h" + +/* + * The constructors. + */ +struct tcb * +_tcb_ctor(struct pthread *thread) +{ + struct tcb *tcb; + + if ((tcb = malloc(sizeof(struct tcb))) != NULL) { + bzero(tcb, sizeof(struct tcb)); + tcb->tcb_thread = thread; + /* Allocate TDV */ + } + return (tcb); +} + +void +_tcb_dtor(struct tcb *tcb) +{ + /* Free TDV */ + free(tcb); +} + +struct kcb * +_kcb_ctor(struct kse *kse) +{ + struct kcb *kcb; + + if ((kcb = malloc(sizeof(struct kcb))) != NULL) { + bzero(kcb, sizeof(struct kcb)); + kcb->kcb_kse = kse; + } + return (kcb); +} + +void +_kcb_dtor(struct kcb *kcb) +{ + free(kcb); +} diff --git a/lib/libpthread/arch/amd64/include/ksd.h b/lib/libpthread/arch/amd64/include/ksd.h deleted file mode 100644 index 843d18485fda..000000000000 --- a/lib/libpthread/arch/amd64/include/ksd.h +++ /dev/null @@ -1,132 +0,0 @@ -/*- - * Copyright (C) 2003 David Xu - * Copyright (c) 2001 Daniel Eischen - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Neither the name of the author nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/* - * $FreeBSD$ - */ - -#ifndef _KSD_H_ -#define _KSD_H_ - -#include -#include -extern int sysarch(int, void *); - -struct kse; -struct pthread; - -/* - * Evaluates to the byte offset of the per-kse variable name. - */ -#define __ksd_offset(name) __offsetof(struct kse, name) - -/* - * Evaluates to the type of the per-kse variable name. - */ -#define __ksd_type(name) __typeof(((struct kse *)0)->name) - - -/* - * Evaluates to the value of the per-kse variable name. 
- */ -#define KSD_GET64(name) ({ \ - __ksd_type(name) __result; \ - \ - u_long __i; \ - __asm __volatile("movq %%fs:%1, %0" \ - : "=r" (__i) \ - : "m" (*(u_long *)(__ksd_offset(name)))); \ - __result = *(__ksd_type(name) *)&__i; \ - \ - __result; \ -}) - -/* - * Sets the value of the per-cpu variable name to value val. - */ -#define KSD_SET64(name, val) ({ \ - __ksd_type(name) __val = (val); \ - \ - u_long __i; \ - __i = *(u_long *)&__val; \ - __asm __volatile("movq %1,%%fs:%0" \ - : "=m" (*(u_long *)(__ksd_offset(name))) \ - : "r" (__i)); \ -}) - -static __inline u_long -__ksd_readandclear64(volatile u_long *addr) -{ - u_long result; - - __asm __volatile ( - " xorq %0, %0;" - " xchgq %%fs:%1, %0;" - "# __ksd_readandclear64" - : "=&r" (result) - : "m" (*addr)); - return (result); -} - -#define KSD_READANDCLEAR64(name) ({ \ - __ksd_type(name) __result; \ - \ - __result = (__ksd_type(name)) \ - __ksd_readandclear64((u_long *)__ksd_offset(name)); \ - __result; \ -}) - - -#define _ksd_curkse() ((struct kse *)KSD_GET64(k_mbx.km_udata)) -#define _ksd_curthread() KSD_GET64(k_curthread) -#define _ksd_get_tmbx() KSD_GET64(k_mbx.km_curthread) -#define _ksd_set_tmbx(value) KSD_SET64(k_mbx.km_curthread, (void *)value); -#define _ksd_readandclear_tmbx() KSD_READANDCLEAR64(k_mbx.km_curthread) - - -static __inline int -_ksd_create(struct ksd *ksd, void *base, int size) -{ - ksd->base = base; - ksd->size = size; - return (0); -} - -static __inline void -_ksd_destroy(struct ksd *ksd) -{ - ksd->base = 0; - ksd->size = 0; -} - -static __inline int -_ksd_setprivate(struct ksd *ksd) -{ - return (sysarch(AMD64_SET_FSBASE, &ksd->base)); -} - -#endif diff --git a/lib/libpthread/arch/amd64/include/pthread_md.h b/lib/libpthread/arch/amd64/include/pthread_md.h index 3c7cd03f7aae..27a07cc2cbea 100644 --- a/lib/libpthread/arch/amd64/include/pthread_md.h +++ b/lib/libpthread/arch/amd64/include/pthread_md.h @@ -1,27 +1,28 @@ -/* - * Copyright (c) 2003 Marcel Moolenaar +/*- + * Copyright (C) 2003 David Xu + * Copyright (c) 2001 Daniel Eischen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: - * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. + * 2. Neither the name of the author nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. * * $FreeBSD$ */ @@ -31,52 +32,224 @@ #ifndef _PTHREAD_MD_H_ #define _PTHREAD_MD_H_ +#include +#include +#include #include +/* should define this, but doesn't. */ +extern int sysarch(int, void *); + #define THR_GETCONTEXT(ucp) \ (void)_amd64_save_context(&(ucp)->uc_mcontext) #define THR_SETCONTEXT(ucp) \ - (void)_amd64_restore_context(&(ucp)->uc_mcontext, NULL, NULL) + (void)_amd64_restore_context(&(ucp)->uc_mcontext, 0, NULL) -#define THR_ALIGNBYTES 15 -#define THR_ALIGN(td) (((uintptr_t)(td) + THR_ALIGNBYTES) & ~THR_ALIGNBYTES) + +#define PER_KSE +#undef PER_THREAD + +struct kse; +struct pthread; +struct tdv; /* - * KSE Specific Data. + * %fs points to a struct kcb. */ -struct ksd { - void *base; - long size; +struct kcb { + struct tcb *kcb_curtcb; + struct kcb *kcb_self; /* self reference */ + struct kse *kcb_kse; + struct kse_mailbox kcb_kmbx; }; +struct tcb { + struct tdv *tcb_tdv; + struct pthread *tcb_thread; + void *tcb_spare[2]; /* align tcb_tmbx to 16 bytes */ + struct kse_thr_mailbox tcb_tmbx; +}; + +/* + * Evaluates to the byte offset of the per-kse variable name. + */ +#define __kcb_offset(name) __offsetof(struct kcb, name) + +/* + * Evaluates to the type of the per-kse variable name. + */ +#define __kcb_type(name) __typeof(((struct kcb *)0)->name) + +/* + * Evaluates to the value of the per-kse variable name. + */ +#define KCB_GET64(name) ({ \ + __kcb_type(name) __result; \ + \ + u_long __i; \ + __asm __volatile("movq %%fs:%1, %0" \ + : "=r" (__i) \ + : "m" (*(u_long *)(__kcb_offset(name)))); \ + __result = *(__kcb_type(name) *)&__i; \ + \ + __result; \ +}) + +/* + * Sets the value of the per-kse variable name to value val. + */ +#define KCB_SET64(name, val) ({ \ + __kcb_type(name) __val = (val); \ + \ + u_long __i; \ + __i = *(u_long *)&__val; \ + __asm __volatile("movq %1,%%fs:%0" \ + : "=m" (*(u_long *)(__kcb_offset(name))) \ + : "r" (__i)); \ +}) + +static __inline u_long +__kcb_readandclear64(volatile u_long *addr) +{ + u_long result; + + __asm __volatile ( + " xorq %0, %0;" + " xchgq %%fs:%1, %0;" + "# __kcb_readandclear64" + : "=&r" (result) + : "m" (*addr)); + return (result); +} + +#define KCB_READANDCLEAR64(name) ({ \ + __kcb_type(name) __result; \ + \ + __result = (__kcb_type(name)) \ + __kcb_readandclear64((u_long *)__kcb_offset(name)); \ + __result; \ +}) + + +#define _kcb_curkcb() KCB_GET64(kcb_self) +#define _kcb_curtcb() KCB_GET64(kcb_curtcb) +#define _kcb_curkse() ((struct kse *)KCB_GET64(kcb_kmbx.km_udata)) +#define _kcb_get_tmbx() KCB_GET64(kcb_kmbx.km_curthread) +#define _kcb_set_tmbx(value) KCB_SET64(kcb_kmbx.km_curthread, (void *)value) +#define _kcb_readandclear_tmbx() KCB_READANDCLEAR64(kcb_kmbx.km_curthread) + +/* + * The constructors. 
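The xchgq in __kcb_readandclear64() above is what makes entering a critical region safe without a lock: the swap is a single instruction on the %fs-relative slot, so an upcall interrupting the KSE observes either the old mailbox pointer or NULL, never a torn value. Stripped of that atomicity, the operation is equivalent to the following two-step sketch (illustrative only; readandclear_sketch is not a name from the patch):

	static u_long
	readandclear_sketch(volatile u_long *addr)
	{
		u_long old;

		old = *addr;	/* fetch current km_curthread */
		*addr = 0;	/* NULL means "in a critical region" */
		return (old);	/* caller restores this on leave */
	}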
+ */ +struct tcb *_tcb_ctor(struct pthread *); +void _tcb_dtor(struct tcb *tcb); +struct kcb *_kcb_ctor(struct kse *); +void _kcb_dtor(struct kcb *); + +/* Called from the KSE to set its private data. */ +static __inline void +_kcb_set(struct kcb *kcb) +{ + void *addr = kcb; + + sysarch(AMD64_SET_FSBASE, &addr); +} + +/* Get the current kcb. */ +static __inline struct kcb * +_kcb_get(void) +{ + return (_kcb_curkcb()); +} + +static __inline struct kse_thr_mailbox * +_kcb_critical_enter(void) +{ + struct kse_thr_mailbox *crit; + + crit = _kcb_readandclear_tmbx(); + return (crit); +} + +static __inline void +_kcb_critical_leave(struct kse_thr_mailbox *crit) +{ + _kcb_set_tmbx(crit); +} + +static __inline int +_kcb_in_critical(void) +{ + return (_kcb_get_tmbx() == NULL); +} + +static __inline void +_tcb_set(struct kcb *kcb, struct tcb *tcb) +{ + kcb->kcb_curtcb = tcb; +} + +static __inline struct tcb * +_tcb_get(void) +{ + return (_kcb_curtcb()); +} + +static __inline struct pthread * +_get_curthread(void) +{ + struct tcb *tcb; + + tcb = _kcb_curtcb(); + if (tcb != NULL) + return (tcb->tcb_thread); + else + return (NULL); +} + +static __inline struct kse * +_get_curkse(void) +{ + return ((struct kse *)_kcb_curkse()); +} + void _amd64_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack, size_t stacksz); int _amd64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc); int _amd64_save_context(mcontext_t *mc); static __inline int -_thread_enter_uts(struct kse_thr_mailbox *tm, struct kse_mailbox *km) +_thread_enter_uts(struct tcb *tcb, struct kcb *kcb) { - if (tm == NULL) - return (-1); - if (!_amd64_save_context(&tm->tm_context.uc_mcontext)) { - _amd64_enter_uts(km, km->km_func, km->km_stack.ss_sp, - km->km_stack.ss_size); + int ret; + + ret = _amd64_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext); + if (ret == 0) { + _amd64_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func, + kcb->kcb_kmbx.km_stack.ss_sp, + kcb->kcb_kmbx.km_stack.ss_size); /* We should not reach here. */ return (-1); } + else if (ret < 0) + return (-1); return (0); } static __inline int -_thread_switch(struct kse_thr_mailbox *tm, struct kse_thr_mailbox **thrp) +_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox) { - if (tm == NULL) + if ((kcb == NULL) || (tcb == NULL)) return (-1); - _amd64_restore_context(&tm->tm_context.uc_mcontext, (intptr_t)tm, - (intptr_t*)thrp); + kcb->kcb_curtcb = tcb; + if (setmbox != 0) + _amd64_restore_context(&tcb->tcb_tmbx.tm_context.uc_mcontext, + (intptr_t)&tcb->tcb_tmbx, + (intptr_t *)&kcb->kcb_kmbx.km_curthread); + else + _amd64_restore_context(&tcb->tcb_tmbx.tm_context.uc_mcontext, + 0, NULL); /* We should not reach here. */ return (-1); } - #endif diff --git a/lib/libpthread/arch/i386/Makefile.inc b/lib/libpthread/arch/i386/Makefile.inc index c5cbe18b4b2b..73a9a8acde45 100644 --- a/lib/libpthread/arch/i386/Makefile.inc +++ b/lib/libpthread/arch/i386/Makefile.inc @@ -2,4 +2,4 @@ .PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH} -SRCS+= ksd.c thr_enter_uts.S thr_getcontext.S +SRCS+= thr_enter_uts.S thr_getcontext.S pthread_md.c diff --git a/lib/libpthread/arch/i386/i386/pthread_md.c b/lib/libpthread/arch/i386/i386/pthread_md.c new file mode 100644 index 000000000000..aeaa6495efc6 --- /dev/null +++ b/lib/libpthread/arch/i386/i386/pthread_md.c @@ -0,0 +1,171 @@ +/*- + * Copyright (C) 2003 David Xu + * Copyright (c) 2001,2003 Daniel Eischen + * All rights reserved. 
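Taken together, these inlines give the machine-independent code its critical-region protocol: clearing km_curthread tells the kernel there is no current thread to preempt or upcall about, and writing the saved pointer back reenables normal upcall behavior. Typical use, as _kse_critical_enter()/_kse_critical_leave() in thr_kern.c wrap it (a sketch; the function name is illustrative):

	static void
	with_critical_region_sketch(void)
	{
		struct kse_thr_mailbox *crit;

		crit = _kcb_critical_enter();	/* read-and-clear km_curthread */
		/* ... scheduler queues and locks may be touched safely ... */
		_kcb_critical_leave(crit);	/* upcalls are possible again */
	}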
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Neither the name of the author nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "pthread_md.h" + +#define LDT_ENTRIES 8192 +#define LDT_WORDS (8192/sizeof(unsigned int)) +#define LDT_RESERVED NLDT + +static unsigned int ldt_mask[LDT_WORDS]; +static int initialized = 0; + +static void initialize(void); + +static void +initialize(void) +{ + int i, j; + + memset(ldt_mask, 0xFF, sizeof(ldt_mask)); + /* Reserve system predefined LDT entries */ + for (i = 0; i < LDT_RESERVED; ++i) { + j = i / 32; + ldt_mask[j] &= ~(1 << (i % 32)); + } + initialized = 1; +} + +static u_int +alloc_ldt_entry(void) +{ + u_int i, j, index; + + index = 0; + for (i = 0; i < LDT_WORDS; ++i) { + if (ldt_mask[i] != 0) { + j = bsfl(ldt_mask[i]); + ldt_mask[i] &= ~(1 << j); + index = i * 32 + j; + break; + } + } + return (index); +} + +static void +free_ldt_entry(u_int index) +{ + u_int i, j; + + if (index < LDT_RESERVED || index >= LDT_ENTRIES) + return; + i = index / 32; + j = index % 32; + ldt_mask[i] |= (1 << j); +} + +struct tcb * +_tcb_ctor(struct pthread *thread) +{ + struct tcb *tcb; + void *addr; + + addr = malloc(sizeof(struct tcb) + 15); + if (addr == NULL) + tcb = NULL; + else { + tcb = (struct tcb *)(((uintptr_t)(addr) + 15) & ~15); + bzero(tcb, sizeof(struct tcb)); + tcb->tcb_addr = addr; + tcb->tcb_thread = thread; + /* XXX - Allocate tdv/tls */ + } + return (tcb); +} + +void +_tcb_dtor(struct tcb *tcb) +{ + void *addr; + + addr = tcb->tcb_addr; + tcb->tcb_addr = NULL; + free(addr); +} + +/* + * Initialize KSD. This also includes setting up the LDT. 
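Since i386 malloc() does not promise 16-byte alignment, _tcb_ctor() above over-allocates by 15 bytes, rounds the result up, and stashes the raw pointer in tcb_addr so that _tcb_dtor() can hand the original address back to free(). The rounding idiom in isolation (a sketch; malloc16_sketch is an illustrative name and assumes <stdlib.h> and <stdint.h>):

	static void *
	malloc16_sketch(size_t size, void **rawp)
	{
		void *raw;

		if ((raw = malloc(size + 15)) == NULL)
			return (NULL);
		*rawp = raw;	/* the address free() must eventually see */
		return ((void *)(((uintptr_t)raw + 15) & ~(uintptr_t)15));
	}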
+ */ +struct kcb * +_kcb_ctor(struct kse *kse) +{ + union descriptor ldt; + struct kcb *kcb; + + if (initialized == 0) + initialize(); + kcb = malloc(sizeof(struct kcb)); + if (kcb != NULL) { + bzero(kcb, sizeof(struct kcb)); + kcb->kcb_self = kcb; + kcb->kcb_kse = kse; + kcb->kcb_ldt = alloc_ldt_entry(); + if (kcb->kcb_ldt == 0) { + free(kcb); + return (NULL); + } + ldt.sd.sd_hibase = (unsigned int)kcb >> 24; + ldt.sd.sd_lobase = (unsigned int)kcb & 0xFFFFFF; + ldt.sd.sd_hilimit = (sizeof(struct kcb) >> 16) & 0xF; + ldt.sd.sd_lolimit = sizeof(struct kcb) & 0xFFFF; + ldt.sd.sd_type = SDT_MEMRWA; + ldt.sd.sd_dpl = SEL_UPL; + ldt.sd.sd_p = 1; + ldt.sd.sd_xx = 0; + ldt.sd.sd_def32 = 1; + ldt.sd.sd_gran = 0; /* no more than 1M */ + if (i386_set_ldt(kcb->kcb_ldt, &ldt, 1) < 0) { + free_ldt_entry(kcb->kcb_ldt); + free(kcb); + return (NULL); + } + } + return (kcb); +} + +void +_kcb_dtor(struct kcb *kcb) +{ + if (kcb->kcb_ldt != -1) + free_ldt_entry(kcb->kcb_ldt); + free(kcb); +} diff --git a/lib/libpthread/arch/i386/i386/thr_getcontext.S b/lib/libpthread/arch/i386/i386/thr_getcontext.S index 19afd061f638..7f58dce4248f 100644 --- a/lib/libpthread/arch/i386/i386/thr_getcontext.S +++ b/lib/libpthread/arch/i386/i386/thr_getcontext.S @@ -74,7 +74,7 @@ ENTRY(__thr_setcontext) movl 72(%edx), %esp /* switch to context defined stack */ pushl 60(%edx) /* push return address on stack */ pushl 44(%edx) /* push ecx on stack */ - push 48(%edx) /* push eax on stack */ + pushl 48(%edx) /* push eax on stack */ /* * if (mc_fpowned == MC_OWNEDFP_FPU || mc_fpowned == MC_OWNEDFP_PCB) { * if (mc_fpformat == MC_FPFMT_387) diff --git a/lib/libpthread/arch/i386/include/ksd.h b/lib/libpthread/arch/i386/include/ksd.h deleted file mode 100644 index f7f728fa520d..000000000000 --- a/lib/libpthread/arch/i386/include/ksd.h +++ /dev/null @@ -1,111 +0,0 @@ -/*- - * Copyright (C) 2003 David Xu - * Copyright (c) 2001 Daniel Eischen - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Neither the name of the author nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/* - * $FreeBSD$ - */ - -#ifndef _KSD_H_ -#define _KSD_H_ - -#include - -struct kse; - -/* - * Evaluates to the byte offset of the per-kse variable name. - */ -#define __ksd_offset(name) __offsetof(struct kse, name) - -/* - * Evaluates to the type of the per-kse variable name. 
- */ -#define __ksd_type(name) __typeof(((struct kse *)0)->name) - - -/* - * Evaluates to the value of the per-kse variable name. - */ -#define KSD_GET32(name) ({ \ - __ksd_type(name) __result; \ - \ - u_int __i; \ - __asm __volatile("movl %%gs:%1, %0" \ - : "=r" (__i) \ - : "m" (*(u_int *)(__ksd_offset(name)))); \ - __result = *(__ksd_type(name) *)&__i; \ - \ - __result; \ -}) - -/* - * Sets the value of the per-cpu variable name to value val. - */ -#define KSD_SET32(name, val) ({ \ - __ksd_type(name) __val = (val); \ - \ - u_int __i; \ - __i = *(u_int *)&__val; \ - __asm __volatile("movl %1,%%gs:%0" \ - : "=m" (*(u_int *)(__ksd_offset(name))) \ - : "r" (__i)); \ -}) - -static __inline u_long -__ksd_readandclear32(volatile u_long *addr) -{ - u_long result; - - __asm __volatile ( - " xorl %0, %0;" - " xchgl %%gs:%1, %0;" - "# __ksd_readandclear32" - : "=&r" (result) - : "m" (*addr)); - return (result); -} - -#define KSD_READANDCLEAR32(name) ({ \ - __ksd_type(name) __result; \ - \ - __result = (__ksd_type(name)) \ - __ksd_readandclear32((u_long *)__ksd_offset(name)); \ - __result; \ -}) - - -#define _ksd_curkse() ((struct kse *)KSD_GET32(k_mbx.km_udata)) -#define _ksd_curthread() KSD_GET32(k_curthread) -#define _ksd_get_tmbx() KSD_GET32(k_mbx.km_curthread) -#define _ksd_set_tmbx(value) KSD_SET32(k_mbx.km_curthread, (void *)value); -#define _ksd_readandclear_tmbx() KSD_READANDCLEAR32(k_mbx.km_curthread) - -int _ksd_create(struct ksd *ksd, void *base, int size); -void _ksd_destroy(struct ksd *ksd); -int _ksd_getprivate(struct ksd *ksd, void **base, int *size); -int _ksd_setprivate(struct ksd *ksd); -#endif diff --git a/lib/libpthread/arch/i386/include/pthread_md.h b/lib/libpthread/arch/i386/include/pthread_md.h index 980680a281ed..971856244f71 100644 --- a/lib/libpthread/arch/i386/include/pthread_md.h +++ b/lib/libpthread/arch/i386/include/pthread_md.h @@ -37,34 +37,193 @@ extern int _thr_setcontext(mcontext_t *, intptr_t, intptr_t *); extern int _thr_getcontext(mcontext_t *); -#define THR_GETCONTEXT(ucp) _thr_getcontext(&(ucp)->uc_mcontext); -#define THR_SETCONTEXT(ucp) _thr_setcontext(&(ucp)->uc_mcontext, NULL, NULL); +#define THR_GETCONTEXT(ucp) _thr_getcontext(&(ucp)->uc_mcontext) +#define THR_SETCONTEXT(ucp) _thr_setcontext(&(ucp)->uc_mcontext, 0, NULL) -#define THR_ALIGNBYTES 15 -#define THR_ALIGN(td) (((unsigned)(td) + THR_ALIGNBYTES) & ~THR_ALIGNBYTES) +#define PER_KSE +#undef PER_THREAD + +struct kse; +struct pthread; +struct tdv; /* - * KSE Specific Data. + * %gs points to a struct kcb. */ -struct ksd { - int ldt; -#define KSDF_INITIALIZED 0x01 - long flags; - void *base; - long size; +struct kcb { + struct tcb *kcb_curtcb; + struct kcb *kcb_self; /* self reference */ + int kcb_ldt; + struct kse *kcb_kse; + struct kse_mailbox kcb_kmbx; }; -extern void _i386_enter_uts(struct kse_mailbox *, kse_func_t, void *, long); +struct tcb { + struct tdv *tcb_tdv; + struct pthread *tcb_thread; + void *tcb_addr; /* allocated tcb address */ + void *tcb_spare; /* align tcb_tmbx to 16 bytes */ + struct kse_thr_mailbox tcb_tmbx; +}; + +/* + * Evaluates to the byte offset of the per-kse variable name. + */ +#define __kcb_offset(name) __offsetof(struct kcb, name) + +/* + * Evaluates to the type of the per-kse variable name. + */ +#define __kcb_type(name) __typeof(((struct kcb *)0)->name) + +/* + * Evaluates to the value of the per-kse variable name. 
+ */ +#define KCB_GET32(name) ({ \ + __kcb_type(name) __result; \ + \ + u_int __i; \ + __asm __volatile("movl %%gs:%1, %0" \ + : "=r" (__i) \ + : "m" (*(u_int *)(__kcb_offset(name)))); \ + __result = *(__kcb_type(name) *)&__i; \ + \ + __result; \ +}) + +/* + * Sets the value of the per-kse variable name to value val. + */ +#define KCB_SET32(name, val) ({ \ + __kcb_type(name) __val = (val); \ + \ + u_int __i; \ + __i = *(u_int *)&__val; \ + __asm __volatile("movl %1,%%gs:%0" \ + : "=m" (*(u_int *)(__kcb_offset(name))) \ + : "r" (__i)); \ +}) + +static __inline u_long +__kcb_readandclear32(volatile u_long *addr) +{ + u_long result; + + __asm __volatile ( + " xorl %0, %0;" + " xchgl %%gs:%1, %0;" + "# __kcb_readandclear32" + : "=&r" (result) + : "m" (*addr)); + return (result); +} + +#define KCB_READANDCLEAR32(name) ({ \ + __kcb_type(name) __result; \ + \ + __result = (__kcb_type(name)) \ + __kcb_readandclear32((u_long *)__kcb_offset(name)); \ + __result; \ +}) + + +#define _kcb_curkcb() KCB_GET32(kcb_self) +#define _kcb_curtcb() KCB_GET32(kcb_curtcb) +#define _kcb_curkse() ((struct kse *)KCB_GET32(kcb_kmbx.km_udata)) +#define _kcb_get_tmbx() KCB_GET32(kcb_kmbx.km_curthread) +#define _kcb_set_tmbx(value) KCB_SET32(kcb_kmbx.km_curthread, (void *)value) +#define _kcb_readandclear_tmbx() KCB_READANDCLEAR32(kcb_kmbx.km_curthread) + + +/* + * The constructors. + */ +struct tcb *_tcb_ctor(struct pthread *); +void _tcb_dtor(struct tcb *tcb); +struct kcb *_kcb_ctor(struct kse *); +void _kcb_dtor(struct kcb *); + +/* Called from the KSE to set its private data. */ +static __inline void +_kcb_set(struct kcb *kcb) +{ + int val; + + val = (kcb->kcb_ldt << 3) | 7; + __asm __volatile("movl %0, %%gs" : : "r" (val)); +} + +/* Get the current kcb. */ +static __inline struct kcb * +_kcb_get(void) +{ + return (_kcb_curkcb()); +} + +static __inline struct kse_thr_mailbox * +_kcb_critical_enter(void) +{ + struct kse_thr_mailbox *crit; + + crit = _kcb_readandclear_tmbx(); + return (crit); +} + +static __inline void +_kcb_critical_leave(struct kse_thr_mailbox *crit) +{ + _kcb_set_tmbx(crit); +} static __inline int -_thread_enter_uts(struct kse_thr_mailbox *tmbx, struct kse_mailbox *kmbx) +_kcb_in_critical(void) +{ + return (_kcb_get_tmbx() == NULL); +} + +static __inline void +_tcb_set(struct kcb *kcb, struct tcb *tcb) +{ + kcb->kcb_curtcb = tcb; +} + +static __inline struct tcb * +_tcb_get(void) +{ + return (_kcb_curtcb()); +} + +static __inline struct pthread * +_get_curthread(void) +{ + struct tcb *tcb; + + tcb = _kcb_curtcb(); + if (tcb != NULL) + return (tcb->tcb_thread); + else + return (NULL); +} + +static __inline struct kse * +_get_curkse(void) +{ + return ((struct kse *)_kcb_curkse()); +} + +void _i386_enter_uts(struct kse_mailbox *km, kse_func_t uts, void *stack, + size_t stacksz); + +static __inline int +_thread_enter_uts(struct tcb *tcb, struct kcb *kcb) { int ret; - ret = _thr_getcontext(&tmbx->tm_context.uc_mcontext); + ret = _thr_getcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext); if (ret == 0) { - _i386_enter_uts(kmbx, kmbx->km_func, - kmbx->km_stack.ss_sp, kmbx->km_stack.ss_size); + _i386_enter_uts(&kcb->kcb_kmbx, kcb->kcb_kmbx.km_func, + kcb->kcb_kmbx.km_stack.ss_sp, + kcb->kcb_kmbx.km_stack.ss_size); /* We should not reach here. 
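The value _kcb_set() above loads into %gs is an ordinary x86 segment selector: the LDT slot number shifted past a two-bit requested-privilege-level field and the table-indicator bit, so the 7 decodes as "look in the LDT, at user privilege". Spelled out (a sketch; the constant names are descriptive, not from this patch):

	#define SEL_TI_LDT	0x4	/* bit 2: LDT rather than GDT */
	#define SEL_RPL_USER	0x3	/* bits 0-1: ring 3 */

	static __inline int
	ldt_selector(int slot)
	{
		/* equivalent to the (kcb_ldt << 3) | 7 in _kcb_set() */
		return ((slot << 3) | SEL_TI_LDT | SEL_RPL_USER);
	}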
*/ return (-1); } @@ -74,10 +233,17 @@ _thread_enter_uts(struct kse_thr_mailbox *tmbx, struct kse_mailbox *kmbx) } static __inline int -_thread_switch(struct kse_thr_mailbox *tmbx, struct kse_thr_mailbox **loc) +_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox) { - _thr_setcontext(&tmbx->tm_context.uc_mcontext, - (intptr_t)tmbx, (intptr_t *)loc); + if ((kcb == NULL) || (tcb == NULL)) + return (-1); + kcb->kcb_curtcb = tcb; + if (setmbox != 0) + _thr_setcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext, + (intptr_t)&tcb->tcb_tmbx, + (intptr_t *)&kcb->kcb_kmbx.km_curthread); + else + _thr_setcontext(&tcb->tcb_tmbx.tm_context.uc_mcontext, 0, NULL); /* We should not reach here. */ return (-1); } diff --git a/lib/libpthread/arch/ia64/Makefile.inc b/lib/libpthread/arch/ia64/Makefile.inc index c0e4c47941bd..c8b0362bca85 100644 --- a/lib/libpthread/arch/ia64/Makefile.inc +++ b/lib/libpthread/arch/ia64/Makefile.inc @@ -2,4 +2,4 @@ .PATH: ${.CURDIR}/arch/${MACHINE_ARCH}/${MACHINE_ARCH} -SRCS+= context.S enter_uts.S +SRCS+= context.S enter_uts.S pthread_md.c diff --git a/lib/libpthread/arch/ia64/ia64/pthread_md.c b/lib/libpthread/arch/ia64/ia64/pthread_md.c new file mode 100644 index 000000000000..e8fd64e741a0 --- /dev/null +++ b/lib/libpthread/arch/ia64/ia64/pthread_md.c @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2003 Daniel Eischen + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Neither the name of the author nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include +#include +#include "pthread_md.h" + +/* + * The constructors. 
+ */ +struct tcb * +_tcb_ctor(struct pthread *thread) +{ + struct tcb *tcb; + + if ((tcb = malloc(sizeof(struct tcb))) != NULL) { + bzero(tcb, sizeof(struct tcb)); + tcb->tcb_thread = thread; + tcb->tcb_tp.tp_self = tcb; + /* Allocate TDV */ + } + return (tcb); +} + +void +_tcb_dtor(struct tcb *tcb) +{ + /* Free TDV */ + free(tcb); +} + +struct kcb * +_kcb_ctor(struct kse *kse) +{ + struct kcb *kcb; + + if ((kcb = malloc(sizeof(struct kcb))) != NULL) { + bzero(kcb, sizeof(struct kcb)); + kcb->kcb_faketcb.tcb_isfake = 1; + kcb->kcb_faketcb.tcb_tmbx.tm_flags = TMF_NOUPCALL; + kcb->kcb_faketcb.tcb_tp.tp_self = &kcb->kcb_faketcb; + kcb->kcb_curtcb = &kcb->kcb_faketcb; + kcb->kcb_kse = kse; + } + return (kcb); +} + +void +_kcb_dtor(struct kcb *kcb) +{ + free(kcb); +} diff --git a/lib/libpthread/arch/ia64/include/ksd.h b/lib/libpthread/arch/ia64/include/ksd.h deleted file mode 100644 index 8878132bdf3a..000000000000 --- a/lib/libpthread/arch/ia64/include/ksd.h +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright (c) 2003 Marcel Moolenaar - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * $FreeBSD$ - */ - -#ifndef _KSD_H_ -#define _KSD_H_ - -struct kse; -struct pthread; - -register struct kse *_tp __asm("%r13"); - -static __inline int -_ksd_create(struct ksd *ksd, void *base, int size) -{ - ksd->ksd_base = base; - ksd->ksd_size = size; - return (0); -} - -static __inline struct kse * -_ksd_curkse() -{ - /* XXX why not simply return _tp? 
*/ - return ((struct kse *)_tp->k_mbx.km_udata); -} - -static __inline struct pthread * -_ksd_curthread() -{ - return (_tp->k_curthread); -} - -static __inline void -_ksd_destroy(struct ksd *ksd) -{ -} - -static __inline kse_critical_t -_ksd_get_tmbx() -{ - return (_tp->k_mbx.km_curthread); -} - -static __inline kse_critical_t -_ksd_readandclear_tmbx() -{ - kse_critical_t crit; - __asm("xchg8 %0=[%1],r0" : "=r"(crit) - : "r"(&_tp->k_mbx.km_curthread)); - return (crit); -} - -static __inline void -_ksd_set_tmbx(kse_critical_t crit) -{ - _tp->k_mbx.km_curthread = crit; -} - -static __inline int -_ksd_setprivate(struct ksd *ksd) -{ - _tp = (struct kse *)ksd->ksd_base; - return (0); -} - -#endif /* _KSD_H_ */ diff --git a/lib/libpthread/arch/ia64/include/pthread_md.h b/lib/libpthread/arch/ia64/include/pthread_md.h index 6d30025c5c1e..f7076d0a65d2 100644 --- a/lib/libpthread/arch/ia64/include/pthread_md.h +++ b/lib/libpthread/arch/ia64/include/pthread_md.h @@ -29,32 +29,190 @@ #ifndef _PTHREAD_MD_H_ #define _PTHREAD_MD_H_ +#include +#include + #define THR_GETCONTEXT(ucp) _ia64_save_context(&(ucp)->uc_mcontext) #define THR_SETCONTEXT(ucp) _ia64_restore_context(&(ucp)->uc_mcontext, \ 0, NULL) -#define THR_ALIGNBYTES 0 -#define THR_ALIGN(td) (td) +#define PER_THREAD -/* KSE Specific Data. */ -struct ksd { - void *ksd_base; - int ksd_size; +struct kcb; +struct kse; +struct pthread; +struct tcb; +struct tdv; /* We don't know what this is yet? */ + +/* + * tp points to one of these. + */ +struct ia64_tp { + struct tdv *tp_tdv; /* dynamic TLS */ + struct tcb *tp_self; + char tp_tls[0]; /* static TLS */ }; +struct tcb { + struct kse_thr_mailbox tcb_tmbx; + struct pthread *tcb_thread; + struct kcb *tcb_curkcb; + long tcb_isfake; + struct ia64_tp tcb_tp; +}; + +struct kcb { + struct kse_mailbox kcb_kmbx; + struct tcb kcb_faketcb; + struct tcb *kcb_curtcb; + struct kse *kcb_kse; +}; + +register struct ia64_tp *_tp __asm("%r13"); + +/* + * The kcb and tcb constructors. + */ +struct tcb *_tcb_ctor(struct pthread *); +void _tcb_dtor(struct tcb *); +struct kcb *_kcb_ctor(struct kse *kse); +void _kcb_dtor(struct kcb *); + +/* Called from the KSE to set its private data. */ +static __inline void +_kcb_set(struct kcb *kcb) +{ + /* There is no thread yet; use the fake tcb. */ + _tp = &kcb->kcb_faketcb.tcb_tp; +} + +/* + * Get the current kcb. + * + * This can only be called while in a critical region; don't + * worry about having the kcb changed out from under us. + */ +static __inline struct kcb * +_kcb_get(void) +{ + return (_tp->tp_self->tcb_curkcb); +} + +/* + * Enter a critical region. + * + * Read and clear km_curthread in the kse mailbox. + */ +static __inline struct kse_thr_mailbox * +_kcb_critical_enter(void) +{ + struct kse_thr_mailbox *crit; + struct tcb *tcb; + uint32_t flags; + + tcb = _tp->tp_self; + if (tcb->tcb_isfake != 0) { + /* + * We already are in a critical region since + * there is no current thread. + */ + crit = NULL; + } else { + flags = tcb->tcb_tmbx.tm_flags; + tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL; + crit = tcb->tcb_curkcb->kcb_kmbx.km_curthread; + tcb->tcb_curkcb->kcb_kmbx.km_curthread = NULL; + tcb->tcb_tmbx.tm_flags = flags; + } + return (crit); +} + +static __inline void +_kcb_critical_leave(struct kse_thr_mailbox *crit) +{ + struct tcb *tcb; + + tcb = _tp->tp_self; + /* No need to do anything if this is a fake tcb. 
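On ia64 the thread pointer is simply register r13, writable from user mode, so _kcb_set() is a register assignment rather than a sysarch() call. The fake tcb embedded in every kcb keeps r13 dereferenceable even when no thread is current, which is why the critical-region inlines test tcb_isfake instead of checking for NULL. The test reduces to roughly the sketch below; note that the real _kcb_in_critical() additionally raises TMF_NOUPCALL around the check so an upcall cannot slip in mid-test, a subtlety this sketch omits:

	static __inline int
	in_critical_sketch(void)
	{
		/* _tp always points at a real or fake tcb after _kcb_set() */
		if (_tp->tp_self->tcb_isfake != 0)
			return (1);	/* no thread yet: implicitly critical */
		return (_tp->tp_self->tcb_curkcb->kcb_kmbx.km_curthread ==
		    NULL);
	}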
*/ + if (tcb->tcb_isfake == 0) + tcb->tcb_curkcb->kcb_kmbx.km_curthread = crit; +} + +static __inline int +_kcb_in_critical(void) +{ + struct tcb *tcb; + uint32_t flags; + int ret; + + tcb = _tp->tp_self; + if (tcb->tcb_isfake != 0) { + /* + * We are in a critical region since there is no + * current thread. + */ + ret = 1; + } else { + flags = tcb->tcb_tmbx.tm_flags; + tcb->tcb_tmbx.tm_flags |= TMF_NOUPCALL; + ret = (tcb->tcb_curkcb->kcb_kmbx.km_curthread == NULL); + tcb->tcb_tmbx.tm_flags = flags; + } + return (ret); +} + +static __inline void +_tcb_set(struct kcb *kcb, struct tcb *tcb) +{ + if (tcb == NULL) { + kcb->kcb_curtcb = &kcb->kcb_faketcb; + _tp = &kcb->kcb_faketcb.tcb_tp; + } + else { + kcb->kcb_curtcb = tcb; + tcb->tcb_curkcb = kcb; + _tp = &tcb->tcb_tp; + } +} + +static __inline struct tcb * +_tcb_get(void) +{ + return (_tp->tp_self); +} + +static __inline struct pthread * +_get_curthread(void) +{ + return (_tp->tp_self->tcb_thread); +} + +/* + * Get the current kse. + * + * Line _kcb_get(), this can only be called while in a critical region. + */ +static __inline struct kse * +_get_curkse(void) +{ + return (_tp->tp_self->tcb_curkcb->kcb_kse); +} + void _ia64_enter_uts(kse_func_t uts, struct kse_mailbox *km, void *stack, size_t stacksz); int _ia64_restore_context(mcontext_t *mc, intptr_t val, intptr_t *loc); int _ia64_save_context(mcontext_t *mc); static __inline int -_thread_enter_uts(struct kse_thr_mailbox *tm, struct kse_mailbox *km) +_thread_enter_uts(struct tcb *tcb, struct kcb *kcb) { - if (tm == NULL) - return (-1); - if (!_ia64_save_context(&tm->tm_context.uc_mcontext)) { - _ia64_enter_uts(km->km_func, km, km->km_stack.ss_sp, - km->km_stack.ss_size); + if (_ia64_save_context(&tcb->tcb_tmbx.tm_context.uc_mcontext) == 0) { + /* Make the fake tcb the current thread. */ + kcb->kcb_curtcb = &kcb->kcb_faketcb; + _tp = &kcb->kcb_faketcb.tcb_tp; + _ia64_enter_uts(kcb->kcb_kmbx.km_func, &kcb->kcb_kmbx, + kcb->kcb_kmbx.km_stack.ss_sp, + kcb->kcb_kmbx.km_stack.ss_size); /* We should not reach here. */ return (-1); } @@ -62,12 +220,18 @@ _thread_enter_uts(struct kse_thr_mailbox *tm, struct kse_mailbox *km) } static __inline int -_thread_switch(struct kse_thr_mailbox *tm, struct kse_thr_mailbox **thrp) +_thread_switch(struct kcb *kcb, struct tcb *tcb, int setmbox) { - if (tm == NULL) - return (-1); - _ia64_restore_context(&tm->tm_context.uc_mcontext, (intptr_t)tm, - (intptr_t*)thrp); + kcb->kcb_curtcb = tcb; + tcb->tcb_curkcb = kcb; + _tp = &tcb->tcb_tp; + if (setmbox != 0) + _ia64_restore_context(&tcb->tcb_tmbx.tm_context.uc_mcontext, + (intptr_t)&tcb->tcb_tmbx, + (intptr_t *)&kcb->kcb_kmbx.km_curthread); + else + _ia64_restore_context(&tcb->tcb_tmbx.tm_context.uc_mcontext, + 0, NULL); /* We should not reach here. 
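All three ports now give _thread_switch() the same contract: when setmbox is nonzero, the context-restore primitive stores &tcb->tcb_tmbx into km_curthread as a side effect of resuming, so the thread leaves its critical region at the precise instant its context goes live, with no window for a stray upcall. The two call patterns in thr_kern.c (further down) reduce to this sketch:

	/* (a) the thread itself will unlock and set the mailbox later */
	ret = _thread_switch(curkse->k_kcb, td->tcb, 0);

	/* (b) drop the lock; the switch publishes km_curthread atomically */
	KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
	ret = _thread_switch(curkse->k_kcb, curthread->tcb, 1);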
*/ return (-1); } diff --git a/lib/libpthread/thread/thr_cancel.c b/lib/libpthread/thread/thr_cancel.c index 581b71974eb6..8190af6437bc 100644 --- a/lib/libpthread/thread/thr_cancel.c +++ b/lib/libpthread/thread/thr_cancel.c @@ -111,7 +111,7 @@ _pthread_cancel(pthread_t pthread) if ((pthread->cancelflags & THR_AT_CANCEL_POINT) && (pthread->blocked != 0 || pthread->attr.flags & PTHREAD_SCOPE_SYSTEM)) - kse_thr_interrupt(&pthread->tmbx, + kse_thr_interrupt(&pthread->tcb->tcb_tmbx, KSE_INTR_INTERRUPT, 0); } diff --git a/lib/libpthread/thread/thr_concurrency.c b/lib/libpthread/thread/thr_concurrency.c index 8eaa6c14fbec..694255ae8b24 100644 --- a/lib/libpthread/thread/thr_concurrency.c +++ b/lib/libpthread/thread/thr_concurrency.c @@ -105,7 +105,7 @@ _thr_setconcurrency(int new_level) newkse->k_kseg->kg_ksecount++; newkse->k_flags |= KF_STARTED; KSE_SCHED_UNLOCK(curthread->kse, newkse->k_kseg); - if (kse_create(&newkse->k_mbx, 0) != 0) { + if (kse_create(&newkse->k_kcb->kcb_kmbx, 0) != 0) { KSE_SCHED_LOCK(curthread->kse, newkse->k_kseg); TAILQ_REMOVE(&newkse->k_kseg->kg_kseq, newkse, k_kgqe); diff --git a/lib/libpthread/thread/thr_create.c b/lib/libpthread/thread/thr_create.c index 9f041368d9fb..fcc4ac5280da 100644 --- a/lib/libpthread/thread/thr_create.c +++ b/lib/libpthread/thread/thr_create.c @@ -49,7 +49,10 @@ int _thread_next_offset = OFF(tle.tqe_next); int _thread_uniqueid_offset = OFF(uniqueid); int _thread_state_offset = OFF(state); int _thread_name_offset = OFF(name); -int _thread_ctx_offset = OFF(tmbx.tm_context); +void *_thread_tcb_offset = OFF(tcb); +#undef OFF +#define OFF(f) offsetof(struct tcb, f) +int _thread_ctx_offset = OFF(tcb_tmbx.tm_context); #undef OFF int _thread_PS_RUNNING_value = PS_RUNNING; @@ -95,7 +98,6 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr, struct pthread *curthread, *new_thread; struct kse *kse = NULL; struct kse_group *kseg = NULL; - void *p; kse_critical_t crit; int i; int ret = 0; @@ -121,11 +123,6 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr, /* Insufficient memory to create a thread: */ ret = EAGAIN; } else { - /* Initialize the thread structure: */ - p = new_thread->alloc_addr; - memset(new_thread, 0, sizeof(struct pthread)); - new_thread->alloc_addr = p; - /* Check if default thread attributes are required: */ if (attr == NULL || *attr == NULL) /* Use the default thread attributes: */ @@ -146,7 +143,7 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr, /* Insufficient memory to create a new KSE/KSEG: */ ret = EAGAIN; if (kse != NULL) { - kse->k_mbx.km_flags |= KMF_DONE; + kse->k_kcb->kcb_kmbx.km_flags |= KMF_DONE; _kse_free(curthread, kse); } free_stack(&new_thread->attr); @@ -183,18 +180,19 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr, * Enter a critical region to get consistent context. 
*/ crit = _kse_critical_enter(); - THR_GETCONTEXT(&new_thread->tmbx.tm_context); + THR_GETCONTEXT(&new_thread->tcb->tcb_tmbx.tm_context); /* Initialize the thread for signals: */ new_thread->sigmask = curthread->sigmask; _kse_critical_leave(crit); - new_thread->tmbx.tm_udata = new_thread; - new_thread->tmbx.tm_context.uc_sigmask = + + new_thread->tcb->tcb_tmbx.tm_udata = new_thread; + new_thread->tcb->tcb_tmbx.tm_context.uc_sigmask = new_thread->sigmask; - new_thread->tmbx.tm_context.uc_stack.ss_size = + new_thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_size = new_thread->attr.stacksize_attr; - new_thread->tmbx.tm_context.uc_stack.ss_sp = + new_thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_sp = new_thread->attr.stackaddr_attr; - makecontext(&new_thread->tmbx.tm_context, + makecontext(&new_thread->tcb->tcb_tmbx.tm_context, (void (*)(void))thread_start, 4, new_thread, start_routine, arg); /* @@ -274,8 +272,8 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr, kse->k_kseg->kg_flags |= KGF_SINGLE_THREAD; new_thread->kse = kse; new_thread->kseg = kse->k_kseg; - kse->k_mbx.km_udata = kse; - kse->k_mbx.km_curthread = NULL; + kse->k_kcb->kcb_kmbx.km_udata = kse; + kse->k_kcb->kcb_kmbx.km_curthread = NULL; } /* diff --git a/lib/libpthread/thread/thr_init.c b/lib/libpthread/thread/thr_init.c index 5c31ae7b9b44..f9e0d9bc9767 100644 --- a/lib/libpthread/thread/thr_init.c +++ b/lib/libpthread/thread/thr_init.c @@ -68,7 +68,6 @@ #include "libc_private.h" #include "thr_private.h" -#include "ksd.h" int __pthread_cond_wait(pthread_cond_t *, pthread_mutex_t *); int __pthread_mutex_lock(pthread_mutex_t *); @@ -306,12 +305,10 @@ _libpthread_init(struct pthread *curthread) KSEG_THRQ_ADD(_kse_initial->k_kseg, _thr_initial); /* Setup the KSE/thread specific data for the current KSE/thread. */ - if (_ksd_setprivate(&_thr_initial->kse->k_ksd) != 0) - PANIC("Can't set initial KSE specific data"); - _set_curkse(_thr_initial->kse); _thr_initial->kse->k_curthread = _thr_initial; + _kcb_set(_thr_initial->kse->k_kcb); + _tcb_set(_thr_initial->kse->k_kcb, _thr_initial->tcb); _thr_initial->kse->k_flags |= KF_INITIALIZED; - _kse_initial->k_curthread = _thr_initial; _thr_rtld_init(); } @@ -323,14 +320,8 @@ _libpthread_init(struct pthread *curthread) static void init_main_thread(struct pthread *thread) { - void *p; int i; - /* Zero the initial thread structure. */ - p = thread->alloc_addr; - memset(thread, 0, sizeof(struct pthread)); - thread->alloc_addr = p; - /* Setup the thread attributes. */ thread->attr = _pthread_attr_default; #ifdef SYSTEM_SCOPE_ONLY @@ -381,9 +372,11 @@ init_main_thread(struct pthread *thread) * Set up the thread mailbox. The threads saved context * is also in the mailbox. 
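The creation path now reaches the mailbox through one extra indirection, but the recipe is unchanged; condensed into one place (a sketch reusing the names from the hunk above):

	struct kse_thr_mailbox *tm = &new_thread->tcb->tcb_tmbx;

	tm->tm_udata = new_thread;	/* back pointer for the UTS */
	tm->tm_context.uc_sigmask = new_thread->sigmask;
	tm->tm_context.uc_stack.ss_sp = new_thread->attr.stackaddr_attr;
	tm->tm_context.uc_stack.ss_size = new_thread->attr.stacksize_attr;
	makecontext(&tm->tm_context, (void (*)(void))thread_start, 4,
	    new_thread, start_routine, arg);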
*/ - thread->tmbx.tm_udata = thread; - thread->tmbx.tm_context.uc_stack.ss_size = thread->attr.stacksize_attr; - thread->tmbx.tm_context.uc_stack.ss_sp = thread->attr.stackaddr_attr; + thread->tcb->tcb_tmbx.tm_udata = thread; + thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_size = + thread->attr.stacksize_attr; + thread->tcb->tcb_tmbx.tm_context.uc_stack.ss_sp = + thread->attr.stackaddr_attr; /* Default the priority of the initial thread: */ thread->base_priority = THR_DEFAULT_PRIORITY; diff --git a/lib/libpthread/thread/thr_kern.c b/lib/libpthread/thread/thr_kern.c index a08c677bb599..085c079c89c4 100644 --- a/lib/libpthread/thread/thr_kern.c +++ b/lib/libpthread/thread/thr_kern.c @@ -54,7 +54,6 @@ __FBSDID("$FreeBSD$"); #include "atomic_ops.h" #include "thr_private.h" #include "libc_private.h" -#include "ksd.h" /*#define DEBUG_THREAD_KERN */ #ifdef DEBUG_THREAD_KERN @@ -79,7 +78,7 @@ __FBSDID("$FreeBSD$"); #define KSE_STACKSIZE 16384 #define KSE_SET_MBOX(kse, thrd) \ - (kse)->k_mbx.km_curthread = &(thrd)->tmbx + (kse)->k_kcb->kcb_kmbx.km_curthread = &(thrd)->tcb->tcb_tmbx #define KSE_SET_EXITED(kse) (kse)->k_flags |= KF_EXITED @@ -132,8 +131,8 @@ static void kse_check_completed(struct kse *kse); static void kse_check_waitq(struct kse *kse); static void kse_fini(struct kse *curkse); static void kse_reinit(struct kse *kse, int sys_scope); -static void kse_sched_multi(struct kse *curkse); -static void kse_sched_single(struct kse *curkse); +static void kse_sched_multi(struct kse_mailbox *kmbx); +static void kse_sched_single(struct kse_mailbox *kmbx); static void kse_switchout_thread(struct kse *kse, struct pthread *thread); static void kse_wait(struct kse *kse, struct pthread *td_wait, int sigseq); static void kse_free_unlocked(struct kse *kse); @@ -152,20 +151,30 @@ static int thr_timedout(struct pthread *thread, struct timespec *curtime); static void thr_unlink(struct pthread *thread); +static __inline void +kse_set_curthread(struct kse *kse, struct pthread *td) +{ + kse->k_curthread = td; + if (td != NULL) + _tcb_set(kse->k_kcb, td->tcb); + else + _tcb_set(kse->k_kcb, NULL); +} + static void __inline thr_accounting(struct pthread *thread) { if ((thread->slice_usec != -1) && (thread->slice_usec <= TIMESLICE_USEC) && (thread->attr.sched_policy != SCHED_FIFO)) { - thread->slice_usec += (thread->tmbx.tm_uticks - + thread->tmbx.tm_sticks) * _clock_res_usec; + thread->slice_usec += (thread->tcb->tcb_tmbx.tm_uticks + + thread->tcb->tcb_tmbx.tm_sticks) * _clock_res_usec; /* Check for time quantum exceeded: */ if (thread->slice_usec > TIMESLICE_USEC) thread->slice_usec = -1; } - thread->tmbx.tm_uticks = 0; - thread->tmbx.tm_sticks = 0; + thread->tcb->tcb_tmbx.tm_uticks = 0; + thread->tcb->tcb_tmbx.tm_sticks = 0; } /* @@ -246,7 +255,7 @@ _kse_single_thread(struct pthread *curthread) _lockuser_destroy(&kse->k_lockusers[i]); } _lock_destroy(&kse->k_lock); - _ksd_destroy(&kse->k_ksd); + _kcb_dtor(kse->k_kcb); if (kse->k_stack.ss_sp != NULL) free(kse->k_stack.ss_sp); free(kse); @@ -341,7 +350,7 @@ _kse_single_thread(struct pthread *curthread) #else if (__isthreaded) _thr_signal_deinit(); - _ksd_set_tmbx(NULL); + curthread->kse->k_kcb->kcb_kmbx.km_curthread = NULL; __isthreaded = 0; active_threads = 0; #endif @@ -409,11 +418,12 @@ _kse_setthreaded(int threaded) * For bound thread, kernel reads mailbox pointer once, * we'd set it here before calling kse_create */ + _tcb_set(_kse_initial->k_kcb, _thr_initial->tcb); KSE_SET_MBOX(_kse_initial, _thr_initial); - _kse_initial->k_mbx.km_flags |= KMF_BOUND; + 
_kse_initial->k_kcb->kcb_kmbx.km_flags |= KMF_BOUND; #endif - if (kse_create(&_kse_initial->k_mbx, 0) != 0) { + if (kse_create(&_kse_initial->k_kcb->kcb_kmbx, 0) != 0) { _kse_initial->k_flags &= ~KF_STARTED; __isthreaded = 0; PANIC("kse_create() failed\n"); @@ -422,6 +432,7 @@ _kse_setthreaded(int threaded) #ifndef SYSTEM_SCOPE_ONLY /* Set current thread to initial thread */ + _tcb_set(_kse_initial->k_kcb, _thr_initial->tcb); KSE_SET_MBOX(_kse_initial, _thr_initial); _thr_start_sig_daemon(); _thr_setmaxconcurrency(); @@ -450,7 +461,7 @@ _kse_lock_wait(struct lock *lock, struct lockuser *lu) struct timespec ts; int saved_flags; - if (curkse->k_mbx.km_curthread != NULL) + if (curkse->k_kcb->kcb_kmbx.km_curthread != NULL) PANIC("kse_lock_wait does not disable upcall.\n"); /* * Enter a loop to wait until we get the lock. @@ -462,10 +473,11 @@ _kse_lock_wait(struct lock *lock, struct lockuser *lu) * Yield the kse and wait to be notified when the lock * is granted. */ - saved_flags = curkse->k_mbx.km_flags; - curkse->k_mbx.km_flags |= KMF_NOUPCALL | KMF_NOCOMPLETED; + saved_flags = curkse->k_kcb->kcb_kmbx.km_flags; + curkse->k_kcb->kcb_kmbx.km_flags |= KMF_NOUPCALL | + KMF_NOCOMPLETED; kse_release(&ts); - curkse->k_mbx.km_flags = saved_flags; + curkse->k_kcb->kcb_kmbx.km_flags = saved_flags; } } @@ -482,7 +494,7 @@ _kse_lock_wakeup(struct lock *lock, struct lockuser *lu) if (kse == curkse) PANIC("KSE trying to wake itself up in lock"); else { - mbx = &kse->k_mbx; + mbx = &kse->k_kcb->kcb_kmbx; _lock_grant(lock, lu); /* * Notify the owning kse that it has the lock. @@ -534,8 +546,7 @@ _kse_critical_enter(void) { kse_critical_t crit; - crit = _ksd_get_tmbx(); - _ksd_set_tmbx(NULL); + crit = (kse_critical_t)_kcb_critical_enter(); return (crit); } @@ -544,7 +555,7 @@ _kse_critical_leave(kse_critical_t crit) { struct pthread *curthread; - _ksd_set_tmbx(crit); + _kcb_critical_leave((struct kse_thr_mailbox *)crit); if ((crit != NULL) && ((curthread = _get_curthread()) != NULL)) THR_YIELD_CHECK(curthread); } @@ -552,7 +563,7 @@ _kse_critical_leave(kse_critical_t crit) int _kse_in_critical(void) { - return (_ksd_get_tmbx() == NULL); + return (_kcb_in_critical()); } void @@ -629,17 +640,17 @@ _thr_sched_switch_unlocked(struct pthread *curthread) * we don't bother checking for that. */ if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM) - kse_sched_single(curkse); + kse_sched_single(&curkse->k_kcb->kcb_kmbx); else if ((curthread->state == PS_DEAD) || (((td = KSE_RUNQ_FIRST(curkse)) == NULL) && (curthread->state != PS_RUNNING)) || ((td != NULL) && (td->lock_switch == 0))) { curkse->k_switch = 1; - _thread_enter_uts(&curthread->tmbx, &curkse->k_mbx); + _thread_enter_uts(curthread->tcb, curkse->k_kcb); } else { uts_once = 0; - THR_GETCONTEXT(&curthread->tmbx.tm_context); + THR_GETCONTEXT(&curthread->tcb->tcb_tmbx.tm_context); if (uts_once == 0) { uts_once = 1; @@ -649,7 +660,7 @@ _thr_sched_switch_unlocked(struct pthread *curthread) /* Choose another thread to run. */ td = KSE_RUNQ_FIRST(curkse); KSE_RUNQ_REMOVE(curkse, td); - curkse->k_curthread = td; + kse_set_curthread(curkse, td); /* * Make sure the current thread's kse points to @@ -674,7 +685,7 @@ _thr_sched_switch_unlocked(struct pthread *curthread) /* * Continue the thread at its current frame: */ - ret = _thread_switch(&td->tmbx, NULL); + ret = _thread_switch(curkse->k_kcb, td->tcb, 0); /* This point should not be reached. 
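_kse_lock_wait() above parks the KSE in the kernel with kse_release() while a lock is contended; the save/OR/restore dance on km_flags turns the release into a plain sleep and re-arms the upcall machinery afterwards. The idiom in isolation (a sketch; kmbx stands in for the KSE's &k_kcb->kcb_kmbx):

	saved_flags = kmbx->km_flags;
	kmbx->km_flags |= KMF_NOUPCALL | KMF_NOCOMPLETED;	/* just sleep */
	kse_release(&ts);	/* returns once the lock owner wakes this KSE */
	kmbx->km_flags = saved_flags;	/* normal upcall behavior again */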
@@ -701,7 +712,7 @@ _thr_sched_switch_unlocked(struct pthread *curthread)
 		curthread->lock_switch = 0;
 		KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
-		_kse_critical_leave(&curthread->tmbx);
+		_kse_critical_leave(&curthread->tcb->tcb_tmbx);
 	}
 	/*
 	 * This thread is being resumed; check for cancellations.
@@ -728,17 +739,21 @@
  */
 static void
-kse_sched_single(struct kse *curkse)
+kse_sched_single(struct kse_mailbox *kmbx)
 {
-	struct pthread *curthread = curkse->k_curthread;
+	struct kse *curkse;
+	struct pthread *curthread;
 	struct timespec ts;
 	sigset_t sigmask;
 	int i, sigseqno, level, first = 0;
 
+	curkse = (struct kse *)kmbx->km_udata;
+	curthread = curkse->k_curthread;
+
 	if ((curkse->k_flags & KF_INITIALIZED) == 0) {
 		/* Setup this KSEs specific data. */
-		_ksd_setprivate(&curkse->k_ksd);
-		_set_curkse(curkse);
+		_kcb_set(curkse->k_kcb);
+		_tcb_set(curkse->k_kcb, curthread->tcb);
 		curkse->k_flags |= KF_INITIALIZED;
 		first = 1;
 		curthread->active = 1;
@@ -750,7 +765,7 @@
 		 * It is used to let other code work, those code want mailbox
 		 * to be cleared.
 		 */
-		_kse_critical_enter();
+		(void)_kse_critical_enter();
 	}
 
 	curthread->critical_yield = 0;
@@ -875,7 +890,7 @@
 
 	DBG_MSG("Continuing bound thread %p\n", curthread);
 	if (first) {
-		_kse_critical_leave(&curthread->tmbx);
+		_kse_critical_leave(&curthread->tcb->tcb_tmbx);
 		pthread_exit(curthread->start_routine(curthread->arg));
 	}
 }
@@ -898,20 +913,21 @@ dump_queues(struct kse *curkse)
  * This is the scheduler for a KSE which runs multiple threads.
  */
 static void
-kse_sched_multi(struct kse *curkse)
+kse_sched_multi(struct kse_mailbox *kmbx)
 {
+	struct kse *curkse;
 	struct pthread *curthread, *td_wait;
 	struct pthread_sigframe *curframe;
 	int ret;
 
-	THR_ASSERT(curkse->k_mbx.km_curthread == NULL,
+	curkse = (struct kse *)kmbx->km_udata;
+	THR_ASSERT(curkse->k_kcb->kcb_kmbx.km_curthread == NULL,
 	    "Mailbox not null in kse_sched_multi");
 
 	/* Check for first time initialization: */
 	if ((curkse->k_flags & KF_INITIALIZED) == 0) {
 		/* Setup this KSEs specific data. */
-		_ksd_setprivate(&curkse->k_ksd);
-		_set_curkse(curkse);
+		_kcb_set(curkse->k_kcb);
 
 		/* Set this before grabbing the context. */
 		curkse->k_flags |= KF_INITIALIZED;
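Both schedulers now take the mailbox rather than the KSE because a kernel upcall supplies only a struct kse_mailbox *; the UTS recovers its struct kse through km_udata, which _kse_alloc() points back at the owning KSE. A stripped-down sketch of the entry convention (hypothetical function name):

	static void
	kse_sched_sketch(struct kse_mailbox *kmbx)
	{
		struct kse *curkse = (struct kse *)kmbx->km_udata;

		/* ... all further scheduling work is relative to curkse ... */
	}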
@@ -928,6 +944,12 @@
 	KSE_SCHED_LOCK(curkse, curkse->k_kseg);
 	curkse->k_switch = 0;
+	/*
+	 * Now that the scheduler lock is held, get the current
+	 * thread. The KSE's current thread cannot be safely
+	 * examined without the lock because it could have returned
+	 * as completed on another KSE. See kse_check_completed().
+	 */
 	curthread = curkse->k_curthread;
 
 	if (KSE_IS_IDLE(curkse)) {
@@ -975,20 +997,19 @@
 		curthread->active = 1;
 		if ((curthread->flags & THR_FLAGS_IN_RUNQ) != 0)
 			KSE_RUNQ_REMOVE(curkse, curthread);
-		curkse->k_curthread = curthread;
+		kse_set_curthread(curkse, curthread);
 		curthread->kse = curkse;
 		DBG_MSG("Continuing thread %p in critical region\n",
 		    curthread);
 		kse_wakeup_multi(curkse);
 		KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
-		ret = _thread_switch(&curthread->tmbx,
-		    &curkse->k_mbx.km_curthread);
+		ret = _thread_switch(curkse->k_kcb, curthread->tcb, 1);
 		if (ret != 0)
 			PANIC("Can't resume thread in critical region\n");
 	}
 	else if ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0)
 		kse_switchout_thread(curkse, curthread);
-	curkse->k_curthread = NULL;
+	kse_set_curthread(curkse, NULL);
 
 	kse_wakeup_multi(curkse);
 
@@ -1034,7 +1055,7 @@
 	/*
 	 * Make the selected thread the current thread.
 	 */
-	curkse->k_curthread = curthread;
+	kse_set_curthread(curkse, curthread);
 
 	/*
 	 * Make sure the current thread's kse points to this kse.
@@ -1069,13 +1090,13 @@
 	    (((curthread->cancelflags & THR_AT_CANCEL_POINT) == 0) &&
 	    ((curthread->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))) &&
 	    !THR_IN_CRITICAL(curthread))
-		signalcontext(&curthread->tmbx.tm_context, 0,
+		signalcontext(&curthread->tcb->tcb_tmbx.tm_context, 0,
 		    (__sighandler_t *)thr_resume_wrapper);
 #else
 	if ((curframe == NULL) && (curthread->state == PS_RUNNING) &&
 	    (curthread->check_pending != 0) && !THR_IN_CRITICAL(curthread)) {
 		curthread->check_pending = 0;
-		signalcontext(&curthread->tmbx.tm_context, 0,
+		signalcontext(&curthread->tcb->tcb_tmbx.tm_context, 0,
 		    (__sighandler_t *)thr_resume_wrapper);
 	}
 #endif
@@ -1087,12 +1108,11 @@
 		 * This thread came from a scheduler switch; it will
 		 * unlock the scheduler lock and set the mailbox.
 		 */
-		ret = _thread_switch(&curthread->tmbx, NULL);
+		ret = _thread_switch(curkse->k_kcb, curthread->tcb, 0);
 	} else {
 		/* This thread won't unlock the scheduler lock. */
 		KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
-		ret = _thread_switch(&curthread->tmbx,
-		    &curkse->k_mbx.km_curthread);
+		ret = _thread_switch(curkse->k_kcb, curthread->tcb, 1);
 	}
 	if (ret != 0)
 		PANIC("Thread has returned from _thread_switch");
@@ -1114,9 +1134,9 @@ thr_resume_wrapper(int sig, siginfo_t *siginfo, ucontext_t *ucp)
 	thr_resume_check(curthread, ucp, NULL);
 	_kse_critical_enter();
 	curkse = _get_curkse();
-	curthread->tmbx.tm_context = *ucp;
+	curthread->tcb->tcb_tmbx.tm_context = *ucp;
 	curthread->error = err_save;
-	ret = _thread_switch(&curthread->tmbx, &curkse->k_mbx.km_curthread);
+	ret = _thread_switch(curkse->k_kcb, curthread->tcb, 1);
 	if (ret != 0)
 		PANIC("thr_resume_wrapper: thread has returned "
 		    "from _thread_switch");
@@ -1242,7 +1262,7 @@ _thr_gc(struct pthread *curthread)
 		if ((td->flags & THR_FLAGS_GC_SAFE) == 0)
 			continue;
 		else if (((td->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) &&
-		    ((td->kse->k_mbx.km_flags & KMF_DONE) == 0)) {
+		    ((td->kse->k_kcb->kcb_kmbx.km_flags & KMF_DONE) == 0)) {
 			/*
 			 * The thread and KSE are operating on the same
 			 * stack. Wait for the KSE to exit before freeing
@@ -1319,9 +1339,9 @@ _thr_schedule_add(struct pthread *curthread, struct pthread *newthread)
 	 */
 	if ((newthread->attr.flags & PTHREAD_SCOPE_SYSTEM) != 0) {
 		/* We use the thread's stack as the KSE's stack. */
-		newthread->kse->k_mbx.km_stack.ss_sp =
+		newthread->kse->k_kcb->kcb_kmbx.km_stack.ss_sp =
 		    newthread->attr.stackaddr_attr;
-		newthread->kse->k_mbx.km_stack.ss_size =
+		newthread->kse->k_kcb->kcb_kmbx.km_stack.ss_size =
 		    newthread->attr.stacksize_attr;
 
 		/*
@@ -1331,10 +1351,10 @@
 		KSEG_THRQ_ADD(newthread->kseg, newthread);
 		/* this thread never gives up kse */
 		newthread->active = 1;
-		newthread->kse->k_curthread = newthread;
-		newthread->kse->k_mbx.km_flags = KMF_BOUND;
-		newthread->kse->k_mbx.km_func = (kse_func_t *)kse_sched_single;
-		newthread->kse->k_mbx.km_quantum = 0;
+		kse_set_curthread(newthread->kse, newthread);
+		newthread->kse->k_kcb->kcb_kmbx.km_flags = KMF_BOUND;
+		newthread->kse->k_kcb->kcb_kmbx.km_func = (kse_func_t *)kse_sched_single;
+		newthread->kse->k_kcb->kcb_kmbx.km_quantum = 0;
 		KSE_SET_MBOX(newthread->kse, newthread);
 		/*
 		 * This thread needs a new KSE and KSEG.
@@ -1342,7 +1362,7 @@
 		newthread->kse->k_flags &= ~KF_INITIALIZED;
 		newthread->kse->k_flags |= KF_STARTED;
 		/* Fire up! */
-		ret = kse_create(&newthread->kse->k_mbx, 1);
+		ret = kse_create(&newthread->kse->k_kcb->kcb_kmbx, 1);
 		if (ret != 0)
 			ret = errno;
 	}
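The PTHREAD_SCOPE_SYSTEM branch above is the bound-thread configuration: thread and KSE are permanent partners, the thread's own stack doubles as the upcall stack, km_quantum is 0 so there are no time-slicing upcalls, and kse_create(2) is called with newgroup set to 1. From the application side this path is selected with the standard attribute call; start_routine and arg are hypothetical here:

	pthread_attr_t attr;
	pthread_t td;

	pthread_attr_init(&attr);
	pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
	pthread_create(&td, &attr, start_routine, arg);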
@@ -1363,10 +1383,10 @@
 		 * outside of holding the lock.
 		 */
 		newthread->kse->k_flags |= KF_STARTED;
-		newthread->kse->k_mbx.km_func =
+		newthread->kse->k_kcb->kcb_kmbx.km_func =
 		    (kse_func_t *)kse_sched_multi;
-		newthread->kse->k_mbx.km_flags = 0;
-		kse_create(&newthread->kse->k_mbx, 0);
+		newthread->kse->k_kcb->kcb_kmbx.km_flags = 0;
+		kse_create(&newthread->kse->k_kcb->kcb_kmbx, 0);
 	}
 	else if ((newthread->state == PS_RUNNING) &&
 	    KSE_IS_IDLE(newthread->kse)) {
 		/*
@@ -1418,8 +1438,8 @@ kse_check_completed(struct kse *kse)
 	struct kse_thr_mailbox *completed;
 	int sig;
 
-	if ((completed = kse->k_mbx.km_completed) != NULL) {
-		kse->k_mbx.km_completed = NULL;
+	if ((completed = kse->k_kcb->kcb_kmbx.km_completed) != NULL) {
+		kse->k_kcb->kcb_kmbx.km_completed = NULL;
 		while (completed != NULL) {
 			thread = completed->tm_udata;
 			DBG_MSG("Found completed thread %p, name %s\n",
@@ -1434,17 +1454,23 @@
 				KSE_RUNQ_INSERT_TAIL(kse, thread);
 				if ((thread->kse != kse) &&
 				    (thread->kse->k_curthread == thread)) {
-					thread->kse->k_curthread = NULL;
+					/*
+					 * Remove this thread from its
+					 * previous KSE so that it (the KSE)
+					 * doesn't think it is still active.
+					 */
+					kse_set_curthread(thread->kse, NULL);
 					thread->active = 0;
 				}
 			}
-			if ((sig = thread->tmbx.tm_syncsig.si_signo) != 0) {
+			if ((sig = thread->tcb->tcb_tmbx.tm_syncsig.si_signo)
+			    != 0) {
 				if (SIGISMEMBER(thread->sigmask, sig))
 					SIGADDSET(thread->sigpend, sig);
 				else
 					(void)_thr_sig_add(thread, sig,
-					    &thread->tmbx.tm_syncsig);
-				thread->tmbx.tm_syncsig.si_signo = 0;
+					    &thread->tcb->tcb_tmbx.tm_syncsig);
+				thread->tcb->tcb_tmbx.tm_syncsig.si_signo = 0;
 			}
 			completed = completed->tm_next;
 		}
@@ -1567,7 +1593,7 @@ kse_switchout_thread(struct kse *kse, struct pthread *thread)
 			if (SIGISMEMBER(thread->sigpend, i) &&
 			    !SIGISMEMBER(thread->sigmask, i)) {
 				restart = _thread_sigact[i - 1].sa_flags & SA_RESTART;
-				kse_thr_interrupt(&thread->tmbx,
+				kse_thr_interrupt(&thread->tcb->tcb_tmbx,
 				    restart ? KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0);
 				break;
 			}
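The kse_check_completed() hunks above also forward any synchronous signal the kernel recorded in tm_syncsig, and the kse_switchout_thread() hunk interrupts a thread that is blocked in the kernel with an unmasked signal pending. Whether the blocked syscall is restarted or aborted follows the handler's SA_RESTART flag, which the library reads from its cached _thread_sigact[] table; the same information is visible through sigaction(2). An illustrative fragment, with sig being the pending signal number:

	struct sigaction sa;
	int restart = 0;

	if (sigaction(sig, NULL, &sa) == 0)
		restart = (sa.sa_flags & SA_RESTART) != 0;
	/* restart != 0 maps to KSE_INTR_RESTART, else KSE_INTR_INTERRUPT. */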
@@ -1584,6 +1610,7 @@
 		 */
 		thread->active = 0;
 		thread->need_switchout = 0;
+		thread->lock_switch = 0;
 		thr_cleanup(kse, thread);
 		return;
 		break;
@@ -1705,10 +1732,10 @@ kse_wait(struct kse *kse, struct pthread *td_wait, int sigseqno)
 	    (kse->k_sigseqno != sigseqno))
 		; /* don't sleep */
 	else {
-		saved_flags = kse->k_mbx.km_flags;
-		kse->k_mbx.km_flags |= KMF_NOUPCALL;
+		saved_flags = kse->k_kcb->kcb_kmbx.km_flags;
+		kse->k_kcb->kcb_kmbx.km_flags |= KMF_NOUPCALL;
 		kse_release(&ts_sleep);
-		kse->k_mbx.km_flags = saved_flags;
+		kse->k_kcb->kcb_kmbx.km_flags = saved_flags;
 	}
 	KSE_SCHED_LOCK(kse, kse->k_kseg);
 	if (KSE_IS_IDLE(kse)) {
@@ -1781,7 +1808,7 @@ kse_fini(struct kse *kse)
 		KSE_SCHED_UNLOCK(kse, kse->k_kseg);
 		ts.tv_sec = 120;
 		ts.tv_nsec = 0;
-		kse->k_mbx.km_flags = 0;
+		kse->k_kcb->kcb_kmbx.km_flags = 0;
 		kse_release(&ts);
 		/* Never reach */
 	}
@@ -1898,13 +1925,13 @@ kse_wakeup_one(struct pthread *thread)
 	if (KSE_IS_IDLE(thread->kse)) {
 		KSE_CLEAR_IDLE(thread->kse);
 		thread->kseg->kg_idle_kses--;
-		return (&thread->kse->k_mbx);
+		return (&thread->kse->k_kcb->kcb_kmbx);
 	} else {
 		TAILQ_FOREACH(ke, &thread->kseg->kg_kseq, k_kgqe) {
 			if (KSE_IS_IDLE(ke)) {
 				KSE_CLEAR_IDLE(ke);
 				ke->k_kseg->kg_idle_kses--;
-				return (&ke->k_mbx);
+				return (&ke->k_kcb->kcb_kmbx);
 			}
 		}
 	}
@@ -1930,25 +1957,6 @@ kse_wakeup_multi(struct kse *curkse)
 	}
 }
 
-struct pthread *
-_get_curthread(void)
-{
-	return (_ksd_curthread());
-}
-
-/* This assumes the caller has disabled upcalls. */
-struct kse *
-_get_curkse(void)
-{
-	return (_ksd_curkse());
-}
-
-void
-_set_curkse(struct kse *kse)
-{
-	_ksd_setprivate(&kse->k_ksd);
-}
-
 /*
  * Allocate a new KSEG.
  *
@@ -2048,8 +2056,8 @@ struct kse *
 _kse_alloc(struct pthread *curthread, int sys_scope)
 {
 	struct kse *kse = NULL;
+	char *stack;
 	kse_critical_t crit;
-	int need_ksd = 0;
 	int i;
 
 	if ((curthread != NULL) && (free_kse_count > 0)) {
@@ -2058,7 +2066,7 @@
 		/* Search for a finished KSE. */
 		kse = TAILQ_FIRST(&free_kseq);
 		while ((kse != NULL) &&
-		    ((kse->k_mbx.km_flags & KMF_DONE) == 0)) {
+		    ((kse->k_kcb->kcb_kmbx.km_flags & KMF_DONE) == 0)) {
 			kse = TAILQ_NEXT(kse, k_qe);
 		}
 		if (kse != NULL) {
@@ -2075,8 +2083,22 @@
 	}
 	if ((kse == NULL) &&
 	    ((kse = (struct kse *)malloc(sizeof(*kse))) != NULL)) {
+		if (sys_scope != 0)
+			stack = NULL;
+		else if ((stack = malloc(KSE_STACKSIZE)) == NULL) {
+			free(kse);
+			return (NULL);
+		}
 		bzero(kse, sizeof(*kse));
 
+		/* Initialize KCB without the lock. */
+		if ((kse->k_kcb = _kcb_ctor(kse)) == NULL) {
+			if (stack != NULL)
+				free(stack);
+			free(kse);
+			return (NULL);
+		}
+
 		/* Initialize the lockusers. */
 		for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
 			_lockuser_init(&kse->k_lockusers[i], (void *)kse);
 		}
 		/* _lock_init(kse->k_lock, ...) */
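_kse_alloc() now front-loads the failure cases: the upcall stack and the KCB are obtained before the KSE is published under kse_lock, so each error path only has local allocations to release (the old version had to unwind lockusers and KSD from a half-initialized KSE). The same unwinding is often written with a goto; a hypothetical restructuring of the logic above, not the library's code:

	struct kse *
	kse_alloc_sketch(int sys_scope)
	{
		char *stack = NULL;
		struct kse *kse;

		if ((kse = malloc(sizeof(*kse))) == NULL)
			return (NULL);
		if (!sys_scope && (stack = malloc(KSE_STACKSIZE)) == NULL)
			goto bad;
		bzero(kse, sizeof(*kse));
		if ((kse->k_kcb = _kcb_ctor(kse)) == NULL)
			goto bad;
		kse->k_stack.ss_sp = stack;
		kse->k_stack.ss_size = (stack != NULL) ? KSE_STACKSIZE : 0;
		return (kse);
	bad:
		free(stack);	/* free(NULL) is a no-op */
		free(kse);
		return (NULL);
	}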
@@ -2084,58 +2106,10 @@
-		/* We had to malloc a kse; mark it as needing a new ID.*/
-		need_ksd = 1;
-
-		/*
-		 * Create the KSE context.
-		 * Scope system threads (one thread per KSE) are not required
-		 * to have a stack for an unneeded kse upcall.
-		 */
-		if (!sys_scope) {
-			kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
-			kse->k_stack.ss_sp = (char *) malloc(KSE_STACKSIZE);
-			kse->k_stack.ss_size = KSE_STACKSIZE;
-		} else {
-			kse->k_mbx.km_func = (kse_func_t *)kse_sched_single;
-		}
-		kse->k_mbx.km_udata = (void *)kse;
-		kse->k_mbx.km_quantum = 20000;
-		/*
-		 * We need to keep a copy of the stack in case it
-		 * doesn't get used; a KSE running a scope system
-		 * thread will use that thread's stack.
-		 */
-		kse->k_mbx.km_stack = kse->k_stack;
-		if (!sys_scope && kse->k_stack.ss_sp == NULL) {
-			for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
-				_lockuser_destroy(&kse->k_lockusers[i]);
-			}
-			/* _lock_destroy(&kse->k_lock); */
-			free(kse);
-			kse = NULL;
-		}
-	}
-	if ((kse != NULL) && (need_ksd != 0)) {
-		/* This KSE needs initialization. */
 		if (curthread != NULL) {
 			crit = _kse_critical_enter();
 			KSE_LOCK_ACQUIRE(curthread->kse, &kse_lock);
 		}
-		/* Initialize KSD inside of the lock. */
-		if (_ksd_create(&kse->k_ksd, (void *)kse, sizeof(*kse)) != 0) {
-			if (curthread != NULL) {
-				KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
-				_kse_critical_leave(crit);
-			}
-			if (kse->k_stack.ss_sp)
-				free(kse->k_stack.ss_sp);
-			for (i = 0; i < MAX_KSE_LOCKLEVEL; i++) {
-				_lockuser_destroy(&kse->k_lockusers[i]);
-			}
-			free(kse);
-			return (NULL);
-		}
 		kse->k_flags = 0;
 		TAILQ_INSERT_TAIL(&active_kseq, kse, k_qe);
 		active_kse_count++;
@@ -2143,6 +2117,28 @@
 			KSE_LOCK_RELEASE(curthread->kse, &kse_lock);
 			_kse_critical_leave(crit);
 		}
+		/*
+		 * Create the KSE context.
+		 * Scope system threads (one thread per KSE) are not required
+		 * to have a stack for an unneeded kse upcall.
+		 */
+		if (!sys_scope) {
+			kse->k_kcb->kcb_kmbx.km_func = (kse_func_t *)kse_sched_multi;
+			kse->k_stack.ss_sp = stack;
+			kse->k_stack.ss_size = KSE_STACKSIZE;
+		} else {
+			kse->k_kcb->kcb_kmbx.km_func = (kse_func_t *)kse_sched_single;
+			kse->k_stack.ss_sp = NULL;
+			kse->k_stack.ss_size = 0;
+		}
+		kse->k_kcb->kcb_kmbx.km_udata = (void *)kse;
+		kse->k_kcb->kcb_kmbx.km_quantum = 20000;
+		/*
+		 * We need to keep a copy of the stack in case it
+		 * doesn't get used; a KSE running a scope system
+		 * thread will use that thread's stack.
+		 */
+		kse->k_kcb->kcb_kmbx.km_stack = kse->k_stack;
 	}
 	return (kse);
 }
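With the context creation moved after the locked list insertion, the mailbox handed to the kernel for a multi-threaded KSE ends up looking roughly as follows; the values are taken from the code above, collected into one sketch:

	struct kse_mailbox *kmbx = &kse->k_kcb->kcb_kmbx;

	kmbx->km_func = (kse_func_t *)kse_sched_multi;	/* upcall entry point */
	kmbx->km_stack = kse->k_stack;		/* KSE_STACKSIZE-byte upcall stack */
	kmbx->km_udata = (void *)kse;		/* recovered in the upcall */
	kmbx->km_quantum = 20000;		/* 20 ms time slice */
	kse_create(kmbx, 0);			/* 0: stay in the current KSE group */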
@@ -2151,26 +2147,26 @@ static void
 kse_reinit(struct kse *kse, int sys_scope)
 {
 	if (!sys_scope) {
-		kse->k_mbx.km_func = (kse_func_t *)kse_sched_multi;
+		kse->k_kcb->kcb_kmbx.km_func = (kse_func_t *)kse_sched_multi;
 		if (kse->k_stack.ss_sp == NULL) {
 			/* XXX check allocation failure */
 			kse->k_stack.ss_sp = (char *) malloc(KSE_STACKSIZE);
 			kse->k_stack.ss_size = KSE_STACKSIZE;
 		}
-		kse->k_mbx.km_quantum = 20000;
+		kse->k_kcb->kcb_kmbx.km_quantum = 20000;
 	} else {
-		kse->k_mbx.km_func = (kse_func_t *)kse_sched_single;
+		kse->k_kcb->kcb_kmbx.km_func = (kse_func_t *)kse_sched_single;
 		if (kse->k_stack.ss_sp)
 			free(kse->k_stack.ss_sp);
 		kse->k_stack.ss_sp = NULL;
 		kse->k_stack.ss_size = 0;
-		kse->k_mbx.km_quantum = 0;
+		kse->k_kcb->kcb_kmbx.km_quantum = 0;
 	}
-	kse->k_mbx.km_stack = kse->k_stack;
-	kse->k_mbx.km_udata = (void *)kse;
-	kse->k_mbx.km_curthread = NULL;
-	kse->k_mbx.km_flags = 0;
-	kse->k_curthread = 0;
+	kse->k_kcb->kcb_kmbx.km_stack = kse->k_stack;
+	kse->k_kcb->kcb_kmbx.km_udata = (void *)kse;
+	kse->k_kcb->kcb_kmbx.km_curthread = NULL;
+	kse->k_kcb->kcb_kmbx.km_flags = 0;
+	kse->k_curthread = NULL;
 	kse->k_kseg = 0;
 	kse->k_schedq = 0;
 	kse->k_locklevel = 0;
@@ -2193,9 +2189,10 @@ kse_free_unlocked(struct kse *kse)
 	TAILQ_REMOVE(&active_kseq, kse, k_qe);
 	active_kse_count--;
 	kse->k_kseg = NULL;
-	kse->k_mbx.km_quantum = 20000;
+	kse->k_kcb->kcb_kmbx.km_quantum = 20000;
 	kse->k_flags = 0;
 	TAILQ_INSERT_HEAD(&free_kseq, kse, k_qe);
+	_kcb_dtor(kse->k_kcb);
 	free_kse_count++;
 }
 
@@ -2239,7 +2236,6 @@ struct pthread *
 _thr_alloc(struct pthread *curthread)
 {
 	kse_critical_t crit;
-	void *p;
 	struct pthread *thread = NULL;
 
 	if (curthread != NULL) {
@@ -2256,11 +2252,12 @@ _thr_alloc(struct pthread *curthread)
 			_kse_critical_leave(crit);
 		}
 	}
-	if (thread == NULL) {
-		p = malloc(sizeof(struct pthread) + THR_ALIGNBYTES);
-		if (p != NULL) {
-			thread = (struct pthread *)THR_ALIGN(p);
-			thread->alloc_addr = p;
+	if ((thread == NULL) &&
+	    ((thread = malloc(sizeof(struct pthread))) != NULL)) {
+		bzero(thread, sizeof(struct pthread));
+		if ((thread->tcb = _tcb_ctor(thread)) == NULL) {
+			free(thread);
+			thread = NULL;
 		}
 	}
 	return (thread);
@@ -2278,9 +2275,16 @@ _thr_free(struct pthread *curthread, struct pthread *thread)
 			_lockuser_destroy(&thread->lockusers[i]);
 		}
 		_lock_destroy(&thread->lock);
-		free(thread->alloc_addr);
+		_tcb_dtor(thread->tcb);
+		free(thread);
 	} else {
+		/* Reinitialize any important fields here. */
+		thread->lock_switch = 0;
+		sigemptyset(&thread->sigpend);
+		thread->check_pending = 0;
+
+		/* Add the thread to the free thread list. */
 		crit = _kse_critical_enter();
 		KSE_LOCK_ACQUIRE(curthread->kse, &thread_lock);
 		TAILQ_INSERT_TAIL(&free_threadq, thread, tle);
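_thr_alloc() and _thr_free() now pair each pthread with a machine-dependent TCB from _tcb_ctor()/_tcb_dtor() instead of over-allocating the structure and aligning it by hand with THR_ALIGN(); the alignment requirement (the thread mailbox's saved context wants a 16-byte boundary) moves into the MD tcb layout. In modern C the invariant could be stated directly; illustrative only, not part of this patch:

	#include <stddef.h>

	_Static_assert(offsetof(struct tcb, tcb_tmbx) % 16 == 0,
	    "tcb_tmbx must sit on a 16-byte boundary");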
diff --git a/lib/libpthread/thread/thr_private.h b/lib/libpthread/thread/thr_private.h
index 25a47a77488b..0664aca4b24c 100644
--- a/lib/libpthread/thread/thr_private.h
+++ b/lib/libpthread/thread/thr_private.h
@@ -173,15 +173,14 @@ struct kse_group;
 #define	MAX_KSE_LOCKLEVEL	5
 struct kse {
-	struct kse_mailbox	k_mbx;		/* kernel kse mailbox */
 	/* -- location and order specific items for gdb -- */
+	struct kcb		*k_kcb;
 	struct pthread		*k_curthread;	/* current thread */
 	struct kse_group	*k_kseg;	/* parent KSEG */
 	struct sched_queue	*k_schedq;	/* scheduling queue */
 	/* -- end of location and order specific items -- */
 	TAILQ_ENTRY(kse)	k_qe;		/* KSE list link entry */
 	TAILQ_ENTRY(kse)	k_kgqe;		/* KSEG's KSE list entry */
-	struct ksd		k_ksd;		/* KSE specific data */
 	/*
 	 * Items that are only modified by the kse, or that otherwise
 	 * don't need to be locked when accessed
@@ -300,7 +299,7 @@
 #define	KSE_CLEAR_WAIT(kse)	atomic_store_rel_int(&(kse)->k_waiting, 0)
 
 #define	KSE_WAITING(kse)	(kse)->k_waiting != 0
-#define	KSE_WAKEUP(kse)		kse_wakeup(&(kse)->k_mbx)
+#define	KSE_WAKEUP(kse)		kse_wakeup(&(kse)->k_kcb->kcb_kmbx)
 
 #define	KSE_SET_IDLE(kse)	((kse)->k_idle = 1)
 #define	KSE_CLEAR_IDLE(kse)	((kse)->k_idle = 0)
@@ -509,7 +508,7 @@
  */
 #define	KSE_GET_TOD(curkse, tsp) \
 do { \
-	*tsp = (curkse)->k_mbx.km_timeofday; \
+	*tsp = (curkse)->k_kcb->kcb_kmbx.km_timeofday; \
 	if ((tsp)->tv_sec == 0) \
 		clock_gettime(CLOCK_REALTIME, tsp); \
 } while (0)
@@ -601,8 +600,7 @@ struct pthread {
 	/*
 	 * Thread mailbox is first so it can be aligned properly.
 	 */
-	struct kse_thr_mailbox	tmbx;
-	void		*alloc_addr;	/* real address (unaligned) */
+	struct tcb	*tcb;
 
 	/*
 	 * Magic value to help recognize a valid thread structure
@@ -1049,9 +1047,6 @@
 SCLASS int	_thr_debug_flags	SCLASS_PRESET(0);
 
 __BEGIN_DECLS
 int	_cond_reinit(pthread_cond_t *);
 void	_cond_wait_backout(struct pthread *);
-struct pthread *_get_curthread(void);
-struct kse *_get_curkse(void);
-void	_set_curkse(struct kse *);
 struct kse *_kse_alloc(struct pthread *, int sys_scope);
 kse_critical_t _kse_critical_enter(void);
 void	_kse_critical_leave(kse_critical_t);
@@ -1098,8 +1093,6 @@
 int	_pthread_rwlock_destroy (pthread_rwlock_t *);
 struct pthread *_pthread_self(void);
 int	_pthread_setspecific(pthread_key_t, const void *);
 struct pthread *_thr_alloc(struct pthread *);
-int	_thread_enter_uts(struct kse_thr_mailbox *, struct kse_mailbox *);
-int	_thread_switch(struct kse_thr_mailbox *, struct kse_thr_mailbox **);
 void	_thr_exit(char *, int, char *);
 void	_thr_exit_cleanup(void);
 void	_thr_lock_wait(struct lock *lock, struct lockuser *lu);
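With k_mbx and k_ksd gone from struct kse, and tmbx/alloc_addr collapsed into a tcb pointer in struct pthread, the _get_curthread()/_get_curkse()/_set_curkse() trio also leaves the MI code (their thr_kern.c definitions were removed earlier in this patch) and becomes machine-dependent accessors over the KCB/TCB. A plausible shape, assuming a _tcb_get() accessor for the current TCB (hypothetical spelling):

	static __inline struct pthread *
	_get_curthread(void)
	{
		struct tcb *tcb = _tcb_get();	/* current tcb, e.g. via %fs */

		return ((tcb != NULL) ? tcb->tcb_thread : NULL);
	}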
diff --git a/lib/libpthread/thread/thr_sig.c b/lib/libpthread/thread/thr_sig.c
index 6f1631831ce0..77654ded330a 100644
--- a/lib/libpthread/thread/thr_sig.c
+++ b/lib/libpthread/thread/thr_sig.c
@@ -41,7 +41,6 @@
 #include
 #include
 #include "thr_private.h"
-#include "pthread_md.h"
 
 /* Prototypes: */
 static void build_siginfo(siginfo_t *info, int signo);
@@ -212,11 +211,11 @@ sig_daemon(void *arg /* Unused */)
 		}
 		ts.tv_sec = 30;
 		ts.tv_nsec = 0;
-		curkse->k_mbx.km_flags =
+		curkse->k_kcb->kcb_kmbx.km_flags =
 		    KMF_NOUPCALL | KMF_NOCOMPLETED | KMF_WAITSIGEVENT;
 		kse_release(&ts);
-		curkse->k_mbx.km_flags = 0;
-		set = curkse->k_mbx.km_sigscaught;
+		curkse->k_kcb->kcb_kmbx.km_flags = 0;
+		set = curkse->k_kcb->kcb_kmbx.km_sigscaught;
 	}
 	return (0);
 }
@@ -355,7 +354,7 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
 		 * so kse_release will return from kernel immediately.
 		 */
 		if (KSE_IS_IDLE(curkse))
-			kse_wakeup(&curkse->k_mbx);
+			kse_wakeup(&curkse->k_kcb->kcb_kmbx);
 		return;
 	}
 
@@ -377,7 +376,7 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
 		__sys_sigaction(sig, NULL, &_thread_sigact[sig - 1]);
 	}
 	KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
-	_kse_critical_leave(&curthread->tmbx);
+	_kse_critical_leave(&curthread->tcb->tcb_tmbx);
 
 	/* Now invoke real handler */
 	if (((__sighandler_t *)sigfunc != SIG_DFL) &&
@@ -403,7 +402,7 @@ _thr_sig_handler(int sig, siginfo_t *info, ucontext_t *ucp)
 		curthread->interrupted = intr_save;
 		_kse_critical_enter();
 		curthread->sigmask = ucp->uc_sigmask;
-		_kse_critical_leave(&curthread->tmbx);
+		_kse_critical_leave(&curthread->tcb->tcb_tmbx);
 	}
 	DBG_MSG("<<< _thr_sig_handler(%d)\n", sig);
 }
@@ -446,7 +445,7 @@ thr_sig_invoke_handler(struct pthread *curthread, int sig, siginfo_t *info,
 	}
 	KSE_LOCK_RELEASE(curkse, &_thread_signal_lock);
 	KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
-	_kse_critical_leave(&curthread->tmbx);
+	_kse_critical_leave(&curthread->tcb->tcb_tmbx);
 	/*
 	 * We are processing buffered signals, synchronize working
 	 * signal mask into kernel.
@@ -737,7 +736,7 @@ _thr_sig_rundown(struct pthread *curthread, ucontext_t *ucp,
 	KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
 	if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
 		__sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
-	_kse_critical_leave(&curthread->tmbx);
+	_kse_critical_leave(&curthread->tcb->tcb_tmbx);
 
 	curthread->interrupted = interrupted;
 	curthread->timeout = timeout;
@@ -860,7 +859,7 @@ _thr_sig_add(struct pthread *pthread, int sig, siginfo_t *info)
 			if (!(pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) &&
 			    (pthread->blocked != 0) &&
 			    !THR_IN_CRITICAL(pthread))
-				kse_thr_interrupt(&pthread->tmbx,
+				kse_thr_interrupt(&pthread->tcb->tcb_tmbx,
 				    restart ? KSE_INTR_RESTART : KSE_INTR_INTERRUPT, 0);
 		}
 	}
@@ -983,7 +982,7 @@ _thr_sig_send(struct pthread *pthread, int sig)
 	struct kse_mailbox *kmbx;
 
 	if (pthread->attr.flags & PTHREAD_SCOPE_SYSTEM) {
-		kse_thr_interrupt(&pthread->tmbx, KSE_INTR_SENDSIG, sig);
+		kse_thr_interrupt(&pthread->tcb->tcb_tmbx, KSE_INTR_SENDSIG, sig);
 		return;
 	}
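Throughout thr_sig.c the critical-section idiom is unchanged in spirit: being in a critical region means km_curthread is NULL, so the kernel will not upcall or treat the KSE as running a user thread, and _kse_critical_leave() is simply handed the thread's own mailbox (now reached through the TCB) to re-publish. The pattern, using only functions from this patch:

	kse_critical_t crit;

	crit = _kse_critical_enter();	/* clears km_curthread, returns old value */
	/* ... code that must not be interrupted by an upcall ... */
	_kse_critical_leave(crit);	/* restores km_curthread, may yield */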