Modify previous changes to conform better to libc_r's coding style.
Always use mmap() for default-size stack allocation. Use MAP_ANON instead of MAP_STACK on the alpha architecture. Reduce the amount of code executed while owning _gc_mutex during stack allocation.
parent f687757a7c
commit d1e30ddcd1
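The diff below is easiest to read with the overall scheme in mind: every default-size stack is backed by mmap()ed memory with an inaccessible red zone immediately below it, so running off the end of a stack faults instead of silently overwriting the adjacent thread's stack. What follows is only a rough, self-contained sketch of that idea, not the libc_r code: the helper name allocate_default_stack() is made up, the constants merely mirror PTHREAD_STACK_DEFAULT and PTHREAD_STACK_GUARD, it collapses the diff's two mmap() calls into one mapping plus mprotect(), and it leaves out the MAP_STACK-versus-MAP_ANON flag choice the real code makes per architecture.

#include <stdio.h>
#include <sys/mman.h>

#define STACK_SIZE 65536    /* cf. PTHREAD_STACK_DEFAULT */
#define GUARD_SIZE 4096     /* cf. PTHREAD_STACK_GUARD */

/* Hypothetical helper: returns the base of a usable stack, or NULL. */
static void *
allocate_default_stack(void)
{
    char *base;

    /* Reserve red zone + stack in one mapping; nothing is accessible yet. */
    base = mmap(NULL, GUARD_SIZE + STACK_SIZE, PROT_NONE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (base == MAP_FAILED)
        return (NULL);

    /* Enable the stack itself; the red zone below it stays PROT_NONE. */
    if (mprotect(base + GUARD_SIZE, STACK_SIZE,
        PROT_READ | PROT_WRITE) != 0) {
        munmap(base, GUARD_SIZE + STACK_SIZE);
        return (NULL);
    }
    return (base + GUARD_SIZE);
}

int
main(void)
{
    void *stack = allocate_default_stack();

    printf("stack at %p (%d usable bytes)\n", stack, STACK_SIZE);
    return (stack == NULL);
}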
pthread_private.h

@@ -31,7 +31,7 @@
  *
  * Private thread definitions for the uthread kernel.
  *
- * $Id: pthread_private.h,v 1.21 1999/07/05 00:35:17 jasone Exp $
+ * $Id: pthread_private.h,v 1.22 1999/07/06 00:25:35 jasone Exp $
  */

 #ifndef _PTHREAD_PRIVATE_H
@@ -337,9 +337,11 @@ struct pthread_attr {
 #define PTHREAD_STACK_DEFAULT 65536
 /* Size of red zone at the end of each stack. */
 #define PTHREAD_STACK_GUARD 4096
-/* Maximum size of initial thread's stack. This perhaps deserves to be larger
+/*
+ * Maximum size of initial thread's stack. This perhaps deserves to be larger
  * than the stacks of other threads, since many applications are likely to run
- * almost entirely on this stack. */
+ * almost entirely on this stack.
+ */
 #define PTHREAD_STACK_INITIAL 0x100000
 /* Address immediately beyond the beginning of the initial thread stack. */
 #if defined(__FreeBSD__)
@@ -887,9 +889,11 @@ SCLASS pthread_switch_routine_t _sched_switch_hook
 #endif
 ;

-/* Spare stack queue. Stacks of default size are cached in order to reduce
+/*
+ * Spare stack queue. Stacks of default size are cached in order to reduce
  * thread creation time. Spare stacks are used in LIFO order to increase cache
- * locality. */
+ * locality.
+ */
 SCLASS SLIST_HEAD(, stack) _stackq;

 /* Base address of next unallocated default-size stack. Stacks are allocated
uthread_create.c

@@ -29,7 +29,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: uthread_create.c,v 1.14 1999/07/05 00:35:17 jasone Exp $
+ * $Id: uthread_create.c,v 1.15 1999/07/06 00:25:36 jasone Exp $
  */
 #include <errno.h>
 #include <stdlib.h>
@@ -79,64 +79,82 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
     /* Check if a stack was specified in the thread attributes: */
     if ((stack = pattr->stackaddr_attr) != NULL) {
     }
-#ifdef __i386__
     /* Allocate memory for a default-size stack: */
     else if (pattr->stacksize_attr == PTHREAD_STACK_DEFAULT) {
         struct stack * spare_stack;

         /* Allocate or re-use a default-size stack. */

-        /* Use the garbage collector mutex for synchronization
+        /*
+         * Use the garbage collector mutex for synchronization
          * of the spare stack list.
          *
-         * XXX This may not be ideal. */
+         */
         if (pthread_mutex_lock(&_gc_mutex) != 0)
             PANIC("Cannot lock gc mutex");

-        if (NULL != (spare_stack = SLIST_FIRST(&_stackq))) {
+        if ((spare_stack = SLIST_FIRST(&_stackq)) != NULL) {
             /* Use the spare stack. */
             SLIST_REMOVE_HEAD(&_stackq, qe);
-            stack = sizeof(struct stack) + (void *) spare_stack - PTHREAD_STACK_DEFAULT;
+
+            /* Unlock the garbage collector mutex. */
+            if (pthread_mutex_unlock(&_gc_mutex) != 0)
+                PANIC("Cannot unlock gc mutex");
+
+            stack = sizeof(struct stack)
+                + (void *) spare_stack
+                - PTHREAD_STACK_DEFAULT;
         } else {
+            /* Unlock the garbage collector mutex. */
+            if (pthread_mutex_unlock(&_gc_mutex) != 0)
+                PANIC("Cannot unlock gc mutex");
+
             /* Allocate a new stack. */
             stack = _next_stack + PTHREAD_STACK_GUARD;
-            /* Even if stack allocation fails, we don't want to try to use this location again, so unconditionally
-             * decrement _next_stack. Under normal operating conditions, the most likely reason for an mmap()
-             * error is a stack overflow of the adjacent thread stack. */
-            _next_stack -= (PTHREAD_STACK_DEFAULT + PTHREAD_STACK_GUARD);
+            /*
+             * Even if stack allocation fails, we don't want
+             * to try to use this location again, so
+             * unconditionally decrement _next_stack. Under
+             * normal operating conditions, the most likely
+             * reason for an mmap() error is a stack
+             * overflow of the adjacent thread stack.
+             */
+            _next_stack -= (PTHREAD_STACK_DEFAULT
+                + PTHREAD_STACK_GUARD);

             /* Red zone: */
-            if (MAP_FAILED == mmap(_next_stack, PTHREAD_STACK_GUARD, 0, MAP_ANON, -1, 0)) {
+            if (mmap(_next_stack, PTHREAD_STACK_GUARD, 0,
+                MAP_ANON, -1, 0) == MAP_FAILED) {
                 ret = EAGAIN;
                 free(new_thread);
             }
             /* Stack: */
-            else if (MAP_FAILED == mmap(stack, PTHREAD_STACK_DEFAULT, PROT_READ | PROT_WRITE, MAP_STACK, -1, 0)) {
+            else if (mmap(stack,
+                PTHREAD_STACK_DEFAULT,
+                PROT_READ | PROT_WRITE,
+#ifdef __i386__
+                MAP_STACK,
+#else
+                MAP_ANON,
+#endif
+                -1, 0) == MAP_FAILED) {
                 ret = EAGAIN;
-                munmap(_next_stack, PTHREAD_STACK_GUARD);
+                munmap(_next_stack,
+                    PTHREAD_STACK_GUARD);
                 free(new_thread);
             }
         }
-
-        /* Unlock the garbage collector mutex. */
-        if (pthread_mutex_unlock(&_gc_mutex) != 0)
-            PANIC("Cannot unlock gc mutex");
-    }
-    /* The user wants a stack of a particular size. Lets hope they really know what they want, and simply malloc the
-     * stack. */
-    else if ((stack = (void *) malloc(pattr->stacksize_attr)) == NULL) {
-        /* Insufficient memory to create a thread: */
-        ret = EAGAIN;
-        free(new_thread);
-    }
-#else
-    /* Allocate memory for the stack: */
-    else if ((stack = (void *) malloc(pattr->stacksize_attr)) == NULL) {
-        /* Insufficient memory to create a thread: */
-        ret = EAGAIN;
-        free(new_thread);
-    }
-#endif
+    }
+    /*
+     * The user wants a stack of a particular size. Lets hope they
+     * really know what they want, and simply malloc the stack.
+     */
+    else if ((stack = (void *) malloc(pattr->stacksize_attr))
+        == NULL) {
+        /* Insufficient memory to create a thread: */
+        ret = EAGAIN;
+        free(new_thread);
+    }
     /* Check for errors: */
     if (ret != 0) {
     } else {
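Most of the uthread_create.c hunk above is about holding _gc_mutex for as little code as possible: the spare-stack queue is popped (or found empty) under the lock, the lock is released, and only then is the usable stack base computed from the cached header or a fresh region mmap()ed. Below is a hedged sketch of that discipline using an ordinary pthread mutex; struct stack, the queue, and the helper name are illustrative stand-ins rather than libc_r's internals.

#include <pthread.h>
#include <stddef.h>
#include <sys/queue.h>

#define STACK_SIZE 65536        /* cf. PTHREAD_STACK_DEFAULT */

/* Header stored in the top sizeof(struct stack) bytes of a cached stack. */
struct stack {
    SLIST_ENTRY(stack) qe;
};

static SLIST_HEAD(, stack) stackq = SLIST_HEAD_INITIALIZER(stackq);
static pthread_mutex_t stackq_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pop a cached default-size stack, holding the lock only for the pop. */
static void *
reuse_spare_stack(void)
{
    struct stack *spare;

    pthread_mutex_lock(&stackq_lock);
    spare = SLIST_FIRST(&stackq);
    if (spare != NULL)
        SLIST_REMOVE_HEAD(&stackq, qe);
    pthread_mutex_unlock(&stackq_lock);

    if (spare == NULL)
        return (NULL);          /* caller falls back to a fresh mmap() */

    /*
     * The header sits at the very top of the region, so the usable
     * base is one full stack below the end of the header.
     */
    return ((char *)spare + sizeof(struct stack) - STACK_SIZE);
}

On a cache miss the caller proceeds as in the diff: it claims a guard page plus a fresh stack below _next_stack, and it decrements _next_stack unconditionally so that a failed mmap() never causes the same addresses to be retried.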
uthread_gc.c

@@ -29,7 +29,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: uthread_gc.c,v 1.5 1999/07/05 00:35:18 jasone Exp $
+ * $Id: uthread_gc.c,v 1.6 1999/07/06 00:25:37 jasone Exp $
  *
  * Garbage collector thread. Frees memory allocated for dead threads.
  *
@@ -113,8 +113,8 @@ _thread_gc(pthread_addr_t arg)
      * has memory to free.
      */
     for (pthread = TAILQ_FIRST(&_dead_list);
-        p_stack == NULL && pthread_cln == NULL && pthread != NULL;
-        pthread = TAILQ_NEXT(pthread, dle)) {
+         p_stack == NULL && pthread_cln == NULL && pthread != NULL;
+         pthread = TAILQ_NEXT(pthread, dle)) {
         /* Check if the initial thread: */
         if (pthread == _thread_initial) {
             /* Don't destroy the initial thread. */
@@ -123,7 +123,7 @@ _thread_gc(pthread_addr_t arg)
          * Check if this thread has detached:
          */
         else if ((pthread->attr.flags &
-            PTHREAD_DETACHED) != 0) {
+             PTHREAD_DETACHED) != 0) {
             /* Remove this thread from the dead list: */
             TAILQ_REMOVE(&_dead_list, pthread, dle);

@@ -134,23 +134,28 @@ _thread_gc(pthread_addr_t arg)
          */
         if (pthread->attr.stackaddr_attr == NULL &&
             pthread->stack != NULL) {
-#ifdef __i386__
-            if (pthread->attr.stacksize_attr == PTHREAD_STACK_DEFAULT) {
-                /* Default-size stack. Cache it: */
-                struct stack * spare_stack = (pthread->stack + PTHREAD_STACK_DEFAULT
-                    - sizeof(struct stack));
-                SLIST_INSERT_HEAD(&_stackq, spare_stack, qe);
+            if (pthread->attr.stacksize_attr
+                == PTHREAD_STACK_DEFAULT) {
+                /*
+                 * Default-size stack. Cache
+                 * it:
+                 */
+                struct stack * spare_stack;
+
+                spare_stack
+                    = (pthread->stack
+                    + PTHREAD_STACK_DEFAULT
+                    - sizeof(struct stack));
+                SLIST_INSERT_HEAD(&_stackq,
+                    spare_stack,
+                    qe);
             } else {
-                /* Non-standard stack size. free() it outside the locks: */
+                /*
+                 * Non-standard stack size.
+                 * free() it outside the locks.
+                 */
                 p_stack = pthread->stack;
             }
-#else
-            /*
-             * Point to the stack that must
-             * be freed outside the locks:
-             */
-            p_stack = pthread->stack;
-#endif
         }

         /*
@@ -170,23 +175,28 @@ _thread_gc(pthread_addr_t arg)
          */
         if (pthread->attr.stackaddr_attr == NULL &&
             pthread->stack != NULL) {
-#ifdef __i386__
-            if (pthread->attr.stacksize_attr == PTHREAD_STACK_DEFAULT) {
-                /* Default-size stack. Cache it: */
-                struct stack * spare_stack = (pthread->stack + PTHREAD_STACK_DEFAULT
-                    - sizeof(struct stack));
-                SLIST_INSERT_HEAD(&_stackq, spare_stack, qe);
+            if (pthread->attr.stacksize_attr
+                == PTHREAD_STACK_DEFAULT) {
+                /*
+                 * Default-size stack. Cache
+                 * it:
+                 */
+                struct stack * spare_stack;
+
+                spare_stack
+                    = (pthread->stack
+                    + PTHREAD_STACK_DEFAULT
+                    - sizeof(struct stack));
+                SLIST_INSERT_HEAD(&_stackq,
+                    spare_stack,
+                    qe);
             } else {
-                /* Non-standard stack size. free() it outside the locks: */
+                /*
+                 * Non-standard stack size.
+                 * free() it outside the locks:
+                 */
                 p_stack = pthread->stack;
             }
-#else
-            /*
-             * Point to the stack that must
-             * be freed outside the locks:
-             */
-            p_stack = pthread->stack;
-#endif

             /*
              * NULL the stack pointer now
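The uthread_gc.c hunks above are the producer side of the same cache, and they drop the i386-only conditional: since default-size stacks are now always mmap()ed, the collector can always stash a small struct stack header in the top of a dead thread's default-size stack and push it onto _stackq, while oddly sized malloc()ed stacks are still handed out of the locked region to be free()d later. Here is a sketch of that push, reusing the illustrative stackq, stackq_lock and struct stack definitions from the previous sketch; again, this is not libc_r's own code.

/*
 * Sketch only: cache a dying thread's default-size stack for reuse, or
 * hand a non-default stack back to the caller to free() outside the
 * lock.  Builds on the declarations in the previous sketch.
 */
static void *
recycle_stack(void *stack_base, size_t stack_size)
{
    struct stack *spare;

    if (stack_size != STACK_SIZE)
        return (stack_base);    /* caller free()s it later */

    /* Place the header in the top bytes of the stack region. */
    spare = (struct stack *)
        ((char *)stack_base + STACK_SIZE - sizeof(struct stack));

    /* LIFO insert keeps recently used stacks cache-warm. */
    pthread_mutex_lock(&stackq_lock);
    SLIST_INSERT_HEAD(&stackq, spare, qe);
    pthread_mutex_unlock(&stackq_lock);

    return (NULL);              /* nothing left to free */
}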
uthread_init.c

@@ -29,7 +29,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: uthread_init.c,v 1.13 1999/07/05 00:35:19 jasone Exp $
+ * $Id: uthread_init.c,v 1.14 1999/07/06 00:25:38 jasone Exp $
  */

 /* Allocate space for global thread variables here: */
@@ -180,15 +180,17 @@ _thread_init(void)
     /* Initialize the scheduling switch hook routine: */
     _sched_switch_hook = NULL;

-#ifdef __i386__
     /* Initialize the thread stack cache: */
     SLIST_INIT(&_stackq);

     /* Create the red zone for the main stack. */
-    if (MAP_FAILED == mmap((void *) PTHREAD_STACK_TOP - PTHREAD_STACK_INITIAL, PTHREAD_STACK_GUARD, 0, MAP_ANON, -1, 0)) {
+    if (mmap((void *) PTHREAD_STACK_TOP
+        - PTHREAD_STACK_INITIAL,
+        PTHREAD_STACK_GUARD, 0, MAP_ANON,
+        -1, 0) == MAP_FAILED) {
         PANIC("Cannot allocate red zone for initial thread");
     }
-#endif
+
     /*
      * Write a magic value to the thread structure
      * to help identify valid ones:
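uthread_init.c gives the initial thread the same protection on every architecture: a red zone is mapped at the low end of the initial thread's much larger stack reservation, so overflowing it faults before it can reach the first default-size thread stack below. The following is a minimal sketch of that mapping; stack_top stands in for PTHREAD_STACK_TOP, the constants mirror PTHREAD_STACK_INITIAL and PTHREAD_STACK_GUARD, and the address is passed to mmap() as a hint (as in the diff) rather than with MAP_FIXED, so the result is checked.

#include <sys/mman.h>

#define INITIAL_STACK_SIZE 0x100000 /* cf. PTHREAD_STACK_INITIAL */
#define RED_ZONE_SIZE 4096          /* cf. PTHREAD_STACK_GUARD */

/* Sketch: place an inaccessible page just below the initial stack range. */
static int
protect_initial_stack(void *stack_top)
{
    void *red_zone = (char *)stack_top - INITIAL_STACK_SIZE;
    void *p;

    p = mmap(red_zone, RED_ZONE_SIZE, PROT_NONE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (p == MAP_FAILED)
        return (-1);
    if (p != red_zone) {
        /* The hint was not honoured; give the page back. */
        munmap(p, RED_ZONE_SIZE);
        return (-1);
    }
    return (0);
}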