Don't explicitly mmap() red zones at the bottom of thread stacks (except
the initial thread). Instead, just leave an unmapped gap between thread stacks, and make sure that the thread stacks won't grow into these gaps, simply by limiting the size of the stacks with the 'len' argument to mmap(). This (if I understand correctly) reduces VM overhead considerably. Reviewed by: deischen
This commit is contained in:
parent
383cb3575c
commit
a0b3dffc3d
@ -335,7 +335,13 @@ struct pthread_attr {
|
||||
* Miscellaneous definitions.
|
||||
*/
|
||||
#define PTHREAD_STACK_DEFAULT 65536
|
||||
/* Size of red zone at the end of each stack. */
|
||||
/*
|
||||
* Size of red zone at the end of each stack. In actuality, this "red zone" is
|
||||
* merely an unmapped region, except in the case of the initial stack. Since
|
||||
* mmap() makes it possible to specify the maximum growth of a MAP_STACK region,
|
||||
* an unmapped gap between thread stacks achieves the same effect as explicitly
|
||||
* mapped red zones.
|
||||
*/
|
||||
#define PTHREAD_STACK_GUARD PAGE_SIZE
|
||||
|
||||
/*
|
||||
@ -904,10 +910,17 @@ SCLASS pthread_switch_routine_t _sched_switch_hook
|
||||
*/
|
||||
SCLASS SLIST_HEAD(, stack) _stackq;
|
||||
|
||||
/* Base address of next unallocated default-size stack. Stacks are allocated
|
||||
* contiguously, starting below the beginning of the main stack. When a new
|
||||
* stack is created, a guard page is created just above it in order to (usually)
|
||||
* detect attempts by the adjacent stack to trounce the next thread stack. */
|
||||
/*
|
||||
* Base address of next unallocated default-size {stack, red zone}. Stacks are
|
||||
* allocated contiguously, starting below the bottom of the main stack. When a
|
||||
* new stack is created, a red zone is created (actually, the red zone is simply
|
||||
* left unmapped) below the bottom of the stack, such that the stack will not be
|
||||
* able to grow all the way to the top of the next stack. This isn't
|
||||
* fool-proof. It is possible for a stack to grow by a large amount, such that
|
||||
* it grows into the next stack, and as long as the memory within the red zone
|
||||
* is never accessed, nothing will prevent one thread stack from trouncing all
|
||||
* over the next.
|
||||
*/
|
||||
SCLASS void * _next_stack
|
||||
#ifdef GLOBAL_PTHREAD_PRIVATE
|
||||
/* main stack top - main stack size - stack size - (red zone + main stack red zone) */
|
||||
|
@ -136,20 +136,11 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
|
||||
if (pthread_mutex_unlock(&_gc_mutex) != 0)
|
||||
PANIC("Cannot unlock gc mutex");
|
||||
|
||||
/* Red zone: */
|
||||
if (mmap(stack - PTHREAD_STACK_GUARD,
|
||||
PTHREAD_STACK_GUARD, 0, MAP_ANON,
|
||||
-1, 0) == MAP_FAILED) {
|
||||
ret = EAGAIN;
|
||||
free(new_thread);
|
||||
}
|
||||
/* Stack: */
|
||||
else if (mmap(stack, PTHREAD_STACK_DEFAULT,
|
||||
if (mmap(stack, PTHREAD_STACK_DEFAULT,
|
||||
PROT_READ | PROT_WRITE, MAP_STACK,
|
||||
-1, 0) == MAP_FAILED) {
|
||||
ret = EAGAIN;
|
||||
munmap(stack - PTHREAD_STACK_GUARD,
|
||||
PTHREAD_STACK_GUARD);
|
||||
free(new_thread);
|
||||
}
|
||||
}
|
||||
|
@ -186,7 +186,13 @@ _thread_init(void)
|
||||
/* Initialize the thread stack cache: */
|
||||
SLIST_INIT(&_stackq);
|
||||
|
||||
/* Create the red zone for the main stack. */
|
||||
/*
|
||||
* Create a red zone below the main stack. All other stacks are
|
||||
* constrained to a maximum size by the parameters passed to
|
||||
* mmap(), but this stack is only limited by resource limits, so
|
||||
* this stack needs an explicitly mapped red zone to protect the
|
||||
* thread stack that is just beyond.
|
||||
*/
|
||||
if (mmap((void *) USRSTACK - PTHREAD_STACK_INITIAL -
|
||||
PTHREAD_STACK_GUARD, PTHREAD_STACK_GUARD, 0, MAP_ANON,
|
||||
-1, 0) == MAP_FAILED)
|
||||
|
@ -136,20 +136,11 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
|
||||
if (pthread_mutex_unlock(&_gc_mutex) != 0)
|
||||
PANIC("Cannot unlock gc mutex");
|
||||
|
||||
/* Red zone: */
|
||||
if (mmap(stack - PTHREAD_STACK_GUARD,
|
||||
PTHREAD_STACK_GUARD, 0, MAP_ANON,
|
||||
-1, 0) == MAP_FAILED) {
|
||||
ret = EAGAIN;
|
||||
free(new_thread);
|
||||
}
|
||||
/* Stack: */
|
||||
else if (mmap(stack, PTHREAD_STACK_DEFAULT,
|
||||
if (mmap(stack, PTHREAD_STACK_DEFAULT,
|
||||
PROT_READ | PROT_WRITE, MAP_STACK,
|
||||
-1, 0) == MAP_FAILED) {
|
||||
ret = EAGAIN;
|
||||
munmap(stack - PTHREAD_STACK_GUARD,
|
||||
PTHREAD_STACK_GUARD);
|
||||
free(new_thread);
|
||||
}
|
||||
}
|
||||
|
@ -186,7 +186,13 @@ _thread_init(void)
|
||||
/* Initialize the thread stack cache: */
|
||||
SLIST_INIT(&_stackq);
|
||||
|
||||
/* Create the red zone for the main stack. */
|
||||
/*
|
||||
* Create a red zone below the main stack. All other stacks are
|
||||
* constrained to a maximum size by the parameters passed to
|
||||
* mmap(), but this stack is only limited by resource limits, so
|
||||
* this stack needs an explicitly mapped red zone to protect the
|
||||
* thread stack that is just beyond.
|
||||
*/
|
||||
if (mmap((void *) USRSTACK - PTHREAD_STACK_INITIAL -
|
||||
PTHREAD_STACK_GUARD, PTHREAD_STACK_GUARD, 0, MAP_ANON,
|
||||
-1, 0) == MAP_FAILED)
|
||||
|
@ -335,7 +335,13 @@ struct pthread_attr {
|
||||
* Miscellaneous definitions.
|
||||
*/
|
||||
#define PTHREAD_STACK_DEFAULT 65536
|
||||
/* Size of red zone at the end of each stack. */
|
||||
/*
|
||||
* Size of red zone at the end of each stack. In actuality, this "red zone" is
|
||||
* merely an unmapped region, except in the case of the initial stack. Since
|
||||
* mmap() makes it possible to specify the maximum growth of a MAP_STACK region,
|
||||
* an unmapped gap between thread stacks achieves the same effect as explicitly
|
||||
* mapped red zones.
|
||||
*/
|
||||
#define PTHREAD_STACK_GUARD PAGE_SIZE
|
||||
|
||||
/*
|
||||
@ -904,10 +910,17 @@ SCLASS pthread_switch_routine_t _sched_switch_hook
|
||||
*/
|
||||
SCLASS SLIST_HEAD(, stack) _stackq;
|
||||
|
||||
/* Base address of next unallocated default-size stack. Stacks are allocated
|
||||
* contiguously, starting below the beginning of the main stack. When a new
|
||||
* stack is created, a guard page is created just above it in order to (usually)
|
||||
* detect attempts by the adjacent stack to trounce the next thread stack. */
|
||||
/*
|
||||
* Base address of next unallocated default-size {stack, red zone}. Stacks are
|
||||
* allocated contiguously, starting below the bottom of the main stack. When a
|
||||
* new stack is created, a red zone is created (actually, the red zone is simply
|
||||
* left unmapped) below the bottom of the stack, such that the stack will not be
|
||||
* able to grow all the way to the top of the next stack. This isn't
|
||||
* fool-proof. It is possible for a stack to grow by a large amount, such that
|
||||
* it grows into the next stack, and as long as the memory within the red zone
|
||||
* is never accessed, nothing will prevent one thread stack from trouncing all
|
||||
* over the next.
|
||||
*/
|
||||
SCLASS void * _next_stack
|
||||
#ifdef GLOBAL_PTHREAD_PRIVATE
|
||||
/* main stack top - main stack size - stack size - (red zone + main stack red zone) */
|
||||
|
@ -136,20 +136,11 @@ pthread_create(pthread_t * thread, const pthread_attr_t * attr,
|
||||
if (pthread_mutex_unlock(&_gc_mutex) != 0)
|
||||
PANIC("Cannot unlock gc mutex");
|
||||
|
||||
/* Red zone: */
|
||||
if (mmap(stack - PTHREAD_STACK_GUARD,
|
||||
PTHREAD_STACK_GUARD, 0, MAP_ANON,
|
||||
-1, 0) == MAP_FAILED) {
|
||||
ret = EAGAIN;
|
||||
free(new_thread);
|
||||
}
|
||||
/* Stack: */
|
||||
else if (mmap(stack, PTHREAD_STACK_DEFAULT,
|
||||
if (mmap(stack, PTHREAD_STACK_DEFAULT,
|
||||
PROT_READ | PROT_WRITE, MAP_STACK,
|
||||
-1, 0) == MAP_FAILED) {
|
||||
ret = EAGAIN;
|
||||
munmap(stack - PTHREAD_STACK_GUARD,
|
||||
PTHREAD_STACK_GUARD);
|
||||
free(new_thread);
|
||||
}
|
||||
}
|
||||
|
@ -186,7 +186,13 @@ _thread_init(void)
|
||||
/* Initialize the thread stack cache: */
|
||||
SLIST_INIT(&_stackq);
|
||||
|
||||
/* Create the red zone for the main stack. */
|
||||
/*
|
||||
* Create a red zone below the main stack. All other stacks are
|
||||
* constrained to a maximum size by the parameters passed to
|
||||
* mmap(), but this stack is only limited by resource limits, so
|
||||
* this stack needs an explicitly mapped red zone to protect the
|
||||
* thread stack that is just beyond.
|
||||
*/
|
||||
if (mmap((void *) USRSTACK - PTHREAD_STACK_INITIAL -
|
||||
PTHREAD_STACK_GUARD, PTHREAD_STACK_GUARD, 0, MAP_ANON,
|
||||
-1, 0) == MAP_FAILED)
|
||||
|
@ -335,7 +335,13 @@ struct pthread_attr {
|
||||
* Miscellaneous definitions.
|
||||
*/
|
||||
#define PTHREAD_STACK_DEFAULT 65536
|
||||
/* Size of red zone at the end of each stack. */
|
||||
/*
|
||||
* Size of red zone at the end of each stack. In actuality, this "red zone" is
|
||||
* merely an unmapped region, except in the case of the initial stack. Since
|
||||
* mmap() makes it possible to specify the maximum growth of a MAP_STACK region,
|
||||
* an unmapped gap between thread stacks achieves the same effect as explicitly
|
||||
* mapped red zones.
|
||||
*/
|
||||
#define PTHREAD_STACK_GUARD PAGE_SIZE
|
||||
|
||||
/*
|
||||
@ -904,10 +910,17 @@ SCLASS pthread_switch_routine_t _sched_switch_hook
|
||||
*/
|
||||
SCLASS SLIST_HEAD(, stack) _stackq;
|
||||
|
||||
/* Base address of next unallocated default-size stack. Stacks are allocated
|
||||
* contiguously, starting below the beginning of the main stack. When a new
|
||||
* stack is created, a guard page is created just above it in order to (usually)
|
||||
* detect attempts by the adjacent stack to trounce the next thread stack. */
|
||||
/*
|
||||
* Base address of next unallocated default-size {stack, red zone}. Stacks are
|
||||
* allocated contiguously, starting below the bottom of the main stack. When a
|
||||
* new stack is created, a red zone is created (actually, the red zone is simply
|
||||
* left unmapped) below the bottom of the stack, such that the stack will not be
|
||||
* able to grow all the way to the top of the next stack. This isn't
|
||||
* fool-proof. It is possible for a stack to grow by a large amount, such that
|
||||
* it grows into the next stack, and as long as the memory within the red zone
|
||||
* is never accessed, nothing will prevent one thread stack from trouncing all
|
||||
* over the next.
|
||||
*/
|
||||
SCLASS void * _next_stack
|
||||
#ifdef GLOBAL_PTHREAD_PRIVATE
|
||||
/* main stack top - main stack size - stack size - (red zone + main stack red zone) */
|
||||
|
Loading…
Reference in New Issue
Block a user