Don't use PAGE_SIZE in userland; use getpagesize() instead. This allows running on
other arches where the instructions are supported but the page-size granularity is not.

Glanced at by: peter
parent 289fc68db6
commit efe5270b1e
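The core of the change is mechanical: everywhere the library rounded a size up to the
compile-time PAGE_SIZE constant, it now rounds up to a value obtained at run time from
getpagesize() and cached in a global. A minimal sketch of that idiom; the helper name
round_up_to_page() is illustrative and not part of the library:

    #include <stddef.h>
    #include <unistd.h>

    /* Cached once, the way _thread_init() caches pthread_page_size. */
    static size_t page_size;

    /* Round sz up to the next multiple of the run-time page size. */
    static size_t
    round_up_to_page(size_t sz)
    {
        if (page_size == 0)
            page_size = (size_t)getpagesize();
        if (sz % page_size != 0)
            sz = ((sz / page_size) + 1) * page_size;
        return (sz);
    }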
@@ -421,8 +421,11 @@ enum pthread_susp {
  * Since mmap() makes it possible to specify the maximum growth of a MAP_STACK
  * region, an unmapped gap between thread stacks achieves the same effect as
  * explicitly mapped red zones.
+ * This is declared and initialized in uthread_init.c.
  */
-#define PTHREAD_GUARD_DEFAULT		PAGE_SIZE
+extern int pthread_guard_default;
+
+extern int pthread_page_size;
 
 /*
  * Maximum size of initial thread's stack. This perhaps deserves to be larger
@@ -431,9 +434,6 @@ enum pthread_susp {
  */
 #define PTHREAD_STACK_INITIAL		0x100000
 
-/* Size of the scheduler stack: */
-#define SCHED_STACK_SIZE		PAGE_SIZE
-
 /*
  * Define the different priority ranges. All applications have thread
  * priorities constrained within 0-31. The threads library raises the
@@ -971,7 +971,7 @@ SCLASS struct pthread_attr pthread_attr_default
 #ifdef GLOBAL_PTHREAD_PRIVATE
 = { SCHED_RR, 0, TIMESLICE_USEC, PTHREAD_DEFAULT_PRIORITY,
         PTHREAD_CREATE_RUNNING, PTHREAD_CREATE_JOINABLE, NULL, NULL, NULL,
-        PTHREAD_STACK_DEFAULT, PTHREAD_GUARD_DEFAULT };
+        PTHREAD_STACK_DEFAULT, -1 };
 #else
 ;
 #endif
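With PTHREAD_GUARD_DEFAULT gone, the static initializer of pthread_attr_default can no
longer name the guard size, so it stores -1 as a "not yet known" placeholder that
_thread_init() later overwrites with the page size. A hedged sketch of that sentinel
pattern, using stand-in names rather than the library's real structures:

    #include <unistd.h>

    /* Stand-in for pthread_attr_default.guardsize_attr. */
    static int guard_default_attr = -1;     /* -1: resolved at start-up */

    /* Stand-in for the resolution done in _thread_init(). */
    static void
    resolve_defaults(void)
    {
        if (guard_default_attr == -1)
            guard_default_attr = getpagesize();
    }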
@@ -45,9 +45,13 @@ _pthread_attr_setguardsize(pthread_attr_t *attr, size_t guardsize)
         if (attr == NULL || *attr == NULL)
                 ret = EINVAL;
         else {
-                /* Round guardsize up to the nearest multiple of PAGE_SIZE. */
-                if (guardsize % PAGE_SIZE != 0)
-                        guardsize = ((guardsize / PAGE_SIZE) + 1) * PAGE_SIZE;
+                /*
+                 * Round guardsize up to the nearest multiple of
+                 * pthread_page_size.
+                 */
+                if (guardsize % pthread_page_size != 0)
+                        guardsize = ((guardsize / pthread_page_size) + 1) *
+                            pthread_page_size;
 
                 /* Save the stack size. */
                 (*attr)->guardsize_attr = guardsize;
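The rounding arithmetic itself is unchanged; only the divisor is now a variable. For
example, on a 4096-byte-page system a requested guardsize of 1 becomes 4096, while 8192
is already a multiple and passes through untouched. A small self-contained check of that
expression, assuming nothing beyond getpagesize():

    #include <assert.h>
    #include <stddef.h>
    #include <unistd.h>

    int
    main(void)
    {
        size_t pagesize = (size_t)getpagesize();
        size_t guardsize = 1;

        /* Same round-up expression as in _pthread_attr_setguardsize(). */
        if (guardsize % pagesize != 0)
            guardsize = ((guardsize / pagesize) + 1) * pagesize;

        assert(guardsize == pagesize);          /* 1 rounds up to one page  */
        assert((2 * pagesize) % pagesize == 0); /* multiples pass through   */
        return (0);
    }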
@@ -146,6 +146,8 @@ static void *libgcc_references[] = {
         &_pthread_mutex_unlock
 };
 
+int pthread_guard_default;
+int pthread_page_size;
 
 /*
  * Threaded process initialization
@@ -158,9 +160,18 @@ _thread_init(void)
         int             i;
         size_t          len;
         int             mib[2];
+        int             sched_stack_size;       /* Size of scheduler stack. */
+
         struct clockinfo clockinfo;
         struct sigaction act;
 
+        pthread_page_size = getpagesize();
+        pthread_guard_default = getpagesize();
+        sched_stack_size = getpagesize();
+
+        pthread_attr_default.guardsize_attr = pthread_guard_default;
+
+
         /* Check if this function has already been called: */
         if (_thread_initial)
                 /* Only initialise the threaded application once. */
@@ -247,7 +258,7 @@ _thread_init(void)
                 PANIC("Cannot allocate memory for initial thread");
         }
         /* Allocate memory for the scheduler stack: */
-        else if ((_thread_kern_sched_stack = malloc(SCHED_STACK_SIZE)) == NULL)
+        else if ((_thread_kern_sched_stack = malloc(sched_stack_size)) == NULL)
                 PANIC("Failed to allocate stack for scheduler");
         else {
                 /* Zero the global kernel thread structure: */
@@ -280,7 +291,7 @@ _thread_init(void)
          * thread stack that is just beyond.
          */
         if (mmap(_usrstack - PTHREAD_STACK_INITIAL -
-            PTHREAD_GUARD_DEFAULT, PTHREAD_GUARD_DEFAULT, 0, MAP_ANON,
+            pthread_guard_default, pthread_guard_default, 0, MAP_ANON,
             -1, 0) == MAP_FAILED)
                 PANIC("Cannot allocate red zone for initial thread");
 
@@ -294,7 +305,7 @@ _thread_init(void)
         /* Setup the context for the scheduler: */
         _setjmp(_thread_kern_sched_jb);
         SET_STACK_JB(_thread_kern_sched_jb, _thread_kern_sched_stack +
-            SCHED_STACK_SIZE - sizeof(double));
+            sched_stack_size - sizeof(double));
         SET_RETURN_ADDR_JB(_thread_kern_sched_jb, _thread_kern_scheduler);
 
         /*
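In _thread_init() the scheduler stack, previously malloc(SCHED_STACK_SIZE) with
SCHED_STACK_SIZE hard-wired to PAGE_SIZE, is now sized from the same run-time query.
A stripped-down illustration of that ordering, not the real function body (error
handling reduced to a NULL check):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int pthread_guard_default;
    int pthread_page_size;

    int
    main(void)
    {
        int sched_stack_size;
        void *sched_stack;

        /* Resolve all page-size-derived values once, before any use. */
        pthread_page_size = getpagesize();
        pthread_guard_default = getpagesize();
        sched_stack_size = getpagesize();

        /* The scheduler stack is still one page, but the size is queried. */
        if ((sched_stack = malloc(sched_stack_size)) == NULL) {
            fprintf(stderr, "Failed to allocate stack for scheduler\n");
            return (1);
        }
        free(sched_stack);
        return (0);
    }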
@@ -122,13 +122,13 @@ _thread_stack_alloc(size_t stacksize, size_t guardsize)
         size_t stack_size;
 
         /*
-         * Round up stack size to nearest multiple of PAGE_SIZE, so that mmap()
+         * Round up stack size to nearest multiple of pthread_page_size, so that mmap()
          * will work. If the stack size is not an even multiple, we end up
          * initializing things such that there is unused space above the
          * beginning of the stack, so the stack sits snugly against its guard.
          */
-        if (stacksize % PAGE_SIZE != 0)
-                stack_size = ((stacksize / PAGE_SIZE) + 1) * PAGE_SIZE;
+        if (stacksize % pthread_page_size != 0)
+                stack_size = ((stacksize / pthread_page_size) + 1) * pthread_page_size;
         else
                 stack_size = stacksize;
 
@@ -137,7 +137,7 @@ _thread_stack_alloc(size_t stacksize, size_t guardsize)
          * from the default-size stack cache:
          */
         if (stack_size == PTHREAD_STACK_DEFAULT &&
-            guardsize == PTHREAD_GUARD_DEFAULT) {
+            guardsize == pthread_guard_default) {
                 /*
                  * Use the garbage collector mutex for synchronization of the
                  * spare stack list.
@@ -187,7 +187,7 @@ _thread_stack_alloc(size_t stacksize, size_t guardsize)
 
         if (last_stack == NULL)
                 last_stack = _usrstack - PTHREAD_STACK_INITIAL -
-                    PTHREAD_GUARD_DEFAULT;
+                    pthread_guard_default;
 
         /* Allocate a new stack. */
         stack = last_stack - stack_size;
@@ -217,17 +217,17 @@ _thread_stack_free(void *stack, size_t stacksize, size_t guardsize)
         struct stack *spare_stack;
 
         spare_stack = (stack + stacksize - sizeof(struct stack));
-        /* Round stacksize up to nearest multiple of PAGE_SIZE. */
-        if (stacksize % PAGE_SIZE != 0) {
-                spare_stack->stacksize = ((stacksize / PAGE_SIZE) + 1) *
-                    PAGE_SIZE;
+        /* Round stacksize up to nearest multiple of pthread_page_size. */
+        if (stacksize % pthread_page_size != 0) {
+                spare_stack->stacksize = ((stacksize / pthread_page_size) + 1) *
+                    pthread_page_size;
         } else
                 spare_stack->stacksize = stacksize;
         spare_stack->guardsize = guardsize;
         spare_stack->stackaddr = stack;
 
         if (spare_stack->stacksize == PTHREAD_STACK_DEFAULT &&
-            spare_stack->guardsize == PTHREAD_GUARD_DEFAULT) {
+            spare_stack->guardsize == pthread_guard_default) {
                 /* Default stack/guard size. */
                 LIST_INSERT_HEAD(&_dstackq, spare_stack, qe);
         } else {
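The stack allocator keeps a cache of spare stacks that used the default stack and guard
sizes; the membership test now compares against the run-time pthread_guard_default
instead of the old macro. A tiny sketch of that check with illustrative parameters
(default_stack stands in for PTHREAD_STACK_DEFAULT):

    #include <stdbool.h>
    #include <stddef.h>

    /*
     * Illustrative only: a freed stack is recycled through the "default"
     * cache when both its size and its guard match the process-wide
     * defaults, which are now run-time values.
     */
    static bool
    is_default_stack(size_t stacksize, size_t guardsize,
        size_t default_stack, int guard_default)
    {
        return (stacksize == default_stack &&
            guardsize == (size_t)guard_default);
    }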