Adjust code to support AMD64. On AMD64, a thread must set fsbase by itself
before it can execute any other code, so a new thread is created with all
signals masked until after fsbase has been set.
parent be1d6f4eb1
commit 1d227ebfe2
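As a rough sketch of this pattern using portable POSIX calls (not the libthr code itself; setup_tls(), struct start_arg and create_with_signals_blocked() are made-up names for illustration): the creator blocks all signals so the new thread starts with them masked, the new thread completes its per-thread setup first, and only then restores the mask it inherited.

#include <pthread.h>
#include <signal.h>

struct start_arg {
        sigset_t savedsig;              /* mask the new thread restores later */
        void *(*routine)(void *);
        void *arg;
};

/* Placeholder for the per-thread setup that must precede any signal code. */
static void setup_tls(void) { }

static void *
start_thread(void *p)
{
        struct start_arg *sa = p;

        setup_tls();                    /* e.g. set fsbase / TLS base */
        pthread_sigmask(SIG_SETMASK, &sa->savedsig, NULL);
        return (sa->routine(sa->arg));
}

int
create_with_signals_blocked(pthread_t *tid, struct start_arg *sa)
{
        sigset_t all, old;
        int ret;

        sigfillset(&all);
        pthread_sigmask(SIG_SETMASK, &all, &old);   /* new thread inherits this */
        sa->savedsig = old;
        ret = pthread_create(tid, NULL, start_thread, sa);
        pthread_sigmask(SIG_SETMASK, &old, NULL);   /* restore creator's mask */
        return (ret);
}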
@@ -47,13 +47,16 @@ struct tcb {
         struct tcb *tcb_self;          /* required by rtld */
         void *tcb_dtv;                 /* required by rtld */
         struct pthread *tcb_thread;
+        int tcb_ldt;
 };
 
 void
 _retire_thread(void *entry)
 {
-        _rtld_free_tls(entry, sizeof(struct tcb), 16);
-        /* XXX free ldt descriptor here */
+        struct tcb *tcb = (struct tcb *)entry;
+
+        i386_set_ldt(tcb->tcb_ldt, NULL, 1);
+        _rtld_free_tls(tcb, sizeof(struct tcb), 16);
 }
 
 void *
@@ -66,6 +69,10 @@ _set_curthread(ucontext_t *uc, struct pthread *thr, int *err)
 
         *err = 0;
 
+        if (uc == NULL && thr->arch_id != NULL) {
+                return (thr->arch_id);
+        }
+
         if (uc == NULL) {
                 __asm __volatile("movl %%gs:0, %0" : "=r" (oldtls));
         } else {
@@ -104,7 +111,7 @@ _set_curthread(ucontext_t *uc, struct pthread *thr, int *err)
         ldt_index = i386_set_ldt(LDT_AUTO_ALLOC, &desc, 1);
         if (ldt_index == -1)
                 abort();
-
+        tcb->tcb_ldt = ldt_index;
         /*
          * Set up our gs with the index into the ldt for this entry.
          */
@@ -99,9 +99,10 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
 
         /* Initialise the machine context: */
         getcontext(&new_thread->ctx);
+        new_thread->savedsig = new_thread->ctx.uc_sigmask;
         new_thread->ctx.uc_stack.ss_sp = new_thread->stack;
         new_thread->ctx.uc_stack.ss_size = pattr->stacksize_attr;
-        makecontext(&new_thread->ctx, _thread_start, 0);
+        makecontext(&new_thread->ctx, (void (*)(void))_thread_start, 1, new_thread);
         new_thread->arch_id = _set_curthread(&new_thread->ctx, new_thread, &ret);
         if (ret != 0) {
                 if (pattr->stackaddr_attr == NULL) {
@@ -145,7 +146,11 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
          */
         if (pattr->suspend == PTHREAD_CREATE_SUSPENDED)
                 new_thread->flags |= PTHREAD_FLAGS_SUSPENDED;
+        /* The new thread inherits the signal mask in the kernel. */
+        _thread_sigblock();
         ret = thr_create(&new_thread->ctx, &new_thread->thr_id, flags);
+        /* Restore our signal mask. */
+        _thread_sigunblock();
         if (ret != 0) {
                 _thread_printf(STDERR_FILENO, "thr_create() == %d\n", ret);
                 PANIC("thr_create");
@@ -160,12 +165,24 @@ _pthread_create(pthread_t * thread, const pthread_attr_t * attr,
 }
 
 void
-_thread_start(void)
+_thread_start(pthread_t td)
 {
+        int ret;
+
+        /*
+         * On AMD64, the thread must set fsbase by itself; until fsbase
+         * is set, we cannot run any other code, for example signal
+         * code.
+         */
+        _set_curthread(NULL, td, &ret);
+
+        /* Restore the signal mask inherited at creation. */
+        __sys_sigprocmask(SIG_SETMASK, &td->savedsig, NULL);
+
         if ((curthread->flags & PTHREAD_FLAGS_SUSPENDED) != 0)
                 _thread_suspend(curthread, NULL);
-        pthread_exit(curthread->start_routine(curthread->arg));
 
+        pthread_exit(curthread->start_routine(curthread->arg));
         /* This point should never be reached. */
         PANIC("Thread has resumed after exit");
 }
@@ -797,7 +797,7 @@ void _thread_cleanupspecific(void);
 void _thread_dump_info(void);
 void _thread_init(void);
 void _thread_printf(int fd, const char *, ...);
-void _thread_start(void);
+void _thread_start(pthread_t td);
 void _thread_seterrno(pthread_t, int);
 void _thread_enter_cancellation_point(void);
 void _thread_leave_cancellation_point(void);