Fix an alpha inheritance bug:
On alpha, PAL is involved in context management and after wiring the
CPU (in alpha_init()) a context switch was performed to tell PAL about
the context. This was bogusly brought over to ia64 where it introduced
bugs, because we restored the context from a mostly uninitialized PCB.

The cleanup constitutes:
o  Remove the unused arguments from ia64_init().
o  Don't return from ia64_init(), but instead call mi_startup()
   directly. This reduces the amount of muckery in assembly and also
   allows for the next bullet:
o  Save our current context prior to calling mi_startup(). The reason
   for this is that many threads are created from thread0 by cloning
   the PCB. By saving our context in the PCB, we have something sane to
   clone. It also ensures that a cloned thread that does not alter the
   context in any way will return to the saved context, where we're
   ready for the eventuality with a nice, user unfriendly panic().

The cleanup fixes at least the following bugs:
o  Entering mi_startup() with the RSE in enforced lazy mode.
o  Re-execution of ia64_init() in certain "lab" conditions.

While here, add proper unwind directives to __start() so that the
unwinder knows it has reached the bottom of the (call) stack.

Approved by: re@ (blanket)
commit 0f7b725a60 (parent 8d4f097d8b)
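The savectx()/mi_startup() arrangement added at the end of ia64_init() behaves like setjmp(): savectx() returns once when the context is recorded in thread0's PCB, and that saved context can appear to "return" a second time if something later resumes it, which is exactly when we want to panic. The user-space sketch below models that control flow with the portable setjmp()/longjmp() pair; it is only an analogy under that assumed savectx() semantic (return 0 on the initial save, non-zero when resumed), not the kernel's savectx()/restorectx() interface, and mi_startup_stub() is a made-up stand-in.

#include <setjmp.h>
#include <stdio.h>
#include <stdlib.h>

static jmp_buf boot_ctx;	/* stands in for thread0's PCB */

/* Stand-in for mi_startup(); normally it would never return. */
static void
mi_startup_stub(void)
{
	printf("machine-independent startup running\n");
	/*
	 * A thread that runs off the end of a context cloned from the
	 * saved one is modelled here by jumping back to the save point.
	 */
	longjmp(boot_ctx, 1);
}

int
main(void)
{
	/*
	 * First pass: setjmp() returns 0 and we proceed into startup.
	 * If the saved context is ever resumed, setjmp() appears to
	 * return again with a non-zero value and we fall through to
	 * the panic path below.
	 */
	if (setjmp(boot_ctx) == 0)
		mi_startup_stub();

	/* We should not get here. */
	fprintf(stderr, "panic: returned to boot context\n");
	abort();
}

In the kernel the second return never happens during normal operation; it only fires when a thread cloned from thread0's PCB falls back into the saved context, which is the failure mode this commit turns into a diagnosable panic instead of undefined behavior.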
@@ -51,6 +51,9 @@ kstack:	.space KSTACK_PAGES * PAGE_SIZE
  * register r8.
  */
 ENTRY(__start, 1)
+	.prologue
+	.save	rp,r0
+	.body
 	{ .mlx
 	mov	ar.rsc=0
 	movl	r16=ia64_vector_table	// set up IVT early
@@ -90,41 +93,10 @@ ENTRY(__start, 1)
 	br.call.sptk.many rp=_reloc
 	;;
 	br.call.sptk.many rp=ia64_init
-
-	/*
-	 * switch to thread0 and then initialise the rest of the kernel.
-	 */
-	alloc	r16=ar.pfs,0,0,1,0
-	;;
-	movl	out0=thread0
 	;;
-	add	out0=TD_PCB,out0
-	;;
-	ld8	out0=[out0]
-	;;
-	add	r16=PCB_SPECIAL_RP,out0	// return to mi_startup_trampoline
-	movl	r17=mi_startup_trampoline
-	;;
-	st8	[r16]=r17
-	;;
-	br.call.sptk.many rp=restorectx
-
-	/* NOTREACHED */
-
-END(__start)
-
-
-ENTRY(mi_startup_trampoline, 0)
-	.prologue
-	.save	rp,r0
-	.body
-
-	br.call.sptk.many rp=mi_startup
-
-	// Should never happen
+	/* NOTREACHED */
 1:	br.cond.sptk.few 1b
-
-END(mi_startup_trampoline)
+END(__start)
 
 /*
  * fork_trampoline()

@@ -149,6 +149,8 @@ vm_offset_t phys_avail[100];
 /* must be 2 less so 0 0 can signal end of chunks */
 #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)
 
+void mi_startup(void);		/* XXX should be in a MI header */
+
 static void identifycpu(void);
 
 struct kva_md_info kmi;
@@ -441,7 +443,7 @@ calculate_frequencies(void)
 }
 
 void
-ia64_init(u_int64_t arg1, u_int64_t arg2)
+ia64_init(void)
 {
 	int phys_avail_cnt;
 	vm_offset_t kernstart, kernend;
@@ -762,6 +764,20 @@ ia64_init(u_int64_t arg1, u_int64_t arg2)
 	}
 #endif
 	ia64_set_tpr(0);
+
+	/*
+	 * Save our current context so that we have a known (maybe even
+	 * sane) context as the initial context for new threads that are
+	 * forked from us. If any of those threads (including thread0)
+	 * does something wrong, we may be lucky and return here where
+	 * we're ready for them with a nice panic.
+	 */
+	if (!savectx(thread0.td_pcb))
+		mi_startup();
+
+	/* We should not get here. */
+	panic("ia64_init: Whooaa there!");
+	/* NOTREACHED */
 }
 
 void

@@ -114,7 +114,7 @@ long fswintrberr(void); /* MAGIC */
 int ia64_highfp_drop(struct thread *);
 int ia64_highfp_load(struct thread *);
 int ia64_highfp_save(struct thread *);
-void ia64_init(u_int64_t, u_int64_t);
+void ia64_init(void);
 int ia64_pa_access(u_long);
 void init_prom_interface(struct rpb*);
 void interrupt(u_int64_t, struct trapframe *);