Allocate a stack for thread0 and switch to it before calling mi_startup().
This frees up kstack for static PAL/SAL calls and double-fault handling.
This commit is contained in:
Marcel Moolenaar 2008-02-04 02:21:33 +00:00
parent 5fd410a787
commit 8bd9e9f2df
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=175959
3 changed files with 34 additions and 21 deletions

View File

@@ -109,6 +109,28 @@ ENTRY_NOPROFILE(__start, 1)
nop 0
br.call.sptk.many rp=ia64_init
;;
}
// We have the new bspstore in r8 and the new sp in r9.
// Switch onto the new stack and call mi_startup().
{
mov ar.rsc = 0
;;
mov ar.bspstore = r8
mov sp = r9
;;
}
{
loadrs
;;
mov ar.rsc = 3
nop 0
;;
}
{
nop 0
nop 0
br.call.sptk.many rp=mi_startup
;;
}
/* NOTREACHED */
1: br.cond.sptk.few 1b

View File

@@ -109,8 +109,6 @@ u_int64_t pa_bootinfo;
struct bootinfo bootinfo;
struct pcpu pcpu0;
extern char kstack[];
vm_offset_t proc0kstack;
extern u_int64_t kernel_text[], _end[];
@@ -153,8 +151,6 @@ vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)
void mi_startup(void); /* XXX should be in a MI header */
struct kva_md_info kmi;
#define Mhz 1000000L
@@ -550,9 +546,10 @@ calculate_frequencies(void)
}
}
void
struct ia64_init_return
ia64_init(void)
{
struct ia64_init_return ret;
int phys_avail_cnt;
vm_offset_t kernstart, kernend;
vm_offset_t kernstartpfn, kernendpfn, pfn0, pfn1;
@@ -793,8 +790,7 @@ ia64_init(void)
/*
* Init mapping for kernel stack for proc 0
*/
proc0kstack = (vm_offset_t)kstack;
thread0.td_kstack = proc0kstack;
thread0.td_kstack = pmap_steal_memory(KSTACK_PAGES * PAGE_SIZE);
thread0.td_kstack_pages = KSTACK_PAGES;
mutex_init();
@@ -831,19 +827,9 @@ ia64_init(void)
ia64_set_tpr(0);
ia64_srlz_d();
/*
* Save our current context so that we have a known (maybe even
* sane) context as the initial context for new threads that are
* forked from us. If any of those threads (including thread0)
* does something wrong, we may be lucky and return here where
* we're ready for them with a nice panic.
*/
if (!savectx(thread0.td_pcb))
mi_startup();
/* We should not get here. */
panic("ia64_init: Whooaa there!");
/* NOTREACHED */
ret.bspstore = thread0.td_pcb->pcb_special.bspstore;
ret.sp = thread0.td_pcb->pcb_special.sp;
return (ret);
}
__volatile void *

View File

@@ -71,6 +71,11 @@ struct reg;
struct thread;
struct trapframe;
struct ia64_init_return {
uint64_t bspstore;
uint64_t sp;
};
void busdma_swi(void);
int copyout_regstack(struct thread *, uint64_t *, uint64_t *);
void cpu_mp_add(u_int, u_int, u_int);
@@ -82,7 +87,7 @@ int ia64_flush_dirty(struct thread *, struct _special *);
uint64_t ia64_get_hcdp(void);
int ia64_highfp_drop(struct thread *);
int ia64_highfp_save(struct thread *);
void ia64_init(void);
struct ia64_init_return ia64_init(void);
void ia64_probe_sapics(void);
void interrupt(struct trapframe *);
void map_gateway_page(void);