Allocate a stack (with optional guard pages) for thread0 and
switch to it before calling mi_startup().
Marcel Moolenaar 2008-04-16 23:28:12 +00:00
parent 640bd7d7d7
commit 014ffa990d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=178261
3 changed files with 60 additions and 70 deletions
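Read together, the three diffs below change the early boot hand-off: the moea pmap bootstrap now allocates and maps thread0's kernel stack itself, powerpc_init() returns the initial stack pointer (16-byte aligned, just below thread0's PCB) instead of returning void, and the locore startup code loads that value into %r1 before branching to mi_startup(). The following toy user-space sketch of that hand-off is only an illustration; toy_powerpc_init(), toy_mi_startup() and the TOY_* constants are stand-ins, not kernel interfaces.

/* Toy user-space model of the new hand-off; every name here is illustrative. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define TOY_PAGE_SIZE           4096u
#define TOY_KSTACK_PAGES        4u

/* Stands in for powerpc_init(): set up a stack and return its initial SP. */
static uintptr_t
toy_powerpc_init(void)
{
        uint8_t *kstack = malloc(TOY_KSTACK_PAGES * TOY_PAGE_SIZE);
        uintptr_t top;

        if (kstack == NULL)
                abort();
        top = (uintptr_t)kstack + TOY_KSTACK_PAGES * TOY_PAGE_SIZE;

        /* Leave room at the top for a PCB-like control block, then align. */
        return ((top - 256) & ~(uintptr_t)15);
}

/* Stands in for mi_startup(), which the real code reaches on the new stack. */
static void
toy_mi_startup(void)
{
        printf("running after the stack switch\n");
}

int
main(void)
{
        uintptr_t sp = toy_powerpc_init();

        /*
         * locore.S now does the switch in three instructions:
         *   mr  %r1,%r3      use the returned value as the stack pointer
         *   li  %r3,0
         *   stw %r3,0(%r1)   zero the back-chain word of the first frame
         */
        *(uintptr_t *)sp = 0;
        toy_mi_startup();
        printf("initial sp = %#lx\n", (unsigned long)sp);
        return (0);
}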

@@ -182,6 +182,9 @@ __start:
 	mr	7,21
 	bl	powerpc_init
+	mr	%r1, %r3
+	li	%r3, 0
+	stw	%r3, 0(%r1)
 	bl	mi_startup
 	b	OF_exit
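The three added instructions take the value returned by powerpc_init() as the new stack pointer and store zero at its first word. On 32-bit PowerPC (SVR4 ABI) the word at the stack pointer is the frame's back-chain slot, and a null back chain marks the outermost frame, so debuggers and stack walkers stop there instead of wandering off the new stack. A small self-contained illustration of that convention (not kernel code) is below.

/*
 * Illustration of the back-chain convention the new "stw %r3,0(%r1)"
 * relies on: each frame's first word points at the previous frame, and
 * the outermost frame stores 0 so a walker knows where to stop.
 */
#include <stddef.h>
#include <stdio.h>

struct toy_frame {
        struct toy_frame *back_chain;   /* word at offset 0 of the SP */
        const char *name;
};

static void
walk_stack(const struct toy_frame *sp)
{
        while (sp != NULL) {
                printf("frame: %s\n", sp->name);
                sp = sp->back_chain;    /* NULL (0) terminates the walk */
        }
}

int
main(void)
{
        struct toy_frame outer = { NULL, "mi_startup" };   /* back chain = 0 */
        struct toy_frame inner = { &outer, "some_callee" };

        walk_stack(&inner);
        return (0);
}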

@@ -132,9 +132,6 @@ int cold = 1;
 static struct pcpu	pcpu0;
 static struct trapframe frame0;
 
-vm_offset_t	kstack0;
-vm_offset_t	kstack0_phys;
-
 char		machine[] = "powerpc";
 SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, "");
@@ -145,7 +142,7 @@ SYSCTL_INT(_machdep, CPU_CACHELINE, cacheline_size,
 static void	cpu_startup(void *);
 SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
 
-void		powerpc_init(u_int, u_int, u_int, void *);
+u_int		powerpc_init(u_int, u_int, u_int, void *);
 
 int		save_ofw_mapping(void);
 int		restore_ofw_mapping(void);
@@ -248,11 +245,11 @@ extern void *extint, *extsize;
 extern void *dblow, *dbsize;
 extern void *vectrap, *vectrapsize;
 
-void
+u_int
 powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
 {
 	struct pcpu *pc;
-	vm_offset_t	end, off;
+	vm_offset_t	end;
 	void		*kmdp;
 	char		*env;
@@ -295,7 +292,6 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
 	pc = &pcpu0;
 	pcpu_init(pc, 0, sizeof(struct pcpu));
 	pc->pc_curthread = &thread0;
-	pc->pc_curpcb = thread0.td_pcb;
 	pc->pc_cpuid = 0;
 
 	__asm __volatile("mtsprg 0, %0" :: "r"(pc));
@@ -379,15 +375,12 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
 	/*
 	 * Finish setting up thread0.
 	 */
-	thread0.td_kstack = kstack0;
 	thread0.td_pcb = (struct pcb *)
-	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
+	    ((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
+	    sizeof(struct pcb)) & ~15);
+	pc->pc_curpcb = thread0.td_pcb;
 
-	/*
-	 * Map and initialise the message buffer.
-	 */
-	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
-		pmap_kenter((vm_offset_t)msgbufp + off, msgbuf_phys + off);
+	/* Initialise the message buffer. */
 	msgbufinit(msgbufp, MSGBUF_SIZE);
 
 #ifdef KDB
@@ -395,6 +388,8 @@ powerpc_init(u_int startkernel, u_int endkernel, u_int basekernel, void *mdp)
 		kdb_enter(KDB_WHY_BOOTFLAGS,
 		    "Boot flags requested debugger");
 #endif
+
+	return (((uintptr_t)thread0.td_pcb - 16) & ~15);
 }
 
 void
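On the machdep.c side the function no longer assumes a fixed, statically reserved kstack0: thread0's PCB is placed at the top of whatever stack the pmap bootstrap attached to the thread, rounded down to a 16-byte boundary, and the value returned to locore is 16 bytes below that, again 16-byte aligned as the ABI requires for a stack pointer. The arithmetic can be checked in isolation with the sketch below; PAGE_SIZE, KSTACK_PAGES and struct pcb here are assumed, illustrative definitions rather than the kernel's.

/* Stand-alone check of the td_pcb / initial-SP arithmetic (illustrative). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE       4096u           /* assumed value, for illustration only */
#define KSTACK_PAGES    4u              /* assumed value, for illustration only */

struct pcb {                            /* stand-in for the kernel's struct pcb */
        uint32_t        pcb_regs[32];
        double          pcb_fpu[33];
};

int
main(void)
{
        static _Alignas(16) uint8_t kstack[KSTACK_PAGES * PAGE_SIZE];
        uintptr_t td_kstack = (uintptr_t)kstack;

        /* thread0.td_pcb, as powerpc_init() now computes it. */
        uintptr_t pcb = (td_kstack + KSTACK_PAGES * PAGE_SIZE -
            sizeof(struct pcb)) & ~(uintptr_t)15;

        /* The value powerpc_init() returns for locore to load into %r1. */
        uintptr_t sp = (pcb - 16) & ~(uintptr_t)15;

        assert((pcb & 15) == 0 && (sp & 15) == 0);
        assert(sp >= td_kstack &&
            pcb + sizeof(struct pcb) <= td_kstack + KSTACK_PAGES * PAGE_SIZE);
        printf("kstack %#lx..%#lx  pcb %#lx  sp %#lx\n",
            (unsigned long)td_kstack,
            (unsigned long)(td_kstack + KSTACK_PAGES * PAGE_SIZE),
            (unsigned long)pcb, (unsigned long)sp);
        return (0);
}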

@@ -266,8 +266,8 @@ SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
 struct	pvo_entry *moea_pvo_zeropage;
 struct	mtx	moea_pvo_zeropage_mtx;
 
-vm_offset_t	moea_rkva_start = VM_MIN_KERNEL_ADDRESS;
-u_int		moea_rkva_count = 4;
+vm_offset_t	moea_rkva_start;
+u_int		moea_rkva_count;
 
 /*
  * Allocate physical memory for use in moea_bootstrap.
@@ -664,27 +664,11 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
 	    :: "r"(batu), "r"(batl));
 
-#if 0
-	/* map frame buffer */
-	batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
-	batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
-	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
-	    :: "r"(batu), "r"(batl));
-#endif
-
-#if 1
 	/* map pci space */
 	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
 	batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
 	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
 	    :: "r"(batu), "r"(batl));
-#endif
-
-	/*
-	 * Set the start and end of kva.
-	 */
-	virtual_avail = VM_MIN_KERNEL_ADDRESS;
-	virtual_end = VM_MAX_KERNEL_ADDRESS;
 
 	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
 	CTR0(KTR_PMAP, "moea_bootstrap: physical memory");
@@ -784,11 +768,6 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 	mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF |
 	    MTX_RECURSE);
 
-	/*
-	 * Allocate the message buffer.
-	 */
-	msgbuf_phys = moea_bootstrap_alloc(MSGBUF_SIZE, 0);
-
 	/*
 	 * Initialise the unmanaged pvo pool.
 	 */
@@ -856,9 +835,13 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 			ofw_mappings++;
 		}
 	}
-#ifdef SMP
-	TLBSYNC();
-#endif
+
+	/*
+	 * Calculate the last available physical address.
+	 */
+	for (i = 0; phys_avail[i + 2] != 0; i += 2)
+		;
+	Maxmem = powerpc_btop(phys_avail[i + 1]);
 
 	/*
 	 * Initialize the kernel pmap (which is statically allocated).
@@ -871,36 +854,6 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 	kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
 	kernel_pmap->pm_active = ~0;
 
-	/*
-	 * Allocate a kernel stack with a guard page for thread0 and map it
-	 * into the kernel page map.
-	 */
-	pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
-	kstack0_phys = pa;
-	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
-	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
-	    kstack0);
-	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
-
-	for (i = 0; i < KSTACK_PAGES; i++) {
-		pa = kstack0_phys + i * PAGE_SIZE;
-		va = kstack0 + i * PAGE_SIZE;
-		moea_kenter(mmup, va, pa);
-		TLBIE(va);
-	}
-
-	/*
-	 * Calculate the last available physical address.
-	 */
-	for (i = 0; phys_avail[i + 2] != 0; i += 2)
-		;
-	Maxmem = powerpc_btop(phys_avail[i + 1]);
-
-	/*
-	 * Allocate virtual address space for the message buffer.
-	 */
-	msgbufp = (struct msgbuf *)virtual_avail;
-	virtual_avail += round_page(MSGBUF_SIZE);
-
 	/*
 	 * Initialize hardware.
 	 */
@@ -916,6 +869,45 @@ moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
 	tlbia();
 
 	pmap_bootstrapped++;
+
+	/*
+	 * Set the start and end of kva.
+	 */
+	virtual_avail = VM_MIN_KERNEL_ADDRESS;
+	virtual_end = VM_MAX_KERNEL_ADDRESS;
+
+	moea_rkva_start = virtual_avail;
+	moea_rkva_count = 4;
+	virtual_avail += moea_rkva_count * PAGE_SIZE;
+
+	/*
+	 * Allocate a kernel stack with a guard page for thread0 and map it
+	 * into the kernel page map.
+	 */
+	pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
+	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
+	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
+	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
+	thread0.td_kstack = va;
+	thread0.td_kstack_pages = KSTACK_PAGES;
+	for (i = 0; i < KSTACK_PAGES; i++) {
+		moea_kenter(mmup, va, pa);
+		pa += PAGE_SIZE;
+		va += PAGE_SIZE;
+	}
+
+	/*
+	 * Allocate virtual address space for the message buffer.
+	 */
+	pa = msgbuf_phys = moea_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE);
+	msgbufp = (struct msgbuf *)virtual_avail;
+	va = virtual_avail;
+	virtual_avail += round_page(MSGBUF_SIZE);
+	while (va < virtual_avail) {
+		moea_kenter(mmup, va, pa);
+		pa += PAGE_SIZE;
+		va += PAGE_SIZE;
+	}
 }
 
 /*
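In moea_bootstrap() the stack now comes out of the early KVA carve-out: virtual_avail starts at VM_MIN_KERNEL_ADDRESS, the moea_rkva region takes the first four pages, KSTACK_GUARD_PAGES worth of address space is then skipped without ever being entered into the page table (so an overflow of the downward-growing stack faults instead of silently corrupting memory), the next KSTACK_PAGES pages are mapped onto the freshly allocated, now page-aligned physical stack, and the message buffer is mapped right after. The sketch below mimics that bookkeeping with a toy kenter() that only prints the mappings; all constants, including the VM_MIN_KERNEL_ADDRESS value, are assumptions for illustration.

/* Toy model of the early-KVA layout set up in moea_bootstrap(); illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE               4096u
#define KSTACK_PAGES            4u
#define KSTACK_GUARD_PAGES      1u
#define RKVA_PAGES              4u              /* mirrors moea_rkva_count = 4 */
#define MSGBUF_PAGES            4u              /* stand-in for round_page(MSGBUF_SIZE) */
#define VM_MIN_KERNEL_ADDRESS   0x64000000u     /* assumed value, for illustration */

static void
toy_kenter(uint32_t va, uint32_t pa)
{
        /* The real code calls moea_kenter(mmup, va, pa) here. */
        printf("map va %#x -> pa %#x\n", (unsigned)va, (unsigned)pa);
}

int
main(void)
{
        uint32_t virtual_avail = VM_MIN_KERNEL_ADDRESS;
        uint32_t pa = 0x00100000;       /* pretend moea_bootstrap_alloc() result */
        uint32_t va, i;

        /* The moea_rkva_* pages come first (four pages, as in the diff above). */
        virtual_avail += RKVA_PAGES * PAGE_SIZE;

        /* Guard pages: the address space is skipped but never mapped. */
        va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
        virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
        printf("kstack at va %#x, guard below it stays unmapped\n", (unsigned)va);
        for (i = 0; i < KSTACK_PAGES; i++) {
                toy_kenter(va, pa);
                pa += PAGE_SIZE;
                va += PAGE_SIZE;
        }

        /* The message buffer takes the next chunk of KVA, page by page. */
        va = virtual_avail;
        virtual_avail += MSGBUF_PAGES * PAGE_SIZE;
        while (va < virtual_avail) {
                toy_kenter(va, pa);
                pa += PAGE_SIZE;
                va += PAGE_SIZE;
        }
        return (0);
}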