Improve kernel stack handling on e500.

- Allocate thread0.td_kstack in pmap_bootstrap(), provide guard page
- Switch to thread0.td_kstack as soon as possible i.e. right after return
  from e500_init() and before mi_startup() happens
- Clean up temp stack area
- Other minor cosmetics in machdep.c

Obtained from:	Semihalf
This commit is contained in:
raj 2008-08-26 17:07:37 +00:00
parent e8ca7cbe6f
commit f016db7e16
3 changed files with 56 additions and 24 deletions

View File

@ -39,6 +39,8 @@
#include <machine/tlb.h>
#include <machine/bootinfo.h>
#define TMPSTACKSZ 16384
/*
* This symbol is here for the benefit of kvm_mkdb, and is supposed to
* mark the start of kernel text.
@ -258,9 +260,9 @@ __start:
/*
* Setup a temporary stack
*/
lis %r1, kstack0_space@ha
addi %r1, %r1, kstack0_space@l
addi %r1, %r1, (16384 - 512)
lis %r1, tmpstack@ha
addi %r1, %r1, tmpstack@l
addi %r1, %r1, (TMPSTACKSZ - 8)
/*
* Initialise exception vector offsets
@ -277,7 +279,13 @@ __start:
lis %r4, _end@ha
addi %r4, %r4, _end@l
bl e500_init /* Prepare e500 core */
bl e500_init
/* Switch to thread0.td_kstack */
mr %r1, %r3
li %r3, 0
stw %r3, 0(%r1)
bl mi_startup /* Machine independent part, does not return */
/************************************************************************/
@ -469,8 +477,8 @@ setfault:
/************************************************************************/
.data
.align 4
GLOBAL(kstack0_space)
.space 16384
tmpstack:
.space TMPSTACKSZ
/*
* Compiled KERNBASE locations
@ -481,7 +489,6 @@ GLOBAL(kstack0_space)
/*
* Globals
*/
#define INTSTK 16384 /* 16K interrupt stack */
#define INTRCNT_COUNT 256 /* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */
GLOBAL(kernload)

View File

@ -154,10 +154,6 @@ extern unsigned char _end[];
extern struct mem_region availmem_regions[];
extern int availmem_regions_sz;
extern void *trapcode, *trapsize;
extern unsigned char kstack0_space[];
extern void dcache_enable(void);
extern void dcache_inval(void);
extern void icache_enable(void);
@ -185,11 +181,12 @@ SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_e500_startup, NULL);
void print_kernel_section_addr(void);
void dump_bootinfo(void);
void dump_kenv(void);
void e500_init(u_int32_t, u_int32_t, void *);
u_int e500_init(u_int32_t, u_int32_t, void *);
static void
cpu_e500_startup(void *dummy)
{
int indx, size;
/* Initialise the decrementer-based clock. */
decr_init();
@ -198,20 +195,17 @@ cpu_e500_startup(void *dummy)
cpu_setup(PCPU_GET(cpuid));
printf("real memory = %ld (%ld MB)\n", ptoa(physmem),
ptoa(physmem) / 1048576);
ptoa(physmem) / 1048576);
realmem = physmem;
/* Display any holes after the first chunk of extended memory. */
if (bootverbose) {
int indx;
printf("Physical memory chunk(s):\n");
for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
int size1 = phys_avail[indx + 1] - phys_avail[indx];
size = phys_avail[indx + 1] - phys_avail[indx];
printf("0x%08x - 0x%08x, %d bytes (%d pages)\n",
phys_avail[indx], phys_avail[indx + 1] - 1, size1,
size1 / PAGE_SIZE);
phys_avail[indx], phys_avail[indx + 1] - 1, size,
size / PAGE_SIZE);
}
}
@ -328,7 +322,7 @@ bootinfo_eth(void)
return (eth);
}
void
u_int
e500_init(u_int32_t startkernel, u_int32_t endkernel, void *mdp)
{
struct pcpu *pc;
@ -445,9 +439,9 @@ e500_init(u_int32_t startkernel, u_int32_t endkernel, void *mdp)
init_param2(physmem);
/* Finish setting up thread0. */
thread0.td_kstack = (vm_offset_t)kstack0_space;
thread0.td_pcb = (struct pcb *)
(thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
((thread0.td_kstack + thread0.td_kstack_pages * PAGE_SIZE -
sizeof(struct pcb)) & ~15);
bzero((void *)thread0.td_pcb, sizeof(struct pcb));
pc->pc_curpcb = thread0.td_pcb;
@ -480,7 +474,10 @@ e500_init(u_int32_t startkernel, u_int32_t endkernel, void *mdp)
printf("L1 I-cache %sabled\n",
(csr & L1CSR1_ICE) ? "en" : "dis");
debugf("e500_init: SP = 0x%08x\n", ((uintptr_t)thread0.td_pcb - 16) & ~15);
debugf("e500_init: e\n");
return (((uintptr_t)thread0.td_pcb - 16) & ~15);
}
/* Initialise a struct pcpu. */

View File

@ -897,8 +897,9 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend)
int cnt, i, j;
u_int s, e, sz;
u_int phys_avail_count;
vm_size_t physsz, hwphyssz;
vm_offset_t kernel_pdir;
vm_size_t physsz, hwphyssz, kstack0_sz;
vm_offset_t kernel_pdir, kstack0;
vm_paddr_t kstack0_phys;
debugf("mmu_booke_bootstrap: entered\n");
@ -1055,6 +1056,16 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend)
}
availmem_regions_sz = cnt;
/*******************************************************/
/* Steal physical memory for the kernel stack from the */
/* end of the first available memory region            */
/*******************************************************/
kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
kstack0_phys = availmem_regions[0].mr_start +
availmem_regions[0].mr_size;
kstack0_phys -= kstack0_sz;
availmem_regions[0].mr_size -= kstack0_sz;
/*******************************************************/
/* Fill in phys_avail table, based on availmem_regions */
/*******************************************************/
@ -1125,6 +1136,23 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend)
/*******************************************************/
/* Final setup */
/*******************************************************/
/* Enter kstack0 into kernel map, provide guard page */
kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
thread0.td_kstack = kstack0;
thread0.td_kstack_pages = KSTACK_PAGES;
debugf("kstack_sz = 0x%08x\n", kstack0_sz);
debugf("kstack0_phys at 0x%08x - 0x%08x\n",
kstack0_phys, kstack0_phys + kstack0_sz);
debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);
virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
for (i = 0; i < KSTACK_PAGES; i++) {
mmu_booke_kenter(mmu, kstack0, kstack0_phys);
kstack0 += PAGE_SIZE;
kstack0_phys += PAGE_SIZE;
}
/* Initialize TLB0 handling. */
tlb0_init();