Add support for Juniper's loader.

The difference between FreeBSD's and Juniper's loader is that Juniper's
loader maps all of the kernel and preloaded modules at the right virtual
addresses before jumping into the kernel. FreeBSD's loader simply maps
16MB using the physical address and expects the kernel to jump through
hoops to relocate itself to its virtual address. The problem with the
FreeBSD loader's approach is that it typically maps too much or too
little. There's no harm if it's too much (other than wasting space), but
if it's too little the kernel will simply not boot, because the first
thing the kernel needs is the bootinfo structure, which is then never
mapped. A page fault that early is fatal.

The changes are:
1. Do not remap the kernel in locore.S. We are already mapped where we
   need to be, so we can call into C code right after setting up the
   stack.
2. With kernload and kernload_ap no longer set in locore.S, set them in
   pmap.c instead: kernload is derived while preserving the existing
   TLB1 entries, where we also determine the size of the kernel that is
   actually mapped; kernload_ap is set first thing in pmap_bootstrap().
3. Fix tlb1_mapin_region() and its use to properly extend the mapped
   kernel size to include the low-level data structures.

Approved by:	re (blanket)
Obtained from:	Juniper Networks, Inc.
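For context, the two loaders are told apart by the bootinfo block they pass
in: FreeBSD's loader stores version 1 in bootinfo[0] and keeps the timebase
ticks (shifted left by 3) in word 3, while Juniper's loader uses a layout
with the ticks in word 5 and a 16-bit count of the TLB1 entries it set up at
bootinfo + 8 (all of this is visible in the hunks below). The following
standalone sketch only restates that convention; the helper names and the
test harness are illustrative and not part of the commit.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustrative only: mirrors how the patched code distinguishes the
     * two loaders by the bootinfo version word and picks the right fields.
     */
    static int
    bootinfo_is_freebsd_loader(const uint32_t *bootinfo)
    {
        /* FreeBSD's loader marks the legacy layout with version 1. */
        return (bootinfo != NULL && bootinfo[0] == 1);
    }

    static uint32_t
    bootinfo_timebase_ticks(const uint32_t *bootinfo)
    {
        if (bootinfo == NULL)
            return (0);
        /* Legacy layout keeps ticks<<3 at word 3, Juniper's at word 5. */
        return (bootinfo_is_freebsd_loader(bootinfo) ?
            bootinfo[3] >> 3 : bootinfo[5] >> 3);
    }

    int
    main(void)
    {
        /* Fake Juniper-style bootinfo: version word != 1, ticks<<3 in word 5. */
        uint32_t bi[16] = { 0 };

        bi[0] = 2;
        bi[5] = 41666666u << 3;
        printf("freebsd loader: %d, ticks: %u\n",
            bootinfo_is_freebsd_loader(bi), bootinfo_timebase_ticks(bi));
        return (0);
    }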
@@ -83,8 +83,7 @@ __start:
 * locore registers use:
 * r1 : stack pointer
 * r2 : trace pointer (AP only, for early diagnostics)
- * r3-r26 : scratch registers
- * r27 : kernload
+ * r3-r27 : scratch registers
 * r28 : temp TLB1 entry
 * r29 : initial TLB1 entry we started in
 * r30-r31 : arguments (metadata pointer)

@@ -116,6 +115,9 @@ __start:
 	li %r3, 0
 	bl tlb_inval_all

+	cmpwi %r30, 0
+	beq done_mapping
+
 /*
  * Locate the TLB1 entry that maps this code
  */

@@ -171,7 +173,6 @@ __start:
 	bl 3f
 3:	mflr %r4			/* Use current address */
 	rlwinm %r4, %r4, 0, 0, 7	/* 16MB alignment mask */
-	mr %r27, %r4			/* Keep kernel load address */
 	ori %r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
 	mtspr SPR_MAS3, %r4		/* Set RPN and protection */
 	isync

@@ -197,23 +198,7 @@ __start:
 	mr %r3, %r28
 	bl tlb1_inval_entry

-/*
- * Save kernel load address for later use.
- */
-	lis %r3, kernload@ha
-	addi %r3, %r3, kernload@l
-	stw %r27, 0(%r3)
-#ifdef SMP
-	/*
-	 * APs need a separate copy of kernload info within the __boot_page
-	 * area so they can access this value very early, before their TLBs
-	 * are fully set up and the kernload global location is available.
-	 */
-	lis %r3, kernload_ap@ha
-	addi %r3, %r3, kernload_ap@l
-	stw %r27, 0(%r3)
-	msync
-#endif
 done_mapping:

 /*
  * Setup a temporary stack

@@ -257,7 +242,7 @@ __start:
 __boot_page:
 	bl 1f

-kernload_ap:
+GLOBAL(kernload_ap):
 	.long 0

 /*

@@ -785,8 +770,6 @@ tmpstack:
 */
 #define INTRCNT_COUNT 256 /* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */

-GLOBAL(kernload)
-	.long 0
 GLOBAL(intrnames)
 	.space INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
 GLOBAL(sintrnames)
@@ -56,7 +56,7 @@ __FBSDID("$FreeBSD$");
 #ifdef SMP
 extern void *ap_pcpu;
 extern uint8_t __boot_page[];		/* Boot page body */
-extern uint32_t kernload;		/* Kernel physical load address */
+extern uint32_t kernload_ap;		/* Kernel physical load address */
 #endif

 extern uint32_t *bootinfo;
@@ -178,9 +178,13 @@ bare_timebase_freq(platform_t plat, struct cpuref *cpuref)
 	phandle_t cpus, child;
 	pcell_t freq;

 	if (bootinfo != NULL) {
-		/* Backward compatibility. See 8-STABLE. */
-		ticks = bootinfo[3] >> 3;
+		if (bootinfo[0] == 1) {
+			/* Backward compatibility. See 8-STABLE. */
+			ticks = bootinfo[3] >> 3;
+		} else {
+			/* Compatibility with Juniper's loader. */
+			ticks = bootinfo[5] >> 3;
+		}
 	} else
 		ticks = 0;

@@ -268,7 +272,7 @@ bare_smp_start_cpu(platform_t plat, struct pcpu *pc)
 	/*
 	 * Set BPTR to the physical address of the boot page
 	 */
-	bptr = ((uint32_t)__boot_page - KERNBASE) + kernload;
+	bptr = ((uint32_t)__boot_page - KERNBASE) + kernload_ap;
 	ccsr_write4(OCP85XX_BPTR, (bptr >> 12) | 0x80000000);

 	/*
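The AP bring-up path above only changes which symbol supplies the kernel's
physical load address. The boot page register value itself is computed as in
this small sketch (the register name and the 0x80000000 enable bit are taken
from the hunk above; the sample addresses are made up):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Illustrative addresses only. */
        uint32_t kernbase    = 0xc0000000u;	/* KERNBASE */
        uint32_t boot_page   = 0xc0001000u;	/* &__boot_page (virtual) */
        uint32_t kernload_ap = 0x04000000u;	/* physical load address */
        uint32_t bptr;

        /*
         * Translate the boot page to physical, then program BPTR: the
         * register takes the page frame (addr >> 12) plus an enable bit.
         */
        bptr = (boot_page - kernbase) + kernload_ap;
        bptr = (bptr >> 12) | 0x80000000u;
        printf("OCP85XX_BPTR <- 0x%08x\n", bptr);
        return (0);
    }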
@@ -60,6 +60,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/queue.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
+#include <sys/linker.h>
 #include <sys/msgbuf.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
@@ -111,8 +112,13 @@ extern int dumpsys_minidump;
 extern unsigned char _etext[];
 extern unsigned char _end[];

-/* Kernel physical load address. */
-extern uint32_t kernload;
+extern uint32_t *bootinfo;
+
+#ifdef SMP
+extern uint32_t kernload_ap;
+#endif
+
+vm_paddr_t kernload;
 vm_offset_t kernstart;
 vm_size_t kernsize;

@@ -196,7 +202,7 @@ static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
 static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
 static void tlb1_write_entry(unsigned int);
 static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
-static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t);
+static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);

 static vm_size_t tsize2size(unsigned int);
 static unsigned int size2tsize(vm_size_t);
@@ -962,19 +968,37 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)

 	debugf("mmu_booke_bootstrap: entered\n");

+#ifdef SMP
+	kernload_ap = kernload;
+#endif
+
+
 	/* Initialize invalidation mutex */
 	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

 	/* Read TLB0 size and associativity. */
 	tlb0_get_tlbconf();

-	/* Align kernel start and end address (kernel image). */
+	/*
+	 * Align kernel start and end address (kernel image).
+	 * Note that kernel end does not necessarily relate to kernsize.
+	 * kernsize is the size of the kernel that is actually mapped.
+	 */
 	kernstart = trunc_page(start);
 	data_start = round_page(kernelend);
-	kernsize = data_start - kernstart;

 	data_end = data_start;

+	/*
+	 * Addresses of preloaded modules (like file systems) use
+	 * physical addresses. Make sure we relocate those into
+	 * virtual addresses.
+	 */
+	preload_addr_relocate = kernstart - kernload;
+
+	/* Allocate the dynamic per-cpu area. */
+	dpcpu = (void *)data_end;
+	data_end += DPCPU_SIZE;
+
 	/* Allocate space for the message buffer. */
 	msgbufp = (struct msgbuf *)data_end;
 	data_end += msgbufsize;

@@ -983,11 +1007,6 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)

 	data_end = round_page(data_end);

-	/* Allocate the dynamic per-cpu area. */
-	dpcpu = (void *)data_end;
-	data_end += DPCPU_SIZE;
-	dpcpu_init(dpcpu, 0);
-
 	/* Allocate space for ptbl_bufs. */
 	ptbl_bufs = (struct ptbl_buf *)data_end;
 	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;

@@ -1005,22 +1024,19 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 	debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);

 	debugf(" data_end: 0x%08x\n", data_end);
-	if (data_end - kernstart > 0x1000000) {
-		data_end = (data_end + 0x3fffff) & ~0x3fffff;
-		tlb1_mapin_region(kernstart + 0x1000000,
-		    kernload + 0x1000000, data_end - kernstart - 0x1000000);
-	} else
-		data_end = (data_end + 0xffffff) & ~0xffffff;
+	if (data_end - kernstart > kernsize) {
+		kernsize += tlb1_mapin_region(kernstart + kernsize,
+		    kernload + kernsize, (data_end - kernstart) - kernsize);
+	}
+	data_end = kernstart + kernsize;
 	debugf(" updated data_end: 0x%08x\n", data_end);

-	kernsize += data_end - data_start;
-
 	/*
 	 * Clear the structures - note we can only do it safely after the
 	 * possible additional TLB1 translations are in place (above) so that
 	 * all range up to the currently calculated 'data_end' is covered.
 	 */
+	dpcpu_init(dpcpu, 0);
 	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE);
 	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
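To make the new bookkeeping concrete: the kernel now trusts kernsize (the
amount tlb1_init() found already mapped) and only asks tlb1_mapin_region()
for whatever the boot-time data structures add on top of that. A small
worked example with made-up numbers (the rounding mirrors the routine's
1MB granularity, not its full behaviour):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Illustrative values only. */
        uint32_t kernstart = 0xc0000000u;	/* KERNBASE */
        uint32_t kernsize  = 16u << 20;		/* already mapped via TLB1 */
        uint32_t data_end  = 0xc1080000u;	/* image + msgbuf + ptbls + pdir */
        uint32_t need, mapped;

        if (data_end - kernstart > kernsize) {
            /*
             * tlb1_mapin_region() rounds the request up to 1MB and returns
             * what it actually mapped; here 512KB becomes one 1MB entry.
             */
            need = (data_end - kernstart) - kernsize;
            mapped = (need + (1u << 20) - 1) & ~((1u << 20) - 1);
            kernsize += mapped;
        }
        data_end = kernstart + kernsize;	/* 0xc1100000 */
        printf("kernsize=%uMB data_end=0x%08x\n", kernsize >> 20, data_end);
        return (0);
    }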
@@ -2926,22 +2942,6 @@ tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
 	return (0);
 }

-static int
-tlb1_entry_size_cmp(const void *a, const void *b)
-{
-	const vm_size_t *sza;
-	const vm_size_t *szb;
-
-	sza = a;
-	szb = b;
-	if (*sza > *szb)
-		return (-1);
-	else if (*sza < *szb)
-		return (1);
-	else
-		return (0);
-}
-
 /*
  * Map in contiguous RAM region into the TLB1 using maximum of
  * KERNEL_REGION_MAX_TLB_ENTRIES entries.
@@ -2950,64 +2950,60 @@ tlb1_entry_size_cmp(const void *a, const void *b)
 * used by all allocated entries.
 */
 vm_size_t
-tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size)
+tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
 {
-	vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES];
-	vm_size_t mapped_size, sz, esz;
-	unsigned int log;
-	int i;
-
-	CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x",
-	    __func__, size, va, pa);
-
-	mapped_size = 0;
-	sz = size;
-	memset(entry_size, 0, sizeof(entry_size));
-
-	/* Calculate entry sizes. */
-	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) {
-
-		/* Largest region that is power of 4 and fits within size */
-		log = ilog2(sz) / 2;
-		esz = 1 << (2 * log);
-
-		/* If this is last entry cover remaining size. */
-		if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) {
-			while (esz < sz)
-				esz = esz << 2;
-		}
-
-		entry_size[i] = esz;
-		mapped_size += esz;
-		if (esz < sz)
-			sz -= esz;
-		else
-			sz = 0;
-	}
-
-	/* Sort entry sizes, required to get proper entry address alignment. */
-	qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES,
-	    sizeof(vm_size_t), tlb1_entry_size_cmp);
-
-	/* Load TLB1 entries. */
-	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) {
-		esz = entry_size[i];
-		if (!esz)
-			break;
-
-		CTR5(KTR_PMAP, "%s: entry %d: sz = 0x%08x (va = 0x%08x "
-		    "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa);
-
-		tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM);
-
-		va += esz;
-		pa += esz;
-	}
-
-	CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)",
-	    __func__, mapped_size, mapped_size - size);
-
-	return (mapped_size);
+	vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
+	vm_size_t mapped, pgsz, base, mask;
+	int idx, nents;
+
+	/* Round up to the next 1M */
+	size = (size + (1 << 20) - 1) & ~((1 << 20) - 1);
+
+	mapped = 0;
+	idx = 0;
+	base = va;
+	pgsz = 64*1024*1024;
+	while (mapped < size) {
+		while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
+			while (pgsz > (size - mapped))
+				pgsz >>= 2;
+			pgs[idx++] = pgsz;
+			mapped += pgsz;
+		}
+
+		/* We under-map. Correct for this. */
+		if (mapped < size) {
+			while (pgs[idx - 1] == pgsz) {
+				idx--;
+				mapped -= pgsz;
+			}
+			/* XXX We may increase beyond our starting point. */
+			pgsz <<= 2;
+			pgs[idx++] = pgsz;
+			mapped += pgsz;
+		}
+	}
+
+	nents = idx;
+	mask = pgs[0] - 1;
+	/* Align address to the boundary */
+	if (va & mask) {
+		va = (va + mask) & ~mask;
+		pa = (pa + mask) & ~mask;
+	}
+
+	for (idx = 0; idx < nents; idx++) {
+		pgsz = pgs[idx];
+		debugf("%u: %x -> %x, size=%x\n", idx, pa, va, pgsz);
+		tlb1_set_entry(va, pa, pgsz, _TLB_ENTRY_MEM);
+		pa += pgsz;
+		va += pgsz;
+	}
+
+	mapped = (va - base);
+	debugf("mapped size 0x%08x (wasted space 0x%08x)\n",
+	    mapped, mapped - size);
+	return (mapped);
 }

 /*
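The new size-selection loop above is self-contained enough to exercise in
userspace. A minimal sketch of just that loop (the MAX_ENTRIES constant and
the harness are assumptions for illustration; the kernel routine additionally
aligns va/pa and programs the entries via tlb1_set_entry()):

    #include <stdio.h>

    #define MAX_ENTRIES	8	/* stand-in for KERNEL_REGION_MAX_TLB_ENTRIES */

    /* Compute the TLB1 page sizes that would cover 'size' bytes. */
    static int
    pick_sizes(unsigned long size, unsigned long pgs[MAX_ENTRIES])
    {
        unsigned long mapped, pgsz;
        int idx;

        /* Round up to the next 1M, as the kernel routine does. */
        size = (size + (1UL << 20) - 1) & ~((1UL << 20) - 1);

        mapped = 0;
        idx = 0;
        pgsz = 64UL * 1024 * 1024;	/* largest size the routine uses */
        while (mapped < size) {
            while (mapped < size && idx < MAX_ENTRIES) {
                while (pgsz > (size - mapped))
                    pgsz >>= 2;	/* entry sizes are powers of 4 */
                pgs[idx++] = pgsz;
                mapped += pgsz;
            }
            /* Ran out of entries short of 'size': retry with bigger pages. */
            if (mapped < size) {
                while (idx > 0 && pgs[idx - 1] == pgsz) {
                    idx--;
                    mapped -= pgsz;
                }
                pgsz <<= 2;
                pgs[idx++] = pgsz;
                mapped += pgsz;
            }
        }
        return (idx);
    }

    int
    main(void)
    {
        unsigned long pgs[MAX_ENTRIES];
        int i, n;

        /* E.g. a 19MB kernel image: expect 16MB + 1MB + 1MB + 1MB. */
        n = pick_sizes(19UL * 1024 * 1024, pgs);
        for (i = 0; i < n; i++)
            printf("entry %d: %luMB\n", i, pgs[i] >> 20);
        return (0);
    }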
@@ -3017,19 +3013,39 @@ tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size)
 void
 tlb1_init(vm_offset_t ccsrbar)
 {
-	uint32_t mas0;
+	uint32_t mas0, mas1, mas3;
+	uint32_t tsz;
+	u_int i;

-	/* TLB1[0] is used to map the kernel. Save that entry. */
-	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(0);
-	mtspr(SPR_MAS0, mas0);
-	__asm __volatile("isync; tlbre");
+	if (bootinfo != NULL && bootinfo[0] != 1) {
+		tlb1_idx = *((uint16_t *)(bootinfo + 8));
+	} else
+		tlb1_idx = 1;

-	tlb1[0].mas1 = mfspr(SPR_MAS1);
-	tlb1[0].mas2 = mfspr(SPR_MAS2);
-	tlb1[0].mas3 = mfspr(SPR_MAS3);
+	/* The first entry/entries are used to map the kernel. */
+	for (i = 0; i < tlb1_idx; i++) {
+		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
+		mtspr(SPR_MAS0, mas0);
+		__asm __volatile("isync; tlbre");

-	/* Map in CCSRBAR in TLB1[1] */
-	tlb1_idx = 1;
+		mas1 = mfspr(SPR_MAS1);
+		if ((mas1 & MAS1_VALID) == 0)
+			continue;
+
+		mas3 = mfspr(SPR_MAS3);
+
+		tlb1[i].mas1 = mas1;
+		tlb1[i].mas2 = mfspr(SPR_MAS2);
+		tlb1[i].mas3 = mas3;
+
+		if (i == 0)
+			kernload = mas3 & MAS3_RPN;
+
+		tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
+		kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
+	}
+
+	/* Map in CCSRBAR. */
 	tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);

 	/* Setup TLB miss defaults */
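For reference, kernload and kernsize now fall out of the TLB1 entries the
loader left behind: MAS3 carries the real page number and MAS1 the TSIZE
field, where a BookE TSIZE of t conventionally encodes a page of
1KB << (2 * t), i.e. 4KB, 16KB, ..., 16MB for t = 1, 2, ..., 7. A small
sketch of that decoding (the MAS field layouts here are simplified
stand-ins, not the kernel's macros):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-ins for the MAS1/MAS3 field layouts (e500-style). */
    #define MAS1_TSIZE_SHIFT	8
    #define MAS1_TSIZE_MASK	(0xfu << MAS1_TSIZE_SHIFT)
    #define MAS3_RPN		0xfffff000u	/* real page number bits */

    /* TSIZE t encodes a page of 1KB << (2 * t). */
    static uint32_t
    tsize2size(unsigned int tsize)
    {
        return ((1u << (tsize << 1)) << 10);
    }

    int
    main(void)
    {
        /* Illustrative values for a 16MB kernel entry loaded at 64MB. */
        uint32_t mas1 = 7u << MAS1_TSIZE_SHIFT;	/* TSIZE = 7 -> 16MB */
        uint32_t mas3 = 0x04000000u;		/* physical 64MB */
        uint32_t kernload, kernsize;

        kernload = mas3 & MAS3_RPN;
        kernsize = tsize2size((mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT);
        printf("kernload=0x%08x kernsize=%uMB\n", kernload, kernsize >> 20);
        return (0);
    }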