powerpc: Prepare Book-E kernels for KERNBASE != run base

Book-E kernels really run at VM_MIN_KERNEL_ADDRESS, which currently happens to
be the same as KERNBASE.  KERNBASE is the linked address, which the loader also
takes to be the physical load address.  Treat KERNBASE as a physical address,
not a virtual one, and change virtual-address references to KERNBASE to use
VM_MIN_KERNEL_ADDRESS or __startkernel instead, as appropriate.
Justin Hibbits 2018-11-28 02:00:27 +00:00
parent 89797c881d
commit ea32838af0
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=341102
2 changed files with 8 additions and 7 deletions
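
The distinction driving the change: KERNBASE is where the loader physically places the kernel, while the kernel actually runs at a (possibly different) virtual base. Below is a minimal, standalone sketch of the resulting virtual-to-physical arithmetic; the constants are made-up example values, not the real kernel configuration.

#include <stdint.h>
#include <stdio.h>

/* Example values only; the real ones come from the kernel config and the loader. */
#define KERNBASE		0x04000000u	/* treated as the physical load address */
#define VM_MIN_KERNEL_ADDRESS	0xc0000000u	/* virtual address the kernel runs at */

int
main(void)
{
	uint32_t va = VM_MIN_KERNEL_ADDRESS + 0x2000;	/* some linked kernel address */

	/*
	 * When the two bases are assumed identical, virtual and physical
	 * addresses can be used interchangeably.  With KERNBASE != run base,
	 * an explicit translation step is needed.
	 */
	uint32_t pa = (va - VM_MIN_KERNEL_ADDRESS) + KERNBASE;

	printf("va %#x -> pa %#x\n", va, pa);
	return (0);
}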

sys/powerpc/booke/locore.S

@@ -113,7 +113,7 @@ __start:
  * - Create temp entry in the second AS (make sure it's not TLB[1])
  * - Switch to temp mapping
  * - Map 64MB of RAM in TLB1[1]
- * - Use AS=1, set EPN to KERNBASE and RPN to kernel load address
+ * - Use AS=0, set EPN to VM_MIN_KERNEL_ADDRESS and RPN to kernel load address
  * - Switch to TLB1[1] mapping
  * - Invalidate temp mapping
  *
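
The step list above boils down to ending up with a TLB1[1] entry whose effective (virtual) page number is VM_MIN_KERNEL_ADDRESS and whose real (physical) page number is the kernel load address. A simplified sketch of that relationship follows; the struct and its example values are illustrative, not the e500 MAS register layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the final TLB1[1] kernel mapping, not the MAS layout. */
struct tlb1_sketch {
	uint32_t epn;	/* effective (virtual) page number: where the kernel runs */
	uint32_t rpn;	/* real (physical) page number: where the loader put it */
	uint32_t size;	/* mapping size in bytes */
	int	 as;	/* address space the entry belongs to */
};

int
main(void)
{
	/* Example addresses only. */
	struct tlb1_sketch e = {
		.epn  = 0xc0000000,		/* VM_MIN_KERNEL_ADDRESS (example value) */
		.rpn  = 0x04000000,		/* kernel load address (example value) */
		.size = 64 * 1024 * 1024,	/* 64MB, per the comment above */
		.as   = 0,			/* AS=0: the final, non-temporary space */
	};

	printf("map va %#x -> pa %#x, %u bytes, AS=%d\n", e.epn, e.rpn, e.size, e.as);
	return (0);
}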
@@ -238,7 +238,7 @@ __start:
 	mtspr SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
 	isync
-	LOAD_ADDR(%r3, KERNBASE)
+	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
 	ori %r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
 	mtspr SPR_MAS2, %r3
 	isync
@@ -471,7 +471,7 @@ bp_kernload:
 	mtspr SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
 	isync
-	LOAD_ADDR(%r3, KERNBASE)
+	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
 	ori %r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
 	mtspr SPR_MAS2, %r3
 	isync
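
Both of the hunks above build the value written to SPR_MAS2 the same way: the virtual base of the mapping OR'd with the attribute bits; only the base changes, from KERNBASE to VM_MIN_KERNEL_ADDRESS. A hedged C rendering of that composition is below; the constants are placeholders for illustration, not the definitions from the kernel's tlb.h.

#include <stdint.h>
#include <stdio.h>

/* Placeholder values for illustration; the kernel headers define the real ones. */
#define VM_MIN_KERNEL_ADDRESS	0xc0000000u	/* virtual base being mapped */
#define MAS2_M			0x00000004u	/* memory-coherence bit: WIMGE = 0b00100 */
#define _TLB_ENTRY_SHARED	0x00000000u	/* illustrative stand-in */

int
main(void)
{
	/* Same composition as LOAD_ADDR followed by the ori in the assembly above. */
	uint32_t mas2 = VM_MIN_KERNEL_ADDRESS | _TLB_ENTRY_SHARED | MAS2_M;

	printf("MAS2 = %#x\n", mas2);
	return (0);
}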
@@ -526,8 +526,8 @@ bp_kernload:
 7:
 	/*
-	 * At this point we're running at virtual addresses KERNBASE and beyond so
-	 * it's allowed to directly access all locations the kernel was linked
+	 * At this point we're running at virtual addresses VM_MIN_KERNEL_ADDRESS and
+	 * beyond so it's allowed to directly access all locations the kernel was linked
 	 * against.
 	 */

sys/powerpc/mpc85xx/platform_mpc85xx.c

@@ -68,6 +68,7 @@ extern void *ap_pcpu;
 extern vm_paddr_t kernload;		/* Kernel physical load address */
 extern uint8_t __boot_page[];		/* Boot page body */
 extern uint32_t bp_kernload;
+extern vm_offset_t __startkernel;
 struct cpu_release {
 	uint32_t entry_h;
@@ -346,7 +347,7 @@ mpc85xx_smp_start_cpu_epapr(platform_t plat, struct pcpu *pc)
 	rel_va = rel_page + (rel_pa & PAGE_MASK);
 	pmap_kenter(rel_page, rel_pa & ~PAGE_MASK);
 	rel = (struct cpu_release *)rel_va;
-	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - KERNBASE) + kernload;
+	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - __startkernel) + kernload;
 	cpu_flush_dcache(__DEVOLATILE(struct cpu_release *,rel), sizeof(*rel));
 	rel->pir = pc->pc_cpuid; __asm __volatile("sync");
 	rel->entry_h = (bptr >> 32);
@@ -415,7 +416,7 @@ mpc85xx_smp_start_cpu(platform_t plat, struct pcpu *pc)
 	/* Flush caches to have our changes hit DRAM. */
 	cpu_flush_dcache(__boot_page, 4096);
-	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - KERNBASE) + kernload;
+	bptr = ((vm_paddr_t)(uintptr_t)__boot_page - __startkernel) + kernload;
 	KASSERT((bptr & 0xfff) == 0,
 	    ("%s: boot page is not aligned (%#jx)", __func__, (uintmax_t)bptr));
 	if (mpc85xx_is_qoriq()) {
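
Both call sites above now compute the boot page's physical address the same way: take __boot_page's kernel virtual address, subtract the virtual address the kernel actually started at (__startkernel) rather than the link-time KERNBASE, and add the physical load address. A standalone sketch with example values follows; the local names and addresses are made up for illustration.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t  vm_paddr_t;	/* physical addresses, as in the kernel code above */
typedef uintptr_t vm_offset_t;	/* kernel virtual addresses */

int
main(void)
{
	/* Example values only; the kernel gets these from the linker and loader. */
	vm_offset_t startkernel  = 0xc0000000;	/* stands in for __startkernel */
	vm_paddr_t  kernload     = 0x04000000;	/* physical load address */
	vm_offset_t boot_page_va = 0xc0007000;	/* stands in for __boot_page */

	/* Same arithmetic as the updated bptr computation above. */
	vm_paddr_t bptr = ((vm_paddr_t)boot_page_va - startkernel) + kernload;

	/* The KASSERT above requires this to stay page (4KB) aligned. */
	printf("bptr = %#jx (aligned: %s)\n", (uintmax_t)bptr,
	    (bptr & 0xfff) == 0 ? "yes" : "no");
	return (0);
}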