First cut at having the kernel run within the PBVM:

o   The bootinfo structure is now a virtual pointer.
o   Replace VM_MAX_ADDRESS with VM_MAXUSER_ADDRESS and redefine
    VM_MAX_ADDRESS as the maximum address possible (~0UL).
o   Since we're not using direct-mapped translations, switching
    to physical addressing is less trivial. Reserve the boot stack
    for running in physical mode and special-case the EFI call,
    as we're still on the boot stack.
o   Region 4 belongs to the kernel now, not process space.
This commit is contained in:
Marcel Moolenaar 2011-03-12 02:00:28 +00:00
parent db06a6f4ef
commit 7bd6af277d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/altix/; revision=219555
16 changed files with 376 additions and 364 deletions

View File

@ -84,6 +84,7 @@ ia64/ia64/efi.c standard
ia64/ia64/elf_machdep.c standard
ia64/ia64/emulate.c standard
ia64/ia64/exception.S standard
ia64/ia64/firmware.S standard
ia64/ia64/gdb_machdep.c optional gdb
ia64/ia64/highfp.c standard
ia64/ia64/in_cksum.c optional inet

View File

@ -38,7 +38,6 @@
#include <sys/bus.h>
#include <sys/cons.h>
#include <machine/bootinfo.h>
#include <machine/intr.h>
#include <machine/md_var.h>

View File

@ -32,13 +32,11 @@ __FBSDID("$FreeBSD$");
#include <sys/systm.h>
#include <machine/bootinfo.h>
#include <machine/efi.h>
#include <machine/md_var.h>
#include <machine/sal.h>
#include <vm/vm.h>
#include <vm/pmap.h>
extern uint64_t ia64_call_efi_physical(uint64_t, uint64_t, uint64_t, uint64_t,
uint64_t, uint64_t);
static struct efi_systbl *efi_systbl;
static struct efi_cfgtbl *efi_cfgtbl;
static struct efi_rt *efi_runtime;
@ -96,6 +94,7 @@ efi_boot_finish(void)
int
efi_boot_minimal(uint64_t systbl)
{
ia64_efi_f setvirt;
struct efi_md *md;
efi_status status;
@ -121,18 +120,16 @@ efi_boot_minimal(uint64_t systbl)
md = efi_md_first();
while (md != NULL) {
if (md->md_attr & EFI_MD_ATTR_RT) {
if (md->md_attr & EFI_MD_ATTR_WB)
md->md_virt =
(void *)IA64_PHYS_TO_RR7(md->md_phys);
else if (md->md_attr & EFI_MD_ATTR_UC)
md->md_virt = pmap_mapdev(md->md_phys,
md->md_pages * EFI_PAGE_SIZE);
md->md_virt = (md->md_attr & EFI_MD_ATTR_WB) ?
(void *)IA64_PHYS_TO_RR7(md->md_phys) :
(void *)IA64_PHYS_TO_RR6(md->md_phys);
}
md = efi_md_next(md);
}
status = ia64_call_efi_physical((uint64_t)efi_runtime->rt_setvirtual,
bootinfo.bi_memmap_size, bootinfo.bi_memdesc_size,
bootinfo.bi_memdesc_version, bootinfo.bi_memmap, 0);
setvirt = (void *)IA64_PHYS_TO_RR7((u_long)efi_runtime->rt_setvirtual);
status = ia64_efi_physical(setvirt, bootinfo->bi_memmap_size,
bootinfo->bi_memdesc_size, bootinfo->bi_memdesc_version,
ia64_tpa(bootinfo->bi_memmap));
return ((status < 0) ? EFAULT : 0);
}
@ -165,9 +162,9 @@ struct efi_md *
efi_md_first(void)
{
if (bootinfo.bi_memmap == 0)
if (bootinfo->bi_memmap == 0)
return (NULL);
return ((struct efi_md *)IA64_PHYS_TO_RR7(bootinfo.bi_memmap));
return ((struct efi_md *)bootinfo->bi_memmap);
}
struct efi_md *
@ -175,8 +172,8 @@ efi_md_next(struct efi_md *md)
{
uint64_t plim;
plim = IA64_PHYS_TO_RR7(bootinfo.bi_memmap + bootinfo.bi_memmap_size);
md = (struct efi_md *)((uintptr_t)md + bootinfo.bi_memdesc_size);
plim = bootinfo->bi_memmap + bootinfo->bi_memmap_size;
md = (struct efi_md *)((uintptr_t)md + bootinfo->bi_memdesc_size);
return ((md >= (struct efi_md *)plim) ? NULL : md);
}

258
sys/ia64/ia64/firmware.S Normal file
View File

@ -0,0 +1,258 @@
/*-
* Copyright (c) 2011 Marcel Moolenaar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <machine/asm.h>
#include <machine/ia64_cpu.h>
.text
/*
* u_long ia64_efi_physical(ia64_efi_f, u_long, u_long, u_long, u_long)
*
* loc0 = ar.pfs
* loc1 = rp
* loc2 = psr
* loc3 = sp
* loc4 = bsp
* loc5 = gp
*/
ENTRY(ia64_efi_physical, 5)
.prologue
.regstk 5,6,4,0
.save ar.pfs,loc0
alloc loc0=ar.pfs,5,6,4,0
;;
.save rp,loc1
mov loc1=rp
;;
.body
mov loc2=psr // save psr
// Bits to clear for physical mode: interrupts (I), instruction/data/RSE
// translation (IT, DT, RT) and the FP-disable bits (DFL, DFH).
movl r16=IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | \
IA64_PSR_RT | IA64_PSR_DFL | IA64_PSR_DFH
;;
andcm r14=loc2,r16 // clear those bits in the current psr
movl r15=IA64_PSR_BN
;;
rsm psr.i // block interrupts during the switch
mov r17=ar.rsc // save RSE configuration
or r16=r14,r15 // new psr (physical mode, BN=1)
;;
mov ar.rsc=0 // put RSE in enforced lazy mode
or loc2=loc2,r15 // make sure BN=1 in the psr we restore
;;
flushrs // flush dirty stacked registers to backing store
mov loc3=sp // save sp
;;
mov loc4=ar.bsp // save ar.bsp
mov r18=ar.rnat // save RSE NaT collection
;;
tpa r19=loc4 // new bspstore (physical address of ar.bsp)
mov loc5=gp // save kernel gp
;;
tpa r20=loc3 // new sp (physical address of sp)
ld8 r21=[in0],8 // entry point from EFI function descriptor
;;
1:
mov r14=ip
;;
ld8 r22=[in0] // gp value from EFI function descriptor
add r15=2f-1b,r14 // virtual address of label 2 below
;;
tpa r14=r15 // physical address of label 2 (rfi target)
;;
rsm psr.ic // interruption collection off before writing cr regs
;;
srlz.i
;;
mov cr.iip=r14
mov cr.ifs=r0
mov cr.ipsr=r16 // rfi loads the physical-mode psr
;;
rfi // enter physical mode at label 2
2:
mov ar.bspstore=r19 // backing store now at physical address
mov sp=r20
;;
mov ar.rnat=r18 // restore NaT collection
mov ar.rsc=r17 // re-enable RSE
;;
mov b6=r21
mov gp=r22 // use the EFI procedure's gp
mov out0=in1 // pass the 4 EFI arguments through
mov out1=in2
mov out2=in3
mov out3=in4
;;
br.call.sptk.many rp=b6 // call the EFI procedure (status in ret0)
mov gp=loc5 // restore kernel gp
;;
// Switch back to virtual mode: same rfi dance in reverse.
rsm psr.i | psr.ic
mov r16=ar.rsc
;;
srlz.i
mov ar.rsc=0
;;
flushrs
;;
mov r17=ar.rnat
movl r18=3f // virtual address of label 3 (rfi target)
;;
mov cr.iip=r18
mov cr.ifs=r0
mov cr.ipsr=loc2 // rfi restores the original (virtual) psr
;;
rfi // re-enter virtual mode at label 3
3:
mov ar.bspstore=loc4 // restore virtual backing store pointer
mov sp=loc3 // restore virtual sp
;;
mov ar.rnat=r17
mov ar.rsc=r16
;;
mov rp=loc1
mov ar.pfs=loc0
;;
br.ret.sptk.many rp
END(ia64_efi_physical)
/*
* ia64_pal_ret ia64_pal_physical(ia64_fw_f, u_long, u_long, u_long, u_long)
*
* loc0 = ar.pfs
* loc1 = rp
* loc2 = psr
* loc3 = sp
* loc4 = bsp
* loc5 = gp
*/
ENTRY(ia64_pal_physical, 5)
.prologue
.regstk 5,6,4,0
.save ar.pfs,loc0
alloc loc0=ar.pfs,5,6,4,0
;;
.save rp,loc1
mov loc1=rp
;;
.body
mov loc2=psr // save psr
// Bits to clear for physical mode: interrupts (I), instruction/data/RSE
// translation (IT, DT, RT) and the FP-disable bits (DFL, DFH).
movl r16=IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | \
IA64_PSR_RT | IA64_PSR_DFL | IA64_PSR_DFH
;;
andcm r14=loc2,r16 // clear those bits in the current psr
movl r15=IA64_PSR_BN
;;
rsm psr.i // block interrupts during the switch
mov r17=ar.rsc // save RSE configuration
or r16=r14,r15 // new psr (physical mode, BN=1)
;;
mov ar.rsc=0 // put RSE in enforced lazy mode
or loc2=loc2,r15 // make sure BN=1 in the psr we restore
;;
flushrs // flush dirty stacked registers to backing store
mov loc3=sp // save sp
;;
mov loc4=ar.bsp // save ar.bsp
mov r18=ar.rnat // save RSE NaT collection
;;
// Unlike ia64_efi_physical, run on the (reserved) kstack while in
// physical mode: backing store grows up from kstack, memory stack
// grows down from kstack_top.
mov loc5=gp // save gp
movl r14=kstack
;;
tpa r19=r14 // new bspstore
movl r15=kstack_top
;;
tpa r20=r15 // new sp
movl r21=ia64_pal_entry
;;
1:
mov r14=ip
ld8 r22=[r21] // load PAL entry point
;;
tpa r21=r22 // physical address of PAL entry point
add r15=2f-1b,r14 // virtual address of label 2 below
;;
tpa r14=r15 // physical address of label 2 (rfi target)
;;
rsm psr.ic // interruption collection off before writing cr regs
;;
srlz.i
;;
mov cr.iip=r14
mov cr.ifs=r0
mov cr.ipsr=r16 // rfi loads the physical-mode psr
;;
rfi // enter physical mode at label 2
2:
mov ar.bspstore=r19 // backing store now in kstack (physical)
add sp=-16,r20 // physical sp, with 16-byte scratch area
;;
mov ar.rnat=r18 // restore NaT collection
mov ar.rsc=r17 // re-enable RSE
;;
mov b6=r21
// Arguments in out0-out3 for stacked PAL calls ...
mov out0=in0
mov out1=in1
mov out2=in2
mov out3=in3
// PAL static calls
// ... and duplicated in r28-r31 for static PAL calls.
mov r28=in0
mov r29=in1
mov r30=in2
mov r31=in3
br.call.sptk.many rp=b6 // call into PAL firmware
mov gp=loc5 // restore gp
;;
// Switch back to virtual mode: same rfi dance in reverse.
rsm psr.i | psr.ic
mov r16=ar.rsc
;;
srlz.i
mov ar.rsc=0
;;
flushrs
;;
mov r17=ar.rnat
movl r18=3f // virtual address of label 3 (rfi target)
;;
mov cr.iip=r18
mov cr.ifs=r0
mov cr.ipsr=loc2 // rfi restores the original (virtual) psr
;;
rfi // re-enter virtual mode at label 3
3:
mov ar.bspstore=loc4 // restore original backing store pointer
mov sp=loc3 // restore original sp
;;
mov ar.rnat=r17
mov ar.rsc=r16
;;
mov rp=loc1
mov ar.pfs=loc0
;;
br.ret.sptk.many rp
END(ia64_pal_physical)

View File

@ -77,8 +77,6 @@ ASSYM(ERESTART, ERESTART);
ASSYM(FRAME_SYSCALL, FRAME_SYSCALL);
ASSYM(IA64_ID_PAGE_SHIFT, IA64_ID_PAGE_SHIFT);
ASSYM(KSTACK_PAGES, KSTACK_PAGES);
ASSYM(MC_PRESERVED, offsetof(mcontext_t, mc_preserved));
@ -116,4 +114,4 @@ ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
ASSYM(UC_MCONTEXT, offsetof(ucontext_t, uc_mcontext));
ASSYM(VM_MAX_ADDRESS, VM_MAX_ADDRESS);
ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS);

View File

@ -34,10 +34,15 @@
#include <machine/intrcnt.h>
#include <assym.s>
.section .data.proc0,"aw"
.global kstack
#define FW_STACK_SIZE 16384
.section .data.kstack, "aw"
.align PAGE_SIZE
kstack: .space KSTACK_PAGES * PAGE_SIZE
.global kstack
kstack: .space FW_STACK_SIZE
.align PAGE_SIZE
.global kstack_top
kstack_top:
.text
@ -64,7 +69,7 @@ ENTRY_NOPROFILE(__start, 1)
srlz.i
;;
ssm IA64_PSR_DFH
mov r17=KSTACK_PAGES*PAGE_SIZE-SIZEOF_PCB-SIZEOF_TRAPFRAME-16
mov r17=FW_STACK_SIZE-16
;;
}
{ .mlx
@ -74,7 +79,7 @@ ENTRY_NOPROFILE(__start, 1)
}
{ .mlx
mov ar.bspstore=r16 // switch backing store
movl r16=pa_bootinfo
movl r16=bootinfo
;;
}
{ .mmi
@ -199,19 +204,25 @@ ENTRY_NOPROFILE(os_boot_rendez,0)
srlz.d
rsm IA64_PSR_IC|IA64_PSR_I
;;
mov r16 = (4<<8)|(16<<2)
movl r17 = 4<<61
;;
mov rr[r17] = r16
;;
srlz.d
mov r16 = (5<<8)|(PAGE_SHIFT<<2)|1
movl r17 = 5<<61
;;
mov rr[r17] = r16
;;
srlz.d
mov r16 = (6<<8)|(IA64_ID_PAGE_SHIFT<<2)
mov r16 = (6<<8)|(PAGE_SHIFT<<2)
movl r17 = 6<<61
;;
mov rr[r17] = r16
;;
srlz.d
mov r16 = (7<<8)|(IA64_ID_PAGE_SHIFT<<2)
mov r16 = (7<<8)|(PAGE_SHIFT<<2)
movl r17 = 7<<61
;;
mov rr[r17] = r16

View File

@ -115,8 +115,7 @@ SYSCTL_UINT(_hw_freq, OID_AUTO, itc, CTLFLAG_RD, &itc_freq, 0,
int cold = 1;
u_int64_t pa_bootinfo;
struct bootinfo bootinfo;
struct bootinfo *bootinfo;
struct pcpu pcpu0;
@ -678,15 +677,15 @@ map_gateway_page(void)
pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
PTE_PL_KERN | PTE_AR_X_RX;
pte |= (uint64_t)ia64_gateway_page & PTE_PPN_MASK;
pte |= ia64_tpa((uint64_t)ia64_gateway_page) & PTE_PPN_MASK;
__asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
"r"(VM_MAX_ADDRESS), "r"(PAGE_SHIFT << 2));
"r"(VM_MAXUSER_ADDRESS), "r"(PAGE_SHIFT << 2));
__asm __volatile("mov %0=psr" : "=r"(psr));
__asm __volatile("rsm psr.ic|psr.i");
ia64_srlz_i();
ia64_set_ifa(VM_MAX_ADDRESS);
ia64_set_ifa(VM_MAXUSER_ADDRESS);
ia64_set_itir(PAGE_SHIFT << 2);
ia64_srlz_d();
__asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(pte));
@ -696,7 +695,7 @@ map_gateway_page(void)
ia64_srlz_i();
/* Expose the mapping to userland in ar.k5 */
ia64_set_k5(VM_MAX_ADDRESS);
ia64_set_k5(VM_MAXUSER_ADDRESS);
}
static u_int
@ -760,17 +759,6 @@ ia64_init(void)
* information provided by the boot program).
*/
/*
* pa_bootinfo is the physical address of the bootinfo block as
* passed to us by the loader and set in locore.s.
*/
bootinfo = *(struct bootinfo *)(IA64_PHYS_TO_RR7(pa_bootinfo));
if (bootinfo.bi_magic != BOOTINFO_MAGIC || bootinfo.bi_version != 1) {
bzero(&bootinfo, sizeof(bootinfo));
bootinfo.bi_kernend = (vm_offset_t) round_page(_end);
}
/*
* Look for the I/O ports first - we need them for console
* probing.
@ -788,20 +776,20 @@ ia64_init(void)
}
metadata_missing = 0;
if (bootinfo.bi_modulep)
preload_metadata = (caddr_t)bootinfo.bi_modulep;
if (bootinfo->bi_modulep)
preload_metadata = (caddr_t)bootinfo->bi_modulep;
else
metadata_missing = 1;
if (envmode == 0 && bootinfo.bi_envp)
kern_envp = (caddr_t)bootinfo.bi_envp;
if (envmode == 0 && bootinfo->bi_envp)
kern_envp = (caddr_t)bootinfo->bi_envp;
else
kern_envp = static_env;
/*
* Look at arguments passed to us and compute boothowto.
*/
boothowto = bootinfo.bi_boothowto;
boothowto = bootinfo->bi_boothowto;
if (boothowto & RB_VERBOSE)
bootverbose = 1;
@ -811,51 +799,49 @@ ia64_init(void)
*/
kernstart = trunc_page(kernel_text);
#ifdef DDB
ksym_start = bootinfo.bi_symtab;
ksym_end = bootinfo.bi_esymtab;
ksym_start = bootinfo->bi_symtab;
ksym_end = bootinfo->bi_esymtab;
kernend = (vm_offset_t)round_page(ksym_end);
#else
kernend = (vm_offset_t)round_page(_end);
#endif
/* But if the bootstrap tells us otherwise, believe it! */
if (bootinfo.bi_kernend)
kernend = round_page(bootinfo.bi_kernend);
if (bootinfo->bi_kernend)
kernend = round_page(bootinfo->bi_kernend);
/*
* Setup the PCPU data for the bootstrap processor. It is needed
* by printf(). Also, since printf() has critical sections, we
* need to initialize at least pc_curthread.
*/
pcpup = &pcpu0;
ia64_set_k4((u_int64_t)pcpup);
pcpu_init(pcpup, 0, sizeof(pcpu0));
dpcpu_init((void *)kernend, 0);
cpu_pcpu_setup(pcpup, ~0U, ia64_get_lid());
kernend += DPCPU_SIZE;
PCPU_SET(curthread, &thread0);
/*
* Region 6 is direct mapped UC and region 7 is direct mapped
* WC. The details of this is controlled by the Alt {I,D}TLB
* handlers. Here we just make sure that they have the largest
* possible page size to minimise TLB usage.
*/
ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (PAGE_SHIFT << 2));
ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (PAGE_SHIFT << 2));
ia64_srlz_d();
#if 0
if (ia64_pal_base != 0) {
ia64_pal_base &= ~IA64_ID_PAGE_MASK;
/*
* We use a TR to map the first 256M of memory - this might
* cover the palcode too.
*/
if (ia64_pal_base == 0)
printf("PAL code mapped by the kernel's TR\n");
} else
printf("PAL code not found\n");
#endif
/*
* Wire things up so we can call the firmware.
*/
map_pal_code();
efi_boot_minimal(bootinfo.bi_systab);
efi_boot_minimal(bootinfo->bi_systab);
ia64_xiv_init();
ia64_sal_init();
calculate_frequencies();
/*
* Setup the PCPU data for the bootstrap processor. It is needed
* by printf(). Also, since printf() has critical sections, we
* need to initialize at least pc_curthread.
*/
pcpup = &pcpu0;
ia64_set_k4((u_int64_t)pcpup);
pcpu_init(pcpup, 0, sizeof(pcpu0));
dpcpu_init((void *)kernend, 0);
cpu_pcpu_setup(pcpup, ~0U, ia64_get_lid());
kernend += DPCPU_SIZE;
PCPU_SET(curthread, &thread0);
/*
* Initialize the console before we print anything out.
*/
@ -869,8 +855,8 @@ ia64_init(void)
check_sn_sal();
/* Get FPSWA interface */
fpswa_iface = (bootinfo.bi_fpswa == 0) ? NULL :
(struct fpswa_iface *)IA64_PHYS_TO_RR7(bootinfo.bi_fpswa);
fpswa_iface = (bootinfo->bi_fpswa == 0) ? NULL :
(struct fpswa_iface *)IA64_PHYS_TO_RR7(bootinfo->bi_fpswa);
/* Init basic tunables, including hz */
init_param1();
@ -1025,7 +1011,7 @@ uint64_t
ia64_get_hcdp(void)
{
return (bootinfo.bi_hcdp);
return (bootinfo->bi_hcdp);
}
void

View File

@ -75,63 +75,8 @@ psrsave = loc4
;;
srlz.d
br.ret.sptk rp
END(ia64_call_pal_static)
#ifdef _KERNEL
/*
* struct ia64_pal_result ia64_call_pal_static_physical(u_int64_t proc,
* u_int64_t arg1, u_int64_t arg2, u_int64_t arg3)
*/
ENTRY(ia64_call_pal_static_physical, 4)
.regstk 4,5,0,0
palret = loc0
entry = loc1
rpsave = loc2
pfssave = loc3
psrsave = loc4
alloc pfssave=ar.pfs,4,5,0,0
;;
mov rpsave=rp
movl entry=@gprel(ia64_pal_entry)
1: mov palret=ip // for return address
;;
add entry=entry,gp
mov r28=in0 // procedure number
;;
ld8 entry=[entry] // read entry point
mov r29=in1 // copy arguments
mov r30=in2
mov r31=in3
;;
dep entry=0,entry,61,3 // physical address
dep palret=0,palret,61,3 // physical address
br.call.sptk.many rp=ia64_physical_mode
mov psrsave=ret0
;;
mov b6=entry
add palret=2f-1b,palret // calculate return address
;;
mov b0=palret
br.cond.sptk b6 // call into firmware
;;
2: mov r14=psrsave
;;
br.call.sptk.many rp=ia64_change_mode
;;
mov rp=rpsave
mov ar.pfs=pfssave
;;
br.ret.sptk rp
END(ia64_call_pal_static_physical)
#endif
/*
* struct ia64_pal_result ia64_call_pal_stacked(u_int64_t proc,
* u_int64_t arg1, u_int64_t arg2, u_int64_t arg3)
@ -172,52 +117,3 @@ psrsave = loc3
br.ret.sptk rp
END(ia64_call_pal_stacked)
#ifdef _KERNEL
/*
* struct ia64_pal_result ia64_call_pal_stacked_physical(u_int64_t proc,
* u_int64_t arg1, u_int64_t arg2, u_int64_t arg3)
*/
ENTRY(ia64_call_pal_stacked_physical, 4)
.regstk 4,4,4,0
entry = loc0
rpsave = loc1
pfssave = loc2
psrsave = loc3
alloc pfssave=ar.pfs,4,4,4,0
;;
mov rpsave=rp
movl entry=@gprel(ia64_pal_entry)
;;
add entry=entry,gp
mov r28=in0 // procedure number
mov out0=in0
;;
ld8 entry=[entry] // read entry point
mov out1=in1 // copy arguments
mov out2=in2
mov out3=in3
;;
dep entry=0,entry,61,3 // physical address
br.call.sptk.many rp=ia64_physical_mode
mov psrsave=ret0
;;
mov b6=entry
;;
br.call.sptk.many rp=b6 // call into firmware
;;
mov r14=psrsave
;;
br.call.sptk.many rp=ia64_change_mode
;;
mov rp=rpsave
mov ar.pfs=pfssave
;;
br.ret.sptk rp
END(ia64_call_pal_stacked_physical)
#endif

View File

@ -390,7 +390,7 @@ pmap_bootstrap()
*/
ia64_kptdir = (void *)pmap_steal_memory(PAGE_SIZE);
nkpt = 0;
kernel_vm_end = VM_MIN_KERNEL_ADDRESS - VM_GATEWAY_SIZE;
kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
for (i = 0; phys_avail[i+2]; i+= 2)
;
@ -451,7 +451,7 @@ pmap_bootstrap()
* Initialize the kernel pmap (which is statically allocated).
*/
PMAP_LOCK_INIT(kernel_pmap);
for (i = 0; i < 5; i++)
for (i = 0; i < 4; i++)
kernel_pmap->pm_rid[i] = 0;
TAILQ_INIT(&kernel_pmap->pm_pvlist);
PCPU_SET(md.current_pmap, kernel_pmap);
@ -462,16 +462,6 @@ pmap_bootstrap()
ia64_set_rr(IA64_RR_BASE(5),
(5 << 8) | (PAGE_SHIFT << 2) | 1);
/*
* Region 6 is direct mapped UC and region 7 is direct mapped
* WC. The details of this is controlled by the Alt {I,D}TLB
* handlers. Here we just make sure that they have the largest
* possible page size to minimise TLB usage.
*/
ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (IA64_ID_PAGE_SHIFT << 2));
ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (IA64_ID_PAGE_SHIFT << 2));
ia64_srlz_d();
/*
* Clear out any random TLB entries left over from booting.
*/
@ -678,7 +668,7 @@ pmap_pinit(struct pmap *pmap)
int i;
PMAP_LOCK_INIT(pmap);
for (i = 0; i < 5; i++)
for (i = 0; i < 4; i++)
pmap->pm_rid[i] = pmap_allocate_rid();
TAILQ_INIT(&pmap->pm_pvlist);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@ -699,7 +689,7 @@ pmap_release(pmap_t pmap)
{
int i;
for (i = 0; i < 5; i++)
for (i = 0; i < 4; i++)
if (pmap->pm_rid[i])
pmap_free_rid(pmap->pm_rid[i]);
PMAP_LOCK_DESTROY(pmap);
@ -1219,7 +1209,6 @@ vm_paddr_t
pmap_kextract(vm_offset_t va)
{
struct ia64_lpte *pte;
vm_offset_t gwpage;
KASSERT(va >= IA64_RR_BASE(5), ("Must be kernel VA"));
@ -1227,19 +1216,16 @@ pmap_kextract(vm_offset_t va)
if (va >= IA64_RR_BASE(6))
return (IA64_RR_MASK(va));
/* EPC gateway page? */
gwpage = (vm_offset_t)ia64_get_k5();
if (va >= gwpage && va < gwpage + VM_GATEWAY_SIZE)
return (IA64_RR_MASK((vm_offset_t)ia64_gateway_page));
/* Bail out if the virtual address is beyond our limits. */
if (va >= kernel_vm_end)
return (0);
pte = pmap_find_kpte(va);
if (!pmap_present(pte))
return (0);
return (pmap_ppn(pte) | (va & PAGE_MASK));
if (va >= VM_MIN_KERNEL_ADDRESS) {
pte = pmap_find_kpte(va);
return (pmap_present(pte) ? pmap_ppn(pte)|(va&PAGE_MASK) : 0);
}
return (0);
}
/*
@ -2285,12 +2271,12 @@ pmap_switch(pmap_t pm)
if (prevpm == pm)
goto out;
if (pm == NULL) {
for (i = 0; i < 5; i++) {
for (i = 0; i < 4; i++) {
ia64_set_rr(IA64_RR_BASE(i),
(i << 8)|(PAGE_SHIFT << 2)|1);
}
} else {
for (i = 0; i < 5; i++) {
for (i = 0; i < 4; i++) {
ia64_set_rr(IA64_RR_BASE(i),
(pm->pm_rid[i] << 8)|(PAGE_SHIFT << 2)|1);
}
@ -2387,8 +2373,8 @@ print_trs(int type)
db_printf("V RID Virtual Page Physical Page PgSz ED AR PL D A MA P KEY\n");
for (i = 0; i <= maxtr; i++) {
bzero(&buf, sizeof(buf));
res = ia64_call_pal_stacked_physical
(PAL_VM_TR_READ, i, type, ia64_tpa((uint64_t) &buf));
res = ia64_pal_physical(PAL_VM_TR_READ, i, type,
ia64_tpa((uint64_t)&buf));
if (!(res.pal_result[0] & 1))
buf.pte &= ~PTE_AR_MASK;
if (!(res.pal_result[0] & 2))

View File

@ -58,131 +58,6 @@
.text
/*
* ia64_change_mode: change mode to/from physical mode
*
* Arguments:
* r14 psr for desired mode
*
* Modifies:
* r15-r19 scratch
* ar.bsp tranlated to new mode
*/
ENTRY_NOPROFILE(ia64_change_mode, 0)
rsm psr.i | psr.ic
mov r19=ar.rsc // save rsc while we change mode
tbit.nz p6,p7=r14,17 // physical or virtual ?
;;
mov ar.rsc=0 // turn off RSE
(p6) mov r15=7 // RR base for virtual addresses
(p7) mov r15=0 // RR base for physical addresses
;;
flushrs // no dirty registers please
srlz.i
;;
mov r16=ar.bsp
mov r17=rp
mov r18=ar.rnat
;;
dep r16=r15,r16,61,3 // new address of ar.bsp
dep r17=r15,r17,61,3 // new address of rp
dep sp=r15,sp,61,3 // new address of sp
;;
mov ar.bspstore=r16
mov rp=r17
;;
1: mov r16=ip
mov ar.rnat=r18
mov cr.ipsr=r14 // psr for new mode
;;
add r16=2f-1b,r16 // address to rfi to
;;
dep r16=r15,r16,61,3 // new mode address for rfi
;;
mov cr.iip=r16 // setup for rfi
mov cr.ifs=r0
;;
rfi
2: mov ar.rsc=r19 // restore ar.rsc
br.ret.sptk.few rp // now in new mode
END(ia64_change_mode)
/*
* ia64_physical_mode: change mode to physical mode
*
* Return:
* ret0 psr to restore
*
* Modifies:
* r15-r18 scratch
* ar.bsp tranlated to physical mode
* psr.i cleared
*/
ENTRY(ia64_physical_mode, 0)
mov r14=psr
mov ret0=psr
movl r15=(IA64_PSR_I|IA64_PSR_IT|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFL|IA64_PSR_DFH)
movl r16=IA64_PSR_BN
;;
andcm r14=r14,r15 // clear various xT bits
;;
or r14=r14,r16 // make sure BN=1
or ret0=ret0,r16 // make sure BN=1
br.cond.sptk.many ia64_change_mode
END(ia64_physical_mode)
/*
* ia64_call_efi_physical: call an EFI procedure in physical mode
*
* Arguments:
* in0 Address of EFI procedure descriptor
* in1-in5 Arguments to EFI procedure
*
* Return:
* ret0-ret3 return values from EFI
*
*/
ENTRY(ia64_call_efi_physical, 6)
.prologue
.regstk 6,4,5,0
.save ar.pfs,loc0
alloc loc0=ar.pfs,6,4,5,0
;;
.save rp,loc1
mov loc1=rp
;;
.body
br.call.sptk.many rp=ia64_physical_mode
;;
mov loc2=r8 // psr to restore mode
mov loc3=gp // save kernel gp
ld8 r14=[in0],8 // function address
;;
mov out0=in1
mov out1=in2
mov out2=in3
mov out3=in4
mov out4=in5
ld8 gp=[in0] // function gp value
;;
mov b6=r14
;;
br.call.sptk.many rp=b6 // call EFI procedure
mov gp=loc3 // restore kernel gp
;;
mov r14=loc2 // psr to restore mode
br.call.sptk.many rp=ia64_change_mode
;;
mov rp=loc1
mov ar.pfs=loc0
;;
br.ret.sptk.many rp
END(ia64_call_efi_physical)
/**************************************************************************/
ENTRY(fusufault, 0)
{ .mib
st8.rel [r15]=r0 // Clear onfault.
@ -199,7 +74,7 @@ END(fusufault)
ENTRY(casuword, 3)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
movl r14=VM_MAXUSER_ADDRESS
;;
}
{ .mib
@ -249,7 +124,7 @@ END(casuword)
ENTRY(casuword32, 3)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
movl r14=VM_MAXUSER_ADDRESS
;;
}
{ .mib
@ -303,7 +178,7 @@ END(casuword32)
ENTRY(subyte, 2)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
movl r14=VM_MAXUSER_ADDRESS
;;
}
{ .mib
@ -349,7 +224,7 @@ END(subyte)
ENTRY(suword16, 2)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
movl r14=VM_MAXUSER_ADDRESS
;;
}
{ .mib
@ -395,7 +270,7 @@ END(suword16)
ENTRY(suword32, 2)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
movl r14=VM_MAXUSER_ADDRESS
;;
}
{ .mib
@ -442,7 +317,7 @@ ENTRY(suword64, 2)
XENTRY(suword)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
movl r14=VM_MAXUSER_ADDRESS
;;
}
{ .mib
@ -496,7 +371,7 @@ END(suword64)
ENTRY(fubyte, 1)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
movl r14=VM_MAXUSER_ADDRESS
;;
}
{ .mib
@ -542,7 +417,7 @@ END(fubyte)
ENTRY(fuword16, 2)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
movl r14=VM_MAXUSER_ADDRESS
;;
}
{ .mib
@ -588,7 +463,7 @@ END(fuword16)
ENTRY(fuword32, 2)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
movl r14=VM_MAXUSER_ADDRESS
;;
}
{ .mib
@ -635,7 +510,7 @@ ENTRY(fuword64, 2)
XENTRY(fuword)
{ .mlx
add r15=PC_CURTHREAD,r13
movl r14=VM_MAX_ADDRESS
movl r14=VM_MAXUSER_ADDRESS
;;
}
{ .mib
@ -750,7 +625,7 @@ ENTRY(copyinstr, 4)
mov loc1=rp
.body
movl loc2=VM_MAX_ADDRESS // make sure that src addr
movl loc2=VM_MAXUSER_ADDRESS // make sure that src addr
;;
cmp.geu p6,p0=in0,loc2 // is in user space.
;;
@ -863,7 +738,7 @@ ENTRY(copyin, 3)
mov loc1=rp
.body
movl loc2=VM_MAX_ADDRESS // make sure that src addr
movl loc2=VM_MAXUSER_ADDRESS // make sure that src addr
;;
cmp.geu p6,p0=in0,loc2 // is in user space.
;;
@ -901,7 +776,7 @@ ENTRY(copyout, 3)
mov loc1=rp
.body
movl loc2=VM_MAX_ADDRESS // make sure that dest addr
movl loc2=VM_MAXUSER_ADDRESS // make sure that dest addr
;;
cmp.geu p6,p0=in1,loc2 // is in user space.
;;

View File

@ -532,7 +532,7 @@ trap(int vector, struct trapframe *tf)
rv = 0;
va = trunc_page(tf->tf_special.ifa);
if (va >= VM_MAX_ADDRESS) {
if (va >= VM_MAXUSER_ADDRESS) {
/*
* Don't allow user-mode faults for kernel virtual
* addresses, including the gateway page.
@ -809,7 +809,7 @@ trap(int vector, struct trapframe *tf)
* iip and enable single stepping only when it's an user
* address.
*/
if (tf->tf_special.iip >= VM_MAX_ADDRESS)
if (tf->tf_special.iip >= VM_MAXUSER_ADDRESS)
return;
tf->tf_special.psr &= ~IA64_PSR_TB;
tf->tf_special.psr |= IA64_PSR_SS;

View File

@ -109,7 +109,7 @@ greg_ptr(mcontext_t *mc, int gr)
static uint64_t
rdreg(uint64_t *addr)
{
if ((uintptr_t)addr < VM_MAX_ADDRESS)
if ((uintptr_t)addr < VM_MAXUSER_ADDRESS)
return (fuword(addr));
return (*addr);
}
@ -117,7 +117,7 @@ rdreg(uint64_t *addr)
static void
wrreg(uint64_t *addr, uint64_t val)
{
if ((uintptr_t)addr < VM_MAX_ADDRESS)
if ((uintptr_t)addr < VM_MAXUSER_ADDRESS)
suword(addr, val);
else
*addr = val;

View File

@ -48,4 +48,4 @@ struct bootinfo {
uint64_t bi_modulep; /* preloaded modules */
};
extern struct bootinfo bootinfo;
extern struct bootinfo *bootinfo;

View File

@ -151,6 +151,12 @@ struct efi_systbl {
uint64_t st_cfgtbl;
};
#ifdef _KERNEL
typedef u_long (*ia64_efi_f)(u_long, u_long, u_long, u_long);
u_long ia64_efi_physical(ia64_efi_f, u_long, u_long, u_long, u_long);
void efi_boot_finish(void);
int efi_boot_minimal(uint64_t);
void *efi_get_table(struct uuid *);
@ -163,4 +169,6 @@ int efi_var_get(efi_char *, struct uuid *, uint32_t *, size_t *, void *);
int efi_var_nextname(size_t *, efi_char *, struct uuid *);
int efi_var_set(efi_char *, struct uuid *, uint32_t, size_t, void *);
#endif /* _KERNEL */
#endif /* _MACHINE_EFI_H_ */

View File

@ -111,16 +111,12 @@ struct ia64_pal_result {
uint64_t pal_result[3];
};
struct ia64_pal_result ia64_pal_physical(u_long, u_long, u_long, u_long);
struct ia64_pal_result ia64_call_pal_static(uint64_t proc, uint64_t arg1,
uint64_t arg2, uint64_t arg3);
struct ia64_pal_result ia64_call_pal_static_physical(uint64_t proc,
uint64_t arg1, uint64_t arg2, uint64_t arg3);
struct ia64_pal_result ia64_call_pal_stacked(uint64_t proc, uint64_t arg1,
uint64_t arg2, uint64_t arg3);
struct ia64_pal_result ia64_call_pal_stacked_physical(uint64_t proc,
uint64_t arg1, uint64_t arg2, uint64_t arg3);
#endif /* _MACHINE_PAL_H_ */

View File

@ -155,6 +155,8 @@
/* Place the backing store in the top of half if region 0. */
#define IA64_BACKINGSTORE IA64_REGION_TOP_HALF
#define VM_GATEWAY_SIZE PAGE_SIZE
/*
* Parameters for Pre-Boot Virtual Memory (PBVM).
* The kernel, its modules and metadata are loaded in the PBVM by the loader.
@ -198,10 +200,9 @@
/* user/kernel map constants */
#define VM_MIN_ADDRESS 0
#define VM_MAXUSER_ADDRESS IA64_RR_BASE(IA64_PBVM_RR)
#define VM_MAX_ADDRESS VM_MAXUSER_ADDRESS /* XXX */
#define VM_GATEWAY_SIZE PAGE_SIZE
#define VM_MIN_KERNEL_ADDRESS IA64_RR_BASE(5)
#define VM_MAX_KERNEL_ADDRESS (IA64_RR_BASE(6) - 1)
#define VM_MAX_ADDRESS ~0UL
#define KERNBASE VM_MAXUSER_ADDRESS