Add support for Pre-Boot Virtual Memory (PBVM) to the loader.

PBVM allows us to link the kernel at a fixed virtual address without
having to make any assumptions about the physical memory layout. On
the SGI Altix 350, for example, there's no usable physical memory
below 192GB. PBVM also gives us better control over where we
physically load the kernel and its modules, so that we can make
sure the kernel is loaded in memory close to the BSP.

The PBVM is managed by a simple page table. The minimum size of the
page table is 4KB (EFI page size) and the maximum is currently set
to 1MB. A page in the PBVM is 64KB, as that's the maximum alignment
one can specify in a linker script. The bottom line is that PBVM is
between 64KB and 8GB in size.
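
Concretely, each 8-byte page-table entry maps one 64KB PBVM page, so
the smallest PBVM is a single page and a full 1MB table maps 8GB. A
compile-time sketch of that arithmetic (illustrative only, not part
of this commit):

#include <stdint.h>

#define PTE_SIZE	8			/* bytes per page-table entry */
#define PBVM_PAGE_SIZE	(64 * 1024)		/* 64KB PBVM page */
#define PGTBL_MIN	4096			/* one EFI page */
#define PGTBL_MAX	(1024 * 1024)		/* 1MB cap */

/* A 1MB table holds 128K entries: 128K x 64KB = 8GB of PBVM. */
_Static_assert((uint64_t)PGTBL_MAX / PTE_SIZE * PBVM_PAGE_SIZE ==
    8ULL * 1024 * 1024 * 1024, "1MB of PTEs maps 8GB");
/* A single EFI page of entries covers 512 x 64KB = 32MB. */
_Static_assert((uint64_t)PGTBL_MIN / PTE_SIZE * PBVM_PAGE_SIZE ==
    32ULL * 1024 * 1024, "4KB of PTEs maps 32MB");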

The loader maps the PBVM page table at a fixed virtual address
using a single translation. The PBVM itself is also mapped using a
single translation, for a maximum of 32MB.
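
That 32MB ceiling is consistent on both sides: a 4KB page table maps
exactly 32MB (see the check above), and the EFI loader allocates a
32MB physically contiguous chunk (IA64_EFI_CHUNK_SIZE in the diff)
so it can be wired with one PTE. Because a wired translation must
cover a naturally aligned power-of-2 region, mmu_setup_paged() trims
the wired size first; a minimal sketch of that trim, mirroring the
loop in the diff:

/* Trim sz to a power of 2 in whole 64KB pages, as mmu_setup_paged()
 * does before wiring the PBVM with a single translation. */
static unsigned long
wire_size(unsigned long sz)
{
	sz = (sz + 0xffffUL) & ~0xffffUL;	/* round up to 64KB */
	while (sz & (sz - 1))			/* shed pages until pow2 */
		sz -= 64 * 1024;
	return (sz);
}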

While here, increase the heap in the EFI loader from 512KB to 2MB
and set the stage for supporting relocatable modules.
marcel 2011-03-16 03:53:18 +00:00
parent e79931d617
commit 8e0b0a2284
15 changed files with 464 additions and 145 deletions

View File

@@ -7,6 +7,8 @@ SRCS+= module.c panic.c
.if ${MACHINE} == "i386" || ${MACHINE_CPUARCH} == "amd64"
SRCS+= load_elf32.c load_elf32_obj.c reloc_elf32.c
SRCS+= load_elf64.c load_elf64_obj.c reloc_elf64.c
.elif ${MACHINE_CPUARCH} == "ia64"
SRCS+= load_elf64.c load_elf64_obj.c reloc_elf64.c
.elif ${MACHINE} == "pc98"
SRCS+= load_elf32.c load_elf32_obj.c reloc_elf32.c
.elif ${MACHINE_CPUARCH} == "arm"
@@ -14,7 +16,7 @@ SRCS+= load_elf32.c reloc_elf32.c
.elif ${MACHINE_CPUARCH} == "powerpc"
SRCS+= load_elf32.c reloc_elf32.c
SRCS+= load_elf64.c reloc_elf64.c
.elif ${MACHINE_CPUARCH} == "sparc64" || ${MACHINE_CPUARCH} == "ia64"
.elif ${MACHINE_CPUARCH} == "sparc64"
SRCS+= load_elf64.c reloc_elf64.c
.endif

View File

@@ -92,7 +92,7 @@ efi_main(EFI_HANDLE image_handle, EFI_SYSTEM_TABLE *system_table)
BS = ST->BootServices;
RS = ST->RuntimeServices;
heapsize = 512*1024;
heapsize = 2 * 1024 * 1024;
status = BS->AllocatePages(AllocateAnyPages, EfiLoaderData,
EFI_SIZE_TO_PAGES(heapsize), &heap);
if (status != EFI_SUCCESS)

View File

@@ -226,7 +226,7 @@ bi_copymodules(vm_offset_t addr)
* - Module metadata are formatted and placed in kernel space.
*/
int
bi_load(struct preloaded_file *fp, uint64_t *bi_addr)
ia64_bootinfo(struct preloaded_file *fp, struct bootinfo **res)
{
struct bootinfo bi;
struct preloaded_file *xp;
@@ -234,7 +234,9 @@ bi_load(struct preloaded_file *fp, uint64_t *bi_addr)
struct devdesc *rootdev;
char *rootdevname;
vm_offset_t addr, ssym, esym;
int error;
*res = NULL;
bzero(&bi, sizeof(struct bootinfo));
bi.bi_magic = BOOTINFO_MAGIC;
bi.bi_version = 1;
@@ -289,8 +291,28 @@ bi_load(struct preloaded_file *fp, uint64_t *bi_addr)
bi.bi_envp = 0;
}
addr = (addr + PAGE_MASK) & ~PAGE_MASK;
addr = (addr + 15) & ~15;
bi.bi_kernend = addr;
return (ldr_bootinfo(&bi, bi_addr));
error = ia64_platform_bootinfo(&bi, res);
if (error)
return (error);
if (IS_LEGACY_KERNEL()) {
if (*res == NULL)
return (EDOOFUS);
bcopy(&bi, *res, sizeof(bi));
return (0);
}
bi.bi_pbvm_pgtbl = (uintptr_t)ia64_pgtbl;
bi.bi_pbvm_pgtblsz = ia64_pgtblsz;
ia64_copyin((void *)bi.bi_memmap, addr, bi.bi_memmap_size);
bi.bi_memmap = addr;
addr = (addr + bi.bi_memmap_size + 15) & ~15;
bi.bi_kernend = addr + sizeof(bi);
ia64_copyin(&bi, addr, sizeof(bi));
*res = (void *)addr;
return (0);
}

View File

@@ -32,17 +32,98 @@ __FBSDID("$FreeBSD$");
#include "libia64.h"
uint64_t *ia64_pgtbl;
uint32_t ia64_pgtblsz;
static int
pgtbl_extend(u_int idx)
{
uint64_t *pgtbl;
uint32_t pgtblsz;
u_int pot;
pgtblsz = (idx + 1) << 3;
/* The minimum size is 4KB. */
if (pgtblsz < 4096)
pgtblsz = 4096;
/* Find the next higher power of 2. */
pgtblsz--;
for (pot = 1; pot < 32; pot <<= 1)
pgtblsz = pgtblsz | (pgtblsz >> pot);
pgtblsz++;
/* The maximum size is 1MB. */
if (pgtblsz > 1048576)
return (ENOMEM);
/* Make sure the size is a valid (mappable) page size. */
if (pgtblsz == 32*1024 || pgtblsz == 128*1024 || pgtblsz == 512*1024)
pgtblsz <<= 1;
/* Allocate naturally aligned memory. */
pgtbl = (void *)ia64_platform_alloc(0, pgtblsz);
if (pgtbl == NULL)
return (ENOMEM);
/* Initialize new page table. */
if (ia64_pgtbl != NULL && ia64_pgtbl != pgtbl)
bcopy(ia64_pgtbl, pgtbl, ia64_pgtblsz);
bzero(pgtbl + (ia64_pgtblsz >> 3), pgtblsz - ia64_pgtblsz);
if (ia64_pgtbl != NULL && ia64_pgtbl != pgtbl)
ia64_platform_free(0, (uintptr_t)ia64_pgtbl, ia64_pgtblsz);
ia64_pgtbl = pgtbl;
ia64_pgtblsz = pgtblsz;
return (0);
}
static void *
va2pa(vm_offset_t va, size_t *len)
{
uint64_t pa;
u_int idx, ofs;
int error;
/* Backward compatibility. */
if (va >= IA64_RR_BASE(7)) {
pa = IA64_RR_MASK(va);
return ((void *)pa);
}
printf("\n%s: va=%lx, *len=%lx\n", __func__, va, *len);
if (va < IA64_PBVM_BASE) {
error = EINVAL;
goto fail;
}
idx = (va - IA64_PBVM_BASE) >> IA64_PBVM_PAGE_SHIFT;
if (idx >= (ia64_pgtblsz >> 3)) {
error = pgtbl_extend(idx);
if (error)
goto fail;
}
ofs = va & IA64_PBVM_PAGE_MASK;
pa = ia64_pgtbl[idx];
if (pa == 0) {
pa = ia64_platform_alloc(va - ofs, IA64_PBVM_PAGE_SIZE);
if (pa == 0) {
error = ENOMEM;
goto fail;
}
ia64_pgtbl[idx] = pa;
}
pa += ofs;
/* We can not cross page boundaries (in general). */
if (*len + ofs > IA64_PBVM_PAGE_SIZE)
*len = IA64_PBVM_PAGE_SIZE - ofs;
return ((void *)pa);
fail:
*len = 0;
return (NULL);
}
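
The sizing logic in pgtbl_extend() above rounds up to the next power
of 2 with the classic bit-smearing trick. A standalone illustration
(hypothetical helper name, same logic as the loop in the diff):

#include <stdint.h>
#include <stdio.h>

static uint32_t
next_pow2(uint32_t v)
{
	unsigned pot;

	v--;					/* keep exact powers of 2 */
	for (pot = 1; pot < 32; pot <<= 1)
		v |= v >> pot;			/* smear top bit rightward */
	return (v + 1);
}

int
main(void)
{
	/* A 4097-byte requirement grows the page table to 8KB. */
	printf("%u\n", next_pow2(4097));	/* prints 8192 */
	return (0);
}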

View File

@@ -36,25 +36,36 @@ __FBSDID("$FreeBSD$");
#include <machine/ia64_cpu.h>
#include <machine/pte.h>
#include <ia64/include/bootinfo.h>
#include <ia64/include/vmparam.h>
#include <efi.h>
#include <efilib.h>
#include "bootstrap.h"
#include "libia64.h"
#define _KERNEL
static int elf64_exec(struct preloaded_file *amp);
static int elf64_obj_exec(struct preloaded_file *amp);
static int elf64_exec(struct preloaded_file *amp);
static struct file_format ia64_elf = {
elf64_loadfile,
elf64_exec
};
static struct file_format ia64_elf_obj = {
elf64_obj_loadfile,
elf64_obj_exec
};
struct file_format ia64_elf = { elf64_loadfile, elf64_exec };
struct file_format *file_formats[] = {
&ia64_elf,
&ia64_elf_obj,
NULL
};
/*
* Entered with psr.ic and psr.i both zero.
*/
void
enter_kernel(uint64_t start, uint64_t bi)
static void
enter_kernel(uint64_t start, struct bootinfo *bi)
{
__asm __volatile("srlz.i;;");
@@ -73,53 +84,130 @@ enter_kernel(uint64_t start, uint64_t bi)
/* NOTREACHED */
}
static int
elf64_exec(struct preloaded_file *fp)
static void
mmu_wire(vm_offset_t va, vm_paddr_t pa, vm_size_t sz, u_int acc)
{
struct file_metadata *md;
Elf_Ehdr *hdr;
pt_entry_t pte;
uint64_t bi_addr;
static u_int iidx = 0, didx = 0;
pt_entry_t pte;
u_int shft;
md = file_findmetadata(fp, MODINFOMD_ELFHDR);
if (md == NULL)
return (EINVAL);
hdr = (Elf_Ehdr *)&(md->md_data);
/* Round up to the smallest possible page size. */
if (sz < 4096)
sz = 4096;
/* Determine the exponent (base 2). */
shft = 0;
while (sz > 1) {
shft++;
sz >>= 1;
}
/* Truncate to the largest possible page size (256MB). */
if (shft > 28)
shft = 28;
/* Round down to a valid (mappable) page size. */
if (shft > 14 && (shft & 1) != 0)
shft--;
bi_load(fp, &bi_addr);
pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
PTE_PL_KERN | (acc & PTE_AR_MASK) | (pa & PTE_PPN_MASK);
printf("Entering %s at 0x%lx...\n", fp->f_name, hdr->e_entry);
__asm __volatile("mov cr.ifa=%0" :: "r"(va));
__asm __volatile("mov cr.itir=%0" :: "r"(shft << 2));
__asm __volatile("srlz.d;;");
ldr_enter(fp->f_name);
__asm __volatile("ptr.d %0,%1" :: "r"(va), "r"(shft << 2));
__asm __volatile("srlz.d;;");
__asm __volatile("itr.d dtr[%0]=%1" :: "r"(didx), "r"(pte));
__asm __volatile("srlz.d;;");
didx++;
__asm __volatile("rsm psr.ic|psr.i;;");
__asm __volatile("srlz.i;;");
if (acc == PTE_AR_RWX) {
__asm __volatile("ptr.i %0,%1;;" :: "r"(va), "r"(shft << 2));
__asm __volatile("srlz.i;;");
__asm __volatile("itr.i itr[%0]=%1;;" :: "r"(iidx), "r"(pte));
__asm __volatile("srlz.i;;");
iidx++;
}
}
static void
mmu_setup_legacy(uint64_t entry)
{
/*
* Region 6 is direct mapped UC and region 7 is direct mapped
* WC. The details of this is controlled by the Alt {I,D}TLB
* handlers. Here we just make sure that they have the largest
* handlers. Here we just make sure that they have the largest
* possible page size to minimise TLB usage.
*/
ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2));
ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));
pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
PTE_PL_KERN | PTE_AR_RWX | PTE_ED;
pte |= IA64_RR_MASK(hdr->e_entry) & PTE_PPN_MASK;
__asm __volatile("mov cr.ifa=%0" :: "r"(hdr->e_entry));
__asm __volatile("mov cr.itir=%0" :: "r"(28 << 2));
__asm __volatile("ptr.i %0,%1" :: "r"(hdr->e_entry), "r"(28<<2));
__asm __volatile("ptr.d %0,%1" :: "r"(hdr->e_entry), "r"(28<<2));
__asm __volatile("srlz.i;;");
__asm __volatile("itr.i itr[%0]=%1;;" :: "r"(0), "r"(pte));
__asm __volatile("srlz.i;;");
__asm __volatile("itr.d dtr[%0]=%1;;" :: "r"(0), "r"(pte));
__asm __volatile("srlz.i;;");
enter_kernel(hdr->e_entry, bi_addr);
/* NOTREACHED */
return (0);
mmu_wire(entry, IA64_RR_MASK(entry), 1UL << 28, PTE_AR_RWX);
}
static void
mmu_setup_paged(vm_offset_t pbvm_top)
{
vm_size_t sz;
ia64_set_rr(IA64_RR_BASE(IA64_PBVM_RR),
(IA64_PBVM_RR << 8) | (IA64_PBVM_PAGE_SHIFT << 2));
__asm __volatile("srlz.i;;");
/* Wire the PBVM page table. */
mmu_wire(IA64_PBVM_PGTBL, (uintptr_t)ia64_pgtbl, ia64_pgtblsz,
PTE_AR_RW);
/* Wire as much of the PBVM we can. This must be a power of 2. */
sz = pbvm_top - IA64_PBVM_BASE;
sz = (sz + IA64_PBVM_PAGE_MASK) & ~IA64_PBVM_PAGE_MASK;
while (sz & (sz - 1))
sz -= IA64_PBVM_PAGE_SIZE;
mmu_wire(IA64_PBVM_BASE, ia64_pgtbl[0], sz, PTE_AR_RWX);
}
static int
elf64_exec(struct preloaded_file *fp)
{
struct bootinfo *bi;
struct file_metadata *md;
Elf_Ehdr *hdr;
int error;
md = file_findmetadata(fp, MODINFOMD_ELFHDR);
if (md == NULL)
return (EINVAL);
error = ia64_bootinfo(fp, &bi);
if (error)
return (error);
hdr = (Elf_Ehdr *)&(md->md_data);
printf("Entering %s at 0x%lx...\n", fp->f_name, hdr->e_entry);
error = ia64_platform_enter(fp->f_name);
if (error)
return (error);
__asm __volatile("rsm psr.ic|psr.i;;");
__asm __volatile("srlz.i;;");
if (IS_LEGACY_KERNEL())
mmu_setup_legacy(hdr->e_entry);
else
mmu_setup_paged((uintptr_t)(bi + 1));
enter_kernel(hdr->e_entry, bi);
/* NOTREACHED */
return (EDOOFUS);
}
static int
elf64_obj_exec(struct preloaded_file *fp)
{
printf("%s called for preloaded file %p (=%s):\n", __func__, fp,
fp->f_name);
return (ENOSYS);
}
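
For reference, mmu_wire() above picks the translation's page size by
taking the floor log2 of the request, capping it at 256MB, and
rounding odd shifts above 16KB down, since the insertable ia64 page
sizes in that range are the even shifts only (64K, 256K, ..., 256M).
A standalone sketch of that selection (callers in the diff pass
power-of-2 sizes, so the floor is exact):

#include <stdio.h>

static unsigned
pgshift(unsigned long sz)
{
	unsigned shft;

	if (sz < 4096)				/* smallest page: 4KB */
		sz = 4096;
	shft = 0;
	while (sz > 1) {			/* floor(log2(sz)) */
		shft++;
		sz >>= 1;
	}
	if (shft > 28)				/* largest page: 256MB */
		shft = 28;
	if (shft > 14 && (shft & 1) != 0)	/* even shifts only >16KB */
		shft--;
	return (shft);
}

int
main(void)
{
	printf("%u\n", pgshift(32UL * 1024 * 1024));	/* prints 24: 16MB */
	return (0);
}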

View File

@@ -31,28 +31,35 @@
#include <bootstrap.h>
#include <ia64/include/bootinfo.h>
#include <ia64/include/vmparam.h>
#define IS_LEGACY_KERNEL() (ia64_pgtbl == NULL || ia64_pgtblsz == 0)
/*
* Portability functions provided by the loader
* implementation specific to the platform.
*/
extern uint64_t ldr_alloc(vm_offset_t);
extern int ldr_bootinfo(struct bootinfo *, uint64_t *);
extern int ldr_enter(const char *);
vm_paddr_t ia64_platform_alloc(vm_offset_t, vm_size_t);
void ia64_platform_free(vm_offset_t, vm_paddr_t, vm_size_t);
int ia64_platform_bootinfo(struct bootinfo *, struct bootinfo **);
int ia64_platform_enter(const char *);
/*
* Functions and variables provided by the ia64 common code
* and shared by all loader implementations.
*/
extern uint64_t *ia64_pgtbl;
extern uint32_t ia64_pgtblsz;
extern int ia64_autoload(void);
int ia64_autoload(void);
int ia64_bootinfo(struct preloaded_file *, struct bootinfo **);
extern ssize_t ia64_copyin(const void *, vm_offset_t, size_t);
extern ssize_t ia64_copyout(vm_offset_t, void *, size_t);
extern ssize_t ia64_readin(int, vm_offset_t, size_t);
ssize_t ia64_copyin(const void *, vm_offset_t, size_t);
ssize_t ia64_copyout(vm_offset_t, void *, size_t);
ssize_t ia64_readin(int, vm_offset_t, size_t);
extern char *ia64_fmtdev(struct devdesc *);
extern int ia64_getdev(void **, const char *, const char **);
extern int ia64_setcurrdev(struct env_var *, int, const void *);
char *ia64_fmtdev(struct devdesc *);
int ia64_getdev(void **, const char *, const char **);
int ia64_setcurrdev(struct env_var *, int, const void *);
#endif /* !_LIBIA64_H_ */

View File

@@ -68,17 +68,6 @@ struct netif_driver *netif_drivers[] = {
NULL
};
/*
* Sort formats so that those that can detect based on arguments
* rather than reading the file go first.
*/
extern struct file_format ia64_elf;
struct file_format *file_formats[] = {
&ia64_elf,
NULL
};
/*
* Consoles
*

View File

@@ -45,26 +45,147 @@ static EFI_GUID fpswa_guid = EFI_INTEL_FPSWA;
static EFI_GUID hcdp_guid = HCDP_TABLE_GUID;
static EFI_MEMORY_DESCRIPTOR *memmap;
static UINTN memmapsz;
static UINTN mapkey;
static UINTN descsz;
static UINT32 descver;
uint64_t
ldr_alloc(vm_offset_t va)
#define IA64_EFI_CHUNK_SIZE (32 * 1048576)
static vm_paddr_t ia64_efi_chunk;
#define IA64_EFI_PGTBLSZ_MAX 1048576
static vm_paddr_t ia64_efi_pgtbl;
static vm_size_t ia64_efi_pgtblsz;
/* Don't allocate memory below the boundary */
#define IA64_EFI_ALLOC_BOUNDARY 1048576
static int
ia64_efi_memmap_update(void)
{
EFI_STATUS status;
if (memmap != NULL) {
free(memmap);
memmap = NULL;
}
memmapsz = 0;
BS->GetMemoryMap(&memmapsz, NULL, &mapkey, &descsz, &descver);
if (memmapsz == 0)
return (FALSE);
memmap = malloc(memmapsz);
if (memmap == NULL)
return (FALSE);
status = BS->GetMemoryMap(&memmapsz, memmap, &mapkey, &descsz,
&descver);
if (EFI_ERROR(status)) {
free(memmap);
memmap = NULL;
return (FALSE);
}
return (TRUE);
}
static vm_paddr_t
ia64_efi_alloc(vm_size_t sz)
{
EFI_PHYSICAL_ADDRESS pa;
EFI_MEMORY_DESCRIPTOR *mm;
uint8_t *mmiter, *mmiterend;
vm_size_t memsz;
UINTN npgs;
EFI_STATUS status;
/* We can't allocate less than a page */
if (sz < EFI_PAGE_SIZE)
return (0);
/* The size must be a power of 2. */
if (sz & (sz - 1))
return (0);
if (!ia64_efi_memmap_update())
return (0);
mmiter = (void *)memmap;
mmiterend = mmiter + memmapsz;
for (; mmiter < mmiterend; mmiter += descsz) {
mm = (void *)mmiter;
if (mm->Type != EfiConventionalMemory)
continue;
memsz = mm->NumberOfPages * EFI_PAGE_SIZE;
if (mm->PhysicalStart + memsz <= IA64_EFI_ALLOC_BOUNDARY)
continue;
/*
* XXX We really should make sure the memory is local to the
* BSP.
*/
pa = (mm->PhysicalStart < IA64_EFI_ALLOC_BOUNDARY) ?
IA64_EFI_ALLOC_BOUNDARY : mm->PhysicalStart;
pa = (pa + sz - 1) & ~(sz - 1);
if (pa + sz > mm->PhysicalStart + memsz)
continue;
npgs = EFI_SIZE_TO_PAGES(sz);
status = BS->AllocatePages(AllocateAddress, EfiLoaderData,
npgs, &pa);
if (!EFI_ERROR(status))
return (pa);
}
printf("%s: unable to allocate %lx bytes\n", __func__, sz);
return (0);
}
vm_paddr_t
ia64_platform_alloc(vm_offset_t va, vm_size_t sz)
{
if (va == 0) {
/* Page table itself. */
if (sz > IA64_EFI_PGTBLSZ_MAX)
return (0);
if (ia64_efi_pgtbl == 0)
ia64_efi_pgtbl = ia64_efi_alloc(IA64_EFI_PGTBLSZ_MAX);
if (ia64_efi_pgtbl != 0)
ia64_efi_pgtblsz = sz;
return (ia64_efi_pgtbl);
} else if (va < IA64_PBVM_BASE) {
/* Should not happen. */
return (0);
}
/* Loader virtual memory page. */
va -= IA64_PBVM_BASE;
/* Allocate a big chunk that can be wired with a single PTE. */
if (ia64_efi_chunk == 0)
ia64_efi_chunk = ia64_efi_alloc(IA64_EFI_CHUNK_SIZE);
if (va < IA64_EFI_CHUNK_SIZE)
return (ia64_efi_chunk + va);
/* Allocate a page at a time when we go beyond the chunk. */
return (ia64_efi_alloc(sz));
}
void
ia64_platform_free(vm_offset_t va, vm_paddr_t pa, vm_size_t sz)
{
BS->FreePages(pa, sz >> EFI_PAGE_SHIFT);
}
int
ldr_bootinfo(struct bootinfo *bi, uint64_t *bi_addr)
ia64_platform_bootinfo(struct bootinfo *bi, struct bootinfo **res)
{
VOID *fpswa;
EFI_MEMORY_DESCRIPTOR *mm;
EFI_PHYSICAL_ADDRESS addr;
EFI_HANDLE handle;
EFI_STATUS status;
size_t bisz;
UINTN mmsz, pages, sz;
UINT32 mmver;
UINTN sz;
bi->bi_systab = (uint64_t)ST;
bi->bi_hcdp = (uint64_t)efi_get_table(&hcdp_guid);
@@ -75,55 +196,22 @@ ldr_bootinfo(struct bootinfo *bi, uint64_t *bi_addr)
status = BS->HandleProtocol(handle, &fpswa_guid, &fpswa);
bi->bi_fpswa = (status == 0) ? (uint64_t)fpswa : 0;
bisz = (sizeof(struct bootinfo) + 0x0f) & ~0x0f;
/*
* Allocate enough pages to hold the bootinfo block and the memory
* map EFI will return to us. The memory map has an unknown size,
* so we have to determine that first. Note that the AllocatePages
* call can itself modify the memory map, so we have to take that
* into account as well. The changes to the memory map are caused
* by splitting a range of free memory into two (AFAICT), so that
* one is marked as being loader data.
*/
sz = 0;
BS->GetMemoryMap(&sz, NULL, &mapkey, &mmsz, &mmver);
sz += mmsz;
sz = (sz + 15) & ~15;
pages = EFI_SIZE_TO_PAGES(sz + bisz);
status = BS->AllocatePages(AllocateAnyPages, EfiLoaderData, pages,
&addr);
if (EFI_ERROR(status)) {
printf("%s: AllocatePages() returned 0x%lx\n", __func__,
(long)status);
if (!ia64_efi_memmap_update())
return (ENOMEM);
}
/*
* Read the memory map and stash it after bootinfo. Align the
* memory map on a 16-byte boundary (the bootinfo block is page
* aligned).
*/
*bi_addr = addr;
mm = (void *)(addr + bisz);
sz = (EFI_PAGE_SIZE * pages) - bisz;
status = BS->GetMemoryMap(&sz, mm, &mapkey, &mmsz, &mmver);
if (EFI_ERROR(status)) {
printf("%s: GetMemoryMap() returned 0x%lx\n", __func__,
(long)status);
return (EINVAL);
}
bi->bi_memmap = (uint64_t)mm;
bi->bi_memmap_size = sz;
bi->bi_memdesc_size = mmsz;
bi->bi_memdesc_version = mmver;
bi->bi_memmap = (uint64_t)memmap;
bi->bi_memmap_size = memmapsz;
bi->bi_memdesc_size = descsz;
bi->bi_memdesc_version = descver;
if (IS_LEGACY_KERNEL())
*res = malloc(sizeof(**res));
bcopy(bi, (void *)(*bi_addr), sizeof(*bi));
return (0);
}
int
ldr_enter(const char *kernel)
ia64_platform_enter(const char *kernel)
{
EFI_STATUS status;

View File

@@ -3,6 +3,7 @@ $FreeBSD$
NOTE ANY CHANGES YOU MAKE TO THE BOOTBLOCKS HERE. The format of this
file is important. Make sure the current version number is on line 6.
3.0: Add support for PBVM.
2.2: Create direct mapping based on start address instead of mapping
first 256M.
2.1: Add support for "-dev <part>" argument parsing.

View File

@@ -60,18 +60,6 @@ struct fs_ops *file_system[] = {
NULL
};
/* Exported for ia64 only */
/*
* Sort formats so that those that can detect based on arguments
* rather than reading the file go first.
*/
extern struct file_format ia64_elf;
struct file_format *file_formats[] = {
&ia64_elf,
NULL
};
/*
* Consoles
*

View File

@@ -46,7 +46,6 @@ extern int ski_boot(void);
struct bootinfo;
struct preloaded_file;
extern int bi_load(struct bootinfo *, struct preloaded_file *);
#define SSC_CONSOLE_INIT 20
#define SSC_GETCHAR 21

View File

@@ -33,24 +33,31 @@ __FBSDID("$FreeBSD$");
#include "libski.h"
#define PHYS_START (4L*1024*1024*1024)
#define PHYS_SIZE (64L*1024*1024 - 4L*1024)
extern void acpi_stub_init(void);
extern void efi_stub_init(struct bootinfo *);
extern void sal_stub_init(void);
uint64_t
ldr_alloc(vm_offset_t va)
vm_paddr_t
ia64_platform_alloc(vm_offset_t va, vm_size_t sz __unused)
{
vm_paddr_t pa;
if (va >= PHYS_SIZE)
return (0);
return (va + PHYS_START);
if (va == 0)
pa = 1024 * 1024;
else
pa = (va - IA64_PBVM_BASE) + (64 * 1024 * 1024);
return (pa);
}
void
ia64_platform_free(vm_offset_t va __unused, vm_paddr_t pa __unused,
vm_size_t sz __unused)
{
}
int
ldr_bootinfo(struct bootinfo *bi, uint64_t *bi_addr)
ia64_platform_bootinfo(struct bootinfo *bi, struct bootinfo **res)
{
static struct bootinfo bootinfo;
@@ -58,17 +65,16 @@ ldr_bootinfo(struct bootinfo *bi, uint64_t *bi_addr)
sal_stub_init();
acpi_stub_init();
*bi_addr = (uint64_t)(&bootinfo);
bootinfo = *bi;
*res = &bootinfo;
return (0);
}
int
ldr_enter(const char *kernel)
ia64_platform_enter(const char *kernel)
{
while (*kernel == '/')
kernel++;
ssc(0, (uint64_t)kernel, 0, 0, SSC_LOAD_SYMBOLS);
ssc(0, (uint64_t)kernel, 0, 0, SSC_LOAD_SYMBOLS);
return (0);
}

View File

@@ -3,6 +3,7 @@ $FreeBSD$
NOTE ANY CHANGES YOU MAKE TO THE BOOTBLOCKS HERE. The format of this
file is important. Make sure the current version number is on line 6.
2.0: Add support for PBVM.
1.2: Restructured. Has some user visible differences. Due to code
sharing, has been given the same version number as the EFI
loader.

View File

@@ -30,7 +30,8 @@ struct bootinfo {
uint64_t bi_magic; /* BOOTINFO_MAGIC */
#define BOOTINFO_MAGIC 0xdeadbeeffeedface
uint64_t bi_version; /* version 1 */
uint64_t bi_spare[6]; /* was: name of booted kernel */
uint64_t bi_spare[5]; /* was: name of booted kernel */
uint64_t bi_pbvm_pgtbl; /* PA of PBVM page table. */
uint64_t bi_hcdp; /* DIG64 HCDP table */
uint64_t bi_fpswa; /* FPSWA interface */
uint64_t bi_boothowto; /* value for boothowto */
@@ -39,7 +40,7 @@ struct bootinfo {
uint64_t bi_memmap_size; /* size of EFI memory map */
uint64_t bi_memdesc_size; /* sizeof EFI memory desc */
uint32_t bi_memdesc_version; /* EFI memory desc version */
uint32_t bi_spare2;
uint32_t bi_pbvm_pgtblsz; /* PBVM page table size. */
uint64_t bi_symtab; /* start of kernel sym table */
uint64_t bi_esymtab; /* end of kernel sym table */
uint64_t bi_kernend; /* end of kernel space */

View File

@@ -130,6 +130,16 @@
#define IA64_PHYS_TO_RR7(x) ((x) | IA64_RR_BASE(7))
/*
* The Itanium architecture defines that all implementations support at
* least 51 virtual address bits (i.e. IMPL_VA_MSB=50). The unimplemented
* bits are sign-extended from VA{IMPL_VA_MSB}. As such, there's a gap in
* the virtual address range, which extends at most from 0x0004000000000000
* to 0x1ffbffffffffffff. We define the top half of a region in terms of
* this worst-case gap.
*/
#define IA64_REGION_TOP_HALF 0x1ffc000000000000
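
A quick check of that constant (editorial note, not part of the
diff): a region spans 2^61 bytes, and sign-extension from bit 50
leaves the top 2^50 bytes implemented, so the top half begins at
2^61 - 2^50:

_Static_assert((1UL << 61) - (1UL << 50) == 0x1ffc000000000000UL,
    "top half of a region starts at 2^61 - 2^50");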
/*
* Page size of the identity mappings in region 7.
*/
@@ -143,6 +153,42 @@
#define IA64_BACKINGSTORE IA64_RR_BASE(4)
/*
* Parameters for Pre-Boot Virtual Memory (PBVM).
* The kernel, its modules and metadata are loaded in the PBVM by the loader.
* The PBVM consists of pages for which the mapping is maintained in a page
* table. The page table is at least 1 EFI page large (i.e. 4KB), but can be
* larger to accommodate more PBVM. The maximum page table size is 1MB. With
* 8 bytes per page table entry, this means that the PBVM has at least 512
* pages and at most 128K pages.
* The GNU toolchain (in particular GNU ld) does not support an alignment
* larger than 64K. This means that we cannot guarantee page alignment for
* a page size that's larger than 64K. We do want to have text and data in
* different pages, which means that the maximum usable page size is 64KB.
* Consequently:
* The maximum total PBVM size is 8GB -- enough for a DVD image. A page table
* of a single EFI page (4KB) allows for 32MB of PBVM.
*
* The kernel is given the PA and size of the page table that provides the
* mapping of the PBVM. The page table itself is assumed to be mapped at a
* known virtual address and using a single translation wired into the CPU.
* As such, the page table is assumed to be a power of 2 and naturally aligned.
* The kernel also assumes that a good portion of the kernel text is mapped
* and wired into the CPU, but does not assume that the mapping covers the
* whole of PBVM.
*/
#define IA64_PBVM_RR 4
#define IA64_PBVM_BASE \
(IA64_RR_BASE(IA64_PBVM_RR) + IA64_REGION_TOP_HALF)
#define IA64_PBVM_PGTBL_MAXSZ 1048576
#define IA64_PBVM_PGTBL \
(IA64_RR_BASE(IA64_PBVM_RR + 1) - IA64_PBVM_PGTBL_MAXSZ)
#define IA64_PBVM_PAGE_SHIFT 16 /* 64KB */
#define IA64_PBVM_PAGE_SIZE (1 << IA64_PBVM_PAGE_SHIFT)
#define IA64_PBVM_PAGE_MASK (IA64_PBVM_PAGE_SIZE - 1)
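
The new constants are mutually consistent; two illustrative checks
(not in the commit), assuming the definitions above are in scope:

#include <stdint.h>

/* 1MB of 8-byte PTEs = 128K entries; 128K x 64KB pages = 8GB PBVM. */
_Static_assert((uint64_t)IA64_PBVM_PGTBL_MAXSZ / 8 *
    IA64_PBVM_PAGE_SIZE == 8ULL * 1024 * 1024 * 1024,
    "maximum PBVM is 8GB");
/* The page table occupies the top 1MB below the next region base. */
_Static_assert(IA64_PBVM_PGTBL + IA64_PBVM_PGTBL_MAXSZ ==
    IA64_RR_BASE(IA64_PBVM_RR + 1), "page table sits at the region top");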
/*
* Mach derived constants
*/