Implement ia64_physmem_alloc() and use it consistently to get memory
before VM has been initialized. This includes:
1.  Replacing pmap_steal_memory(),
2.  Replacing the handcrafted logic to allocate a naturally aligned VHPT,
3.  Properly allocating the DPCPU for the BSP.

Ad 3: Appending the DPCPU to kernend worked only as long as we did not
      cross into the next PBVM page. If we crossed into the next page,
      there would be no PTE for it in the page table and we would end
      up with an MCA following the page fault. As such, this commit
      fixes MCAs occasionally seen.
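
A minimal sketch (not part of the commit) of the hazard described under
item 3; pbvm_page_size is a hypothetical stand-in for the PBVM page size:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Only the PBVM page that already contains kernend is guaranteed to have a
 * PTE in the bootstrap page table.  If an appended region spills into the
 * next PBVM page, the first reference faults and, this early in boot, the
 * page fault escalates into an MCA.
 */
static bool
append_crosses_pbvm_page(uintptr_t kernend, size_t len, size_t pbvm_page_size)
{
	uintptr_t first = kernend & ~(pbvm_page_size - 1);
	uintptr_t last = (kernend + len - 1) & ~(pbvm_page_size - 1);

	return (first != last);
}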
Marcel Moolenaar 2012-07-07 05:17:43 +00:00
parent 8304b99a75
commit 373ca4ed22
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=238190
4 changed files with 82 additions and 80 deletions


@@ -675,7 +675,6 @@ ia64_init(void)
struct efi_md *md;
pt_entry_t *pbvm_pgtbl_ent, *pbvm_pgtbl_lim;
char *p;
vm_offset_t kernend;
vm_size_t mdlen;
int metadata_missing;
@@ -772,20 +771,6 @@ ia64_init(void)
if (boothowto & RB_VERBOSE)
bootverbose = 1;
/*
* Find the end of the kernel.
*/
#ifdef DDB
ksym_start = bootinfo->bi_symtab;
ksym_end = bootinfo->bi_esymtab;
kernend = (vm_offset_t)round_page(ksym_end);
#else
kernend = (vm_offset_t)round_page(_end);
#endif
/* But if the bootstrap tells us otherwise, believe it! */
if (bootinfo->bi_kernend)
kernend = round_page(bootinfo->bi_kernend);
/*
* Wire things up so we can call the firmware.
*/
@@ -805,9 +790,8 @@ ia64_init(void)
pcpup = &pcpu0;
ia64_set_k4((u_int64_t)pcpup);
pcpu_init(pcpup, 0, sizeof(pcpu0));
dpcpu_init((void *)kernend, 0);
dpcpu_init(ia64_physmem_alloc(DPCPU_SIZE, PAGE_SIZE), 0);
PCPU_SET(md.lid, ia64_get_lid());
kernend += DPCPU_SIZE;
PCPU_SET(curthread, &thread0);
/*
@@ -838,14 +822,15 @@ ia64_init(void)
/*
* Initialize error message buffer (at end of core).
*/
msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize);
msgbufp = ia64_physmem_alloc(msgbufsize, PAGE_SIZE);
msgbufinit(msgbufp, msgbufsize);
proc_linkup0(&proc0, &thread0);
/*
* Init mapping for kernel stack for proc 0
*/
thread0.td_kstack = pmap_steal_memory(KSTACK_PAGES * PAGE_SIZE);
p = ia64_physmem_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
thread0.td_kstack = (uintptr_t)p;
thread0.td_kstack_pages = KSTACK_PAGES;
mutex_init();
@@ -871,6 +856,11 @@ ia64_init(void)
/*
* Initialize debuggers, and break into them if appropriate.
*/
#ifdef DDB
ksym_start = bootinfo->bi_symtab;
ksym_end = bootinfo->bi_esymtab;
#endif
kdb_init();
#ifdef KDB


@@ -187,9 +187,72 @@ ia64_physmem_track(vm_paddr_t base, vm_size_t len)
return (0);
}
vm_paddr_t
void *
ia64_physmem_alloc(vm_size_t len, vm_size_t align)
{
vm_paddr_t base, lim, pa;
void *ptr;
u_int idx;
return (0);
if (phys_avail_segs == 0)
return (NULL);
len = round_page(len);
/*
* Try and allocate with least effort.
*/
idx = phys_avail_segs * 2;
while (idx > 0) {
idx -= 2;
base = phys_avail[idx];
lim = phys_avail[idx + 1];
if (lim - base < len)
continue;
/* First try from the end. */
pa = lim - len;
if ((pa & (align - 1)) == 0) {
if (pa == base)
ia64_physmem_remove(idx);
else
phys_avail[idx + 1] = pa;
goto gotit;
}
/* Try from the start next. */
pa = base;
if ((pa & (align - 1)) == 0) {
if (pa + len == lim)
ia64_physmem_remove(idx);
else
phys_avail[idx] += len;
goto gotit;
}
}
/*
* Find a good segment and split it up.
*/
idx = phys_avail_segs * 2;
while (idx > 0) {
idx -= 2;
base = phys_avail[idx];
lim = phys_avail[idx + 1];
pa = (base + align - 1) & ~(align - 1);
if (pa + len <= lim) {
ia64_physmem_delete(pa, len);
goto gotit;
}
}
/* Out of luck. */
return (NULL);
gotit:
ptr = (void *)IA64_PHYS_TO_RR7(pa);
bzero(ptr, len);
return (ptr);
}
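
A self-contained toy illustration of the search order implemented above
(the segment bounds and request size are made up, not from the kernel): the
real function returns at the first strategy that succeeds, while this toy
prints what each one would yield.

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	/* Made-up stand-in for one phys_avail[] segment: [1 MB, 5 MB). */
	uint64_t base = 1ULL << 20, lim = 5ULL << 20;
	uint64_t len = 64 * 1024, align = 64 * 1024;
	uint64_t pa;

	/* Preferred: carve the block off the end of the segment. */
	pa = lim - len;
	if ((pa & (align - 1)) == 0)
		printf("from end:   pa=%#" PRIx64 "\n", pa);

	/* Next: carve it off the start of the segment. */
	pa = base;
	if ((pa & (align - 1)) == 0)
		printf("from start: pa=%#" PRIx64 "\n", pa);

	/* Last resort: round up within the segment and split it in two. */
	pa = (base + align - 1) & ~(align - 1);
	if (pa + len <= lim)
		printf("split:      pa=%#" PRIx64 ", leaving [%#" PRIx64
		    ",%#" PRIx64 ") and [%#" PRIx64 ",%#" PRIx64 ")\n",
		    pa, base, pa, pa + len, lim);
	return (0);
}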


@@ -243,36 +243,6 @@ static int pmap_remove_vhpt(vm_offset_t va);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
vm_page_t m);
vm_offset_t
pmap_steal_memory(vm_size_t size)
{
vm_size_t bank_size;
vm_offset_t pa, va;
size = round_page(size);
bank_size = phys_avail[1] - phys_avail[0];
while (size > bank_size) {
int i;
for (i = 0; phys_avail[i+2]; i+= 2) {
phys_avail[i] = phys_avail[i+2];
phys_avail[i+1] = phys_avail[i+3];
}
phys_avail[i] = 0;
phys_avail[i+1] = 0;
if (!phys_avail[0])
panic("pmap_steal_memory: out of memory");
bank_size = phys_avail[1] - phys_avail[0];
}
pa = phys_avail[0];
phys_avail[0] += size;
va = IA64_PHYS_TO_RR7(pa);
bzero((caddr_t) va, size);
return va;
}
static void
pmap_initialize_vhpt(vm_offset_t vhpt)
{
@@ -318,7 +288,7 @@ pmap_bootstrap()
struct ia64_pal_result res;
vm_offset_t base;
size_t size;
int i, j, count, ridbits;
int i, ridbits;
/*
* Query the PAL Code to find the loop parameters for the
@@ -380,7 +350,7 @@ pmap_bootstrap()
pmap_ridmax = (1 << ridbits);
pmap_ridmapsz = pmap_ridmax / 64;
pmap_ridmap = (uint64_t *)pmap_steal_memory(pmap_ridmax / 8);
pmap_ridmap = ia64_physmem_alloc(pmap_ridmax / 8, PAGE_SIZE);
pmap_ridmap[0] |= 0xff;
pmap_rididx = 0;
pmap_ridcount = 8;
@@ -389,14 +359,10 @@ pmap_bootstrap()
/*
* Allocate some memory for initial kernel 'page tables'.
*/
ia64_kptdir = (void *)pmap_steal_memory(PAGE_SIZE);
ia64_kptdir = ia64_physmem_alloc(PAGE_SIZE, PAGE_SIZE);
nkpt = 0;
kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
for (i = 0; phys_avail[i+2]; i+= 2)
;
count = i+2;
/*
* Determine a valid (mappable) VHPT size.
*/
@@ -410,35 +376,18 @@ pmap_bootstrap()
if (pmap_vhpt_log2size & 1)
pmap_vhpt_log2size--;
base = 0;
size = 1UL << pmap_vhpt_log2size;
for (i = 0; i < count; i += 2) {
base = (phys_avail[i] + size - 1) & ~(size - 1);
if (base + size <= phys_avail[i+1])
break;
}
if (!phys_avail[i])
base = (uintptr_t)ia64_physmem_alloc(size, size);
if (base == 0)
panic("Unable to allocate VHPT");
if (base != phys_avail[i]) {
/* Split this region. */
for (j = count; j > i; j -= 2) {
phys_avail[j] = phys_avail[j-2];
phys_avail[j+1] = phys_avail[j-2+1];
}
phys_avail[i+1] = base;
phys_avail[i+2] = base + size;
} else
phys_avail[i] = base + size;
base = IA64_PHYS_TO_RR7(base);
PCPU_SET(md.vhpt, base);
if (bootverbose)
printf("VHPT: address=%#lx, size=%#lx\n", base, size);
pmap_vhpt_nbuckets = size / sizeof(struct ia64_lpte);
pmap_vhpt_bucket = (void *)pmap_steal_memory(pmap_vhpt_nbuckets *
sizeof(struct ia64_bucket));
pmap_vhpt_bucket = ia64_physmem_alloc(pmap_vhpt_nbuckets *
sizeof(struct ia64_bucket), PAGE_SIZE);
for (i = 0; i < pmap_vhpt_nbuckets; i++) {
/* Stolen memory is zeroed. */
mtx_init(&pmap_vhpt_bucket[i].mutex, "VHPT bucket lock", NULL,
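
For reference, a hedged sketch of the naturally aligned VHPT request above:
natural alignment simply means passing the same power-of-two value for both
len and align (size_t stands in here for the kernel's vm_size_t; the real
prototype is in the md_var.h hunk below).

#include <stddef.h>

void *ia64_physmem_alloc(size_t len, size_t align);	/* vm_size_t in the kernel */

/* Sketch: request a VHPT of 2^log2size bytes on a 2^log2size boundary. */
static void *
vhpt_alloc_sketch(unsigned int log2size)
{
	size_t size = (size_t)1 << log2size;

	/* Passing size for both arguments yields natural alignment. */
	return (ia64_physmem_alloc(size, size));
}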


@@ -93,7 +93,7 @@ int ia64_highfp_save_ipi(void);
struct ia64_init_return ia64_init(void);
u_int ia64_itc_freq(void);
int ia64_physmem_add(vm_paddr_t, vm_size_t);
vm_paddr_t ia64_physmem_alloc(vm_size_t, vm_size_t);
void *ia64_physmem_alloc(vm_size_t, vm_size_t);
int ia64_physmem_delete(vm_paddr_t, vm_size_t);
int ia64_physmem_fini(void);
int ia64_physmem_init(void);