Explicitly map the VHPT on all processors. Previously we were
merely lucky that the VHPT was mapped as a side-effect of
mapping the kernel, but when there's enough physical memory,
this may not at all be the case.

Approved by: re (blanket)
Marcel Moolenaar 2007-07-30 22:12:53 +00:00
parent c183b0f2c1
commit 8a2a70cb02
4 changed files with 27 additions and 0 deletions
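In outline, the change makes every CPU perform the same two steps, visible in the hunks below: pin the VHPT with a data translation register, then point the VHPT walker at it via cr.pta. A minimal sketch of that pattern, using map_vhpt() and pmap_vhpt_log2size from the diff (the cpu_vhpt_setup() wrapper itself is hypothetical, not part of the commit):

static void
cpu_vhpt_setup(uintptr_t vhpt_base)
{
	/* Pin the VHPT with a translation register so its mapping no
	 * longer depends on how the kernel happens to be mapped. */
	map_vhpt(vhpt_base);

	/* Enable the VHPT walker on this CPU (see the cr.pta field
	 * breakdown after the second hunk below). */
	__asm __volatile("mov cr.pta=%0;; srlz.i;;" ::
	    "r" (vhpt_base + (1<<8) + (pmap_vhpt_log2size<<2) + 1));
}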

@@ -434,6 +434,30 @@ spinlock_exit(void)
	intr_restore(td->td_md.md_saved_intr);
}

void
map_vhpt(uintptr_t vhpt)
{
	pt_entry_t pte;
	uint64_t psr;

	/* Identity-mapped, kernel-only, read/write, write-back page. */
	pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
	    PTE_PL_KERN | PTE_AR_RW;
	pte |= vhpt & PTE_PPN_MASK;

	/* Purge any translation overlapping the VHPT page before
	 * inserting the translation register. */
	__asm __volatile("ptr.d %0,%1" :: "r"(vhpt),
	    "r"(IA64_ID_PAGE_SHIFT << 2));

	/* cr.ifa and cr.itir may only be written with interruption
	 * collection disabled, so run the insertion with psr.ic and
	 * psr.i clear and restore the previous psr afterwards. */
	__asm __volatile("mov %0=psr" : "=r"(psr));
	__asm __volatile("rsm psr.ic|psr.i");
	__asm __volatile("srlz.i");
	__asm __volatile("mov cr.ifa=%0" :: "r"(vhpt));
	__asm __volatile("mov cr.itir=%0" :: "r"(IA64_ID_PAGE_SHIFT << 2));
	/* Pin the VHPT in data translation register 2. */
	__asm __volatile("itr.d dtr[%0]=%1" :: "r"(2), "r"(pte));
	__asm __volatile("srlz.d");	/* XXX not needed. */
	__asm __volatile("mov psr.l=%0" :: "r"(psr));
	__asm __volatile("srlz.i");
}

void
map_pal_code(void)
{

@@ -90,6 +90,7 @@ ia64_ap_startup(void)
	pcpup = ap_pcpu;
	ia64_set_k4((intptr_t)pcpup);
	map_vhpt(ap_vhpt);
	__asm __volatile("mov cr.pta=%0;; srlz.i;;" ::
	    "r" (ap_vhpt + (1<<8) + (pmap_vhpt_log2size<<2) + 1));

@@ -449,6 +449,7 @@ pmap_bootstrap()
		    size);
	}
	map_vhpt(pmap_vhpt_base[0]);
	__asm __volatile("mov cr.pta=%0;; srlz.i;;" ::
	    "r" (pmap_vhpt_base[0] + (1<<8) + (pmap_vhpt_log2size<<2) + 1));

@@ -87,6 +87,7 @@ void ia64_probe_sapics(void);
int interrupt(uint64_t, struct trapframe *);
void map_gateway_page(void);
void map_pal_code(void);
void map_vhpt(uintptr_t);
void os_boot_rendez(void);
void os_mca(void);
int syscall(struct trapframe *);