Add the necessary bits for dumps on ppc64.

MFC after:	2 weeks
Justin Hibbits 2013-11-11 03:17:38 +00:00
parent 6785aaf203
commit a470e71336


@@ -192,6 +192,11 @@ struct ofw_map {
 	cell_t	om_mode;
 };
 
+extern unsigned char _etext[];
+extern unsigned char _end[];
+extern int dumpsys_minidump;
+
 /*
  * Map of physical memory regions.
  */
@@ -329,6 +334,9 @@ void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
 void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
 boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
 static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
+vm_offset_t moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
+    vm_size_t *sz);
+struct pmap_md * moea64_scan_md(mmu_t mmu, struct pmap_md *prev);
 
 static mmu_method_t moea64_methods[] = {
 	MMUMETHOD(mmu_change_wiring,	moea64_change_wiring),
@@ -374,6 +382,8 @@ static mmu_method_t moea64_methods[] = {
 	MMUMETHOD(mmu_kenter,		moea64_kenter),
 	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
 	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
+	MMUMETHOD(mmu_scan_md,		moea64_scan_md),
+	MMUMETHOD(mmu_dumpsys_map,	moea64_dumpsys_map),
 
 	{ 0, 0 }
 };
@@ -2584,3 +2594,98 @@ moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
 	}
 	PMAP_UNLOCK(pm);
 }
+
+vm_offset_t
+moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
+    vm_size_t *sz)
+{
+	if (md->md_vaddr == ~0UL)
+		return (md->md_paddr + ofs);
+	else
+		return (md->md_vaddr + ofs);
+}
+
+struct pmap_md *
+moea64_scan_md(mmu_t mmu, struct pmap_md *prev)
+{
+	static struct pmap_md md;
+	struct pvo_entry *pvo;
+	vm_offset_t va;
+
+	if (dumpsys_minidump) {
+		md.md_paddr = ~0UL;	/* Minidumps use virtual addresses. */
+		if (prev == NULL) {
+			/* 1st: kernel .data and .bss. */
+			md.md_index = 1;
+			md.md_vaddr = trunc_page((uintptr_t)_etext);
+			md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
+			return (&md);
+		}
+		switch (prev->md_index) {
+		case 1:
+			/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
+			md.md_index = 2;
+			md.md_vaddr = (vm_offset_t)msgbufp->msg_ptr;
+			md.md_size = round_page(msgbufp->msg_size);
+			break;
+		case 2:
+			/* 3rd: kernel VM. */
+			va = prev->md_vaddr + prev->md_size;
+			/* Find start of next chunk (from va). */
+			while (va < virtual_end) {
+				/* Don't dump the buffer cache. */
+				if (va >= kmi.buffer_sva &&
+				    va < kmi.buffer_eva) {
+					va = kmi.buffer_eva;
+					continue;
+				}
+				pvo = moea64_pvo_find_va(kernel_pmap,
+				    va & ~ADDR_POFF);
+				if (pvo != NULL &&
+				    (pvo->pvo_pte.pte.pte_hi & PTE_VALID))
+					break;
+				va += PAGE_SIZE;
+			}
+			if (va < virtual_end) {
+				md.md_vaddr = va;
+				va += PAGE_SIZE;
+				/* Find last page in chunk. */
+				while (va < virtual_end) {
+					/* Don't run into the buffer cache. */
+					if (va == kmi.buffer_sva)
+						break;
+					pvo = moea64_pvo_find_va(kernel_pmap,
+					    va & ~ADDR_POFF);
+					if (pvo == NULL ||
+					    !(pvo->pvo_pte.pte.pte_hi & PTE_VALID))
+						break;
+					va += PAGE_SIZE;
+				}
+				md.md_size = va - md.md_vaddr;
+				break;
+			}
+			md.md_index = 3;
+			/* FALLTHROUGH */
+		default:
+			return (NULL);
+		}
+	} else { /* Full dump: walk the physical memory regions. */
+		if (prev == NULL) {
+			/* First physical chunk. */
+			md.md_paddr = pregions[0].mr_start;
+			md.md_size = pregions[0].mr_size;
+			md.md_vaddr = ~0UL;
+			md.md_index = 1;
+		} else if (md.md_index < pregions_sz) {
+			md.md_paddr = pregions[md.md_index].mr_start;
+			md.md_size = pregions[md.md_index].mr_size;
+			md.md_vaddr = ~0UL;
+			md.md_index++;
+		} else {
+			/* There's no next physical chunk. */
+			return (NULL);
+		}
+	}
+
+	return (&md);
+}
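
The two MMUMETHOD entries are what make the new functions reachable from machine-independent code: powerpc's pmap layer is kobj-based, and the generic pmap_scan_md()/pmap_dumpsys_map() entry points dispatch through the interface declared in mmu_if.m. Roughly, the wrappers in sys/powerpc/powerpc/pmap_dispatch.c look like the following sketch (illustrative only, with tracing and the exact generated stub names assumed from mmu_if.m conventions):

struct pmap_md *
pmap_scan_md(struct pmap_md *prev)
{
	/* Dispatch to the active pmap implementation (moea64 here). */
	return (MMU_SCAN_MD(mmu_obj, prev));
}

vm_offset_t
pmap_dumpsys_map(struct pmap_md *md, vm_size_t ofs, vm_size_t *sz)
{
	return (MMU_DUMPSYS_MAP(mmu_obj, md, ofs, sz));
}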
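
moea64_scan_md() is a small state machine keyed on md_index in a static struct pmap_md: for minidumps it walks kernel virtual memory (first .data/.bss, then the msgbuf and bootstrap tables, then each valid stretch of kernel VM, skipping the buffer cache), while for a full dump it simply returns the physical regions in pregions[] one at a time. A chunk with md_vaddr == ~0UL is a physical one, which is why moea64_dumpsys_map() only has to pick the right base and add the offset: on ppc64 physical memory is directly addressable, so no temporary mapping is needed and *sz is left untouched. A simplified, hypothetical consumer loop follows; the real logic lives in the powerpc dump_machdep.c and also writes dump headers, tracks progress, and sizes writes to the dump device (dump_memory and off are illustrative names, not the actual code):

static int
dump_memory(struct dumperinfo *di, off_t off)
{
	struct pmap_md *md;
	vm_offset_t va;
	vm_size_t ofs, sz;
	int error;

	/* Walk every chunk the pmap reports as dumpable. */
	for (md = pmap_scan_md(NULL); md != NULL; md = pmap_scan_md(md)) {
		for (ofs = 0; ofs < md->md_size; ofs += PAGE_SIZE) {
			sz = PAGE_SIZE;
			/* On moea64 this is pure address arithmetic. */
			va = pmap_dumpsys_map(md, ofs, &sz);
			error = dump_write(di, (void *)va, 0, off, PAGE_SIZE);
			if (error != 0)
				return (error);
			off += PAGE_SIZE;
		}
	}
	return (0);
}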