Sparsify the vm_page_dump bitmap
On Ampere Altra systems, the sparse population of RAM within the physical address space causes the vm_page_dump bitmap to be much larger than necessary, increasing its size from ~8 MiB to more than 2 GiB (and overflowing `int` for the size). Changing the page dump bitmap also changes the minidump file format, so matching changes are needed in libkvm.

Reviewed by:	jhb
Approved by:	scottl (implicit)
MFC after:	1 week
Sponsored by:	Ampere Computing, Inc.
Differential Revision:	https://reviews.freebsd.org/D26131
commit 00e6614750
parent ab041f713a
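To make the shape of the change concrete, here is a minimal standalone sketch (not part of the commit; the function names, the example memory layout, and the fixed 4 KiB page size are assumptions, and the ranges are taken to be page aligned). The old scheme numbers bitmap bits by raw physical page frame, so the bitmap must span the whole physical address space up to the last populated page; the new scheme numbers bits by a page's position within the dump_avail[] ranges, so the bitmap only grows with the amount of RAM that can actually be dumped:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL

/*
 * dump_avail-style table, { start0, end0, start1, end1, ..., 0, 0 }.
 * Hypothetical Altra-like layout: 4 GiB of RAM at 0, 4 GiB more at 2 TiB.
 */
static const uint64_t avail[] = {
    0x000000000000, 0x000100000000,
    0x020000000000, 0x020100000000,
    0, 0
};

/* Old scheme: the bit index is simply the physical page frame number. */
static uint64_t
dense_bit(uint64_t pa)
{
    return (pa / PAGE_SIZE);
}

/* New scheme: the bit index is the page's position within the ranges. */
static uint64_t
sparse_bit(uint64_t pa)
{
    uint64_t adj = 0;	/* pages contributed by earlier ranges */
    int i;

    for (i = 0; avail[i + 1] != 0; i += 2) {
        if (pa >= avail[i] && pa < avail[i + 1])
            return ((pa - avail[i]) / PAGE_SIZE + adj);
        adj += (avail[i + 1] - avail[i]) / PAGE_SIZE;
    }
    return (UINT64_MAX);	/* not a dumpable page */
}

int
main(void)
{
    uint64_t pa = 0x020000000000;	/* first page of the high range */

    printf("dense bit %ju, sparse bit %ju\n",
        (uintmax_t)dense_bit(pa), (uintmax_t)sparse_bit(pa));
    return (0);
}

For this hypothetical layout the first high page lands at dense bit 536,870,912 (forcing a bitmap of at least 64 MiB for 8 GiB of RAM) but at sparse bit 1,048,576, immediately after the low 4 GiB range, so the sparse bitmap is only 256 KiB.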
@@ -82,7 +82,7 @@ static int
 _aarch64_minidump_initvtop(kvm_t *kd)
 {
     struct vmstate *vmst;
-    off_t off, sparse_off;
+    off_t off, dump_avail_off, sparse_off;

     vmst = _kvm_malloc(kd, sizeof(*vmst));
     if (vmst == NULL) {
@@ -102,7 +102,7 @@ _aarch64_minidump_initvtop(kvm_t *kd)
     }

     vmst->hdr.version = le32toh(vmst->hdr.version);
-    if (vmst->hdr.version != MINIDUMP_VERSION) {
+    if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
         _kvm_err(kd, kd->program, "wrong minidump version. "
             "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
         return (-1);
@@ -114,15 +114,21 @@ _aarch64_minidump_initvtop(kvm_t *kd)
     vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys);
     vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
     vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);
+    vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
+        le32toh(vmst->hdr.dumpavailsize) : 0;

     /* Skip header and msgbuf */
-    off = AARCH64_PAGE_SIZE + aarch64_round_page(vmst->hdr.msgbufsize);
+    dump_avail_off = AARCH64_PAGE_SIZE + aarch64_round_page(vmst->hdr.msgbufsize);
+
+    /* Skip dump_avail */
+    off = dump_avail_off + aarch64_round_page(vmst->hdr.dumpavailsize);

     /* build physical address lookup table for sparse pages */
     sparse_off = off + aarch64_round_page(vmst->hdr.bitmapsize) +
         aarch64_round_page(vmst->hdr.pmapsize);
-    if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
-        AARCH64_PAGE_SIZE, sizeof(uint64_t)) == -1) {
+    if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
+        vmst->hdr.bitmapsize, off, sparse_off, AARCH64_PAGE_SIZE,
+        sizeof(uint64_t)) == -1) {
         return (-1);
     }
     off += aarch64_round_page(vmst->hdr.bitmapsize);
@@ -257,7 +263,9 @@ _aarch64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
     }

     while (_kvm_bitmap_next(&bm, &bmindex)) {
-        pa = bmindex * AARCH64_PAGE_SIZE;
+        pa = _kvm_bit_id_pa(kd, bmindex, AARCH64_PAGE_SIZE);
+        if (pa == _KVM_PA_INVALID)
+            break;
         dva = vm->hdr.dmapbase + pa;
         if (vm->hdr.dmapend < (dva + AARCH64_PAGE_SIZE))
             break;

@@ -123,7 +123,7 @@ static int
 _amd64_minidump_initvtop(kvm_t *kd)
 {
     struct vmstate *vmst;
-    off_t off, sparse_off;
+    off_t off, dump_avail_off, sparse_off;

     vmst = _kvm_malloc(kd, sizeof(*vmst));
     if (vmst == NULL) {
@@ -143,10 +143,10 @@ _amd64_minidump_initvtop(kvm_t *kd)

     /*
      * NB: amd64 minidump header is binary compatible between version 1
-     * and version 2; this may not be the case for the future versions.
+     * and version 2; version 3 adds the dumpavailsize field
      */
     vmst->hdr.version = le32toh(vmst->hdr.version);
-    if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
+    if (vmst->hdr.version > MINIDUMP_VERSION || vmst->hdr.version < 1) {
         _kvm_err(kd, kd->program, "wrong minidump version. expected %d got %d",
             MINIDUMP_VERSION, vmst->hdr.version);
         return (-1);
@@ -157,14 +157,20 @@ _amd64_minidump_initvtop(kvm_t *kd)
     vmst->hdr.kernbase = le64toh(vmst->hdr.kernbase);
     vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
     vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);
+    vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
+        le32toh(vmst->hdr.dumpavailsize) : 0;

     /* Skip header and msgbuf */
-    off = AMD64_PAGE_SIZE + amd64_round_page(vmst->hdr.msgbufsize);
+    dump_avail_off = AMD64_PAGE_SIZE + amd64_round_page(vmst->hdr.msgbufsize);
+
+    /* Skip dump_avail */
+    off = dump_avail_off + amd64_round_page(vmst->hdr.dumpavailsize);

     sparse_off = off + amd64_round_page(vmst->hdr.bitmapsize) +
         amd64_round_page(vmst->hdr.pmapsize);
-    if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
-        AMD64_PAGE_SIZE, sizeof(uint64_t)) == -1) {
+    if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
+        vmst->hdr.bitmapsize, off, sparse_off, AMD64_PAGE_SIZE,
+        sizeof(uint64_t)) == -1) {
         return (-1);
     }
     off += amd64_round_page(vmst->hdr.bitmapsize);
@@ -372,7 +378,7 @@ _amd64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
             pa = (pde & AMD64_PG_PS_FRAME) +
                 ((va & AMD64_PDRMASK) ^ VA_OFF(vm, va));
             dva = vm->hdr.dmapbase + pa;
-            _kvm_bitmap_set(&bm, pa, AMD64_PAGE_SIZE);
+            _kvm_bitmap_set(&bm, _kvm_pa_bit_id(kd, pa, AMD64_PAGE_SIZE));
             if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
                 _amd64_entry_to_prot(pde), AMD64_NBPDR, pgsz)) {
                 goto out;
@@ -392,7 +398,8 @@ _amd64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
             pa = pte & AMD64_PG_FRAME;
             dva = vm->hdr.dmapbase + pa;
             if ((pte & AMD64_PG_V) != 0) {
-                _kvm_bitmap_set(&bm, pa, AMD64_PAGE_SIZE);
+                _kvm_bitmap_set(&bm,
+                    _kvm_pa_bit_id(kd, pa, AMD64_PAGE_SIZE));
                 if (!_kvm_visit_cb(kd, cb, arg, pa, va, dva,
                     _amd64_entry_to_prot(pte), pgsz, 0)) {
                     goto out;
@@ -403,7 +410,9 @@ _amd64_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
     }

     while (_kvm_bitmap_next(&bm, &bmindex)) {
-        pa = bmindex * AMD64_PAGE_SIZE;
+        pa = _kvm_bit_id_pa(kd, bmindex, AMD64_PAGE_SIZE);
+        if (pa == _KVM_PA_INVALID)
+            break;
         dva = vm->hdr.dmapbase + pa;
         if (vm->hdr.dmapend < (dva + pgsz))
             break;

@@ -86,7 +86,7 @@ static int
 _arm_minidump_initvtop(kvm_t *kd)
 {
     struct vmstate *vmst;
-    off_t off, sparse_off;
+    off_t off, dump_avail_off, sparse_off;

     vmst = _kvm_malloc(kd, sizeof(*vmst));
     if (vmst == NULL) {
@@ -108,7 +108,7 @@ _arm_minidump_initvtop(kvm_t *kd)
         return (-1);
     }
     vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version);
-    if (vmst->hdr.version != MINIDUMP_VERSION) {
+    if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
         _kvm_err(kd, kd->program, "wrong minidump version. "
             "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
         return (-1);
@@ -123,14 +123,20 @@ _arm_minidump_initvtop(kvm_t *kd)
         /* This is a safe default as 1K pages are not used. */
         vmst->hdr.mmuformat = MINIDUMP_MMU_FORMAT_V6;
     }
+    vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
+        _kvm32toh(kd, vmst->hdr.dumpavailsize) : 0;

     /* Skip header and msgbuf */
-    off = ARM_PAGE_SIZE + arm_round_page(vmst->hdr.msgbufsize);
+    dump_avail_off = ARM_PAGE_SIZE + arm_round_page(vmst->hdr.msgbufsize);
+
+    /* Skip dump_avail */
+    off = dump_avail_off + arm_round_page(vmst->hdr.dumpavailsize);

     sparse_off = off + arm_round_page(vmst->hdr.bitmapsize) +
         arm_round_page(vmst->hdr.ptesize);
-    if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
-        ARM_PAGE_SIZE, sizeof(uint32_t)) == -1) {
+    if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
+        vmst->hdr.bitmapsize, off, sparse_off, ARM_PAGE_SIZE,
+        sizeof(uint32_t)) == -1) {
         return (-1);
     }
     off += arm_round_page(vmst->hdr.bitmapsize);

@@ -91,7 +91,7 @@ static int
 _i386_minidump_initvtop(kvm_t *kd)
 {
     struct vmstate *vmst;
-    off_t off, sparse_off;
+    off_t off, dump_avail_off, sparse_off;

     vmst = _kvm_malloc(kd, sizeof(*vmst));
     if (vmst == NULL) {
@@ -109,7 +109,7 @@ _i386_minidump_initvtop(kvm_t *kd)
         return (-1);
     }
     vmst->hdr.version = le32toh(vmst->hdr.version);
-    if (vmst->hdr.version != MINIDUMP_VERSION) {
+    if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
         _kvm_err(kd, kd->program, "wrong minidump version. expected %d got %d",
             MINIDUMP_VERSION, vmst->hdr.version);
         return (-1);
@@ -119,14 +119,20 @@ _i386_minidump_initvtop(kvm_t *kd)
     vmst->hdr.ptesize = le32toh(vmst->hdr.ptesize);
     vmst->hdr.kernbase = le32toh(vmst->hdr.kernbase);
     vmst->hdr.paemode = le32toh(vmst->hdr.paemode);
+    vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
+        le32toh(vmst->hdr.dumpavailsize) : 0;

     /* Skip header and msgbuf */
-    off = I386_PAGE_SIZE + i386_round_page(vmst->hdr.msgbufsize);
+    dump_avail_off = I386_PAGE_SIZE + i386_round_page(vmst->hdr.msgbufsize);
+
+    /* Skip dump_avail */
+    off = dump_avail_off + i386_round_page(vmst->hdr.dumpavailsize);

     sparse_off = off + i386_round_page(vmst->hdr.bitmapsize) +
         i386_round_page(vmst->hdr.ptesize);
-    if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
-        I386_PAGE_SIZE, sizeof(uint32_t)) == -1) {
+    if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
+        vmst->hdr.bitmapsize, off, sparse_off, I386_PAGE_SIZE,
+        sizeof(uint32_t)) == -1) {
         return (-1);
     }
     off += i386_round_page(vmst->hdr.bitmapsize);

@@ -85,7 +85,7 @@ static int
 _mips_minidump_initvtop(kvm_t *kd)
 {
     struct vmstate *vmst;
-    off_t off, sparse_off;
+    off_t off, dump_avail_off, sparse_off;

     vmst = _kvm_malloc(kd, sizeof(*vmst));
     if (vmst == NULL) {
@@ -113,7 +113,7 @@ _mips_minidump_initvtop(kvm_t *kd)
         return (-1);
     }
     vmst->hdr.version = _kvm32toh(kd, vmst->hdr.version);
-    if (vmst->hdr.version != MINIDUMP_VERSION) {
+    if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
         _kvm_err(kd, kd->program, "wrong minidump version. "
             "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
         return (-1);
@@ -124,14 +124,20 @@ _mips_minidump_initvtop(kvm_t *kd)
     vmst->hdr.kernbase = _kvm64toh(kd, vmst->hdr.kernbase);
     vmst->hdr.dmapbase = _kvm64toh(kd, vmst->hdr.dmapbase);
     vmst->hdr.dmapend = _kvm64toh(kd, vmst->hdr.dmapend);
+    vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
+        _kvm32toh(kd, vmst->hdr.dumpavailsize) : 0;

     /* Skip header and msgbuf */
-    off = MIPS_PAGE_SIZE + mips_round_page(vmst->hdr.msgbufsize);
+    dump_avail_off = MIPS_PAGE_SIZE + mips_round_page(vmst->hdr.msgbufsize);
+
+    /* Skip dump_avail */
+    off = dump_avail_off + mips_round_page(vmst->hdr.dumpavailsize);

     sparse_off = off + mips_round_page(vmst->hdr.bitmapsize) +
         mips_round_page(vmst->hdr.ptesize);
-    if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
-        MIPS_PAGE_SIZE, sizeof(uint32_t)) == -1) {
+    if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
+        vmst->hdr.bitmapsize, off, sparse_off, MIPS_PAGE_SIZE,
+        sizeof(uint32_t)) == -1) {
         return (-1);
     }
     off += mips_round_page(vmst->hdr.bitmapsize);

@@ -68,7 +68,7 @@ _powerpc64_minidump_initvtop(kvm_t *kd)
 {
     struct vmstate *vmst;
     struct minidumphdr *hdr;
-    off_t bitmap_off, pmap_off, sparse_off;
+    off_t dump_avail_off, bitmap_off, pmap_off, sparse_off;
     const char *mmu_name;

     /* Alloc VM */
@@ -92,7 +92,7 @@ _powerpc64_minidump_initvtop(kvm_t *kd)
     }
     /* Check version */
     hdr->version = be32toh(hdr->version);
-    if (hdr->version != MINIDUMP_VERSION) {
+    if (hdr->version != MINIDUMP_VERSION && hdr->version != 1) {
         _kvm_err(kd, kd->program, "wrong minidump version. "
             "Expected %d got %d", MINIDUMP_VERSION, hdr->version);
         goto failed;
@@ -108,6 +108,8 @@ _powerpc64_minidump_initvtop(kvm_t *kd)
     hdr->hw_direct_map = be32toh(hdr->hw_direct_map);
     hdr->startkernel = be64toh(hdr->startkernel);
     hdr->endkernel = be64toh(hdr->endkernel);
+    hdr->dumpavailsize = hdr->version == MINIDUMP_VERSION ?
+        be32toh(hdr->dumpavailsize) : 0;

     vmst->kimg_start = PPC64_KERNBASE;
     vmst->kimg_end = PPC64_KERNBASE + hdr->endkernel - hdr->startkernel;
@@ -140,7 +142,8 @@ _powerpc64_minidump_initvtop(kvm_t *kd)
         goto failed;

     /* Get dump parts' offsets */
-    bitmap_off = PPC64_PAGE_SIZE + ppc64_round_page(hdr->msgbufsize);
+    dump_avail_off = PPC64_PAGE_SIZE + ppc64_round_page(hdr->msgbufsize);
+    bitmap_off = dump_avail_off + ppc64_round_page(hdr->dumpavailsize);
     pmap_off = bitmap_off + ppc64_round_page(hdr->bitmapsize);
     sparse_off = pmap_off + ppc64_round_page(hdr->pmapsize);

@@ -151,8 +154,9 @@ _powerpc64_minidump_initvtop(kvm_t *kd)
         (uintmax_t)pmap_off, (uintmax_t)sparse_off);

     /* build physical address lookup table for sparse pages */
-    if (_kvm_pt_init(kd, hdr->bitmapsize, bitmap_off, sparse_off,
-        PPC64_PAGE_SIZE, sizeof(uint64_t)) == -1)
+    if (_kvm_pt_init(kd, hdr->dumpavailsize, dump_avail_off,
+        hdr->bitmapsize, bitmap_off, sparse_off, PPC64_PAGE_SIZE,
+        sizeof(uint64_t)) == -1)
         goto failed;

     if (_kvm_pmap_init(kd, hdr->pmapsize, pmap_off) == -1)

@@ -83,7 +83,7 @@ static int
 _riscv_minidump_initvtop(kvm_t *kd)
 {
     struct vmstate *vmst;
-    off_t off, sparse_off;
+    off_t off, dump_avail_off, sparse_off;

     vmst = _kvm_malloc(kd, sizeof(*vmst));
     if (vmst == NULL) {
@@ -103,7 +103,7 @@ _riscv_minidump_initvtop(kvm_t *kd)
     }

     vmst->hdr.version = le32toh(vmst->hdr.version);
-    if (vmst->hdr.version != MINIDUMP_VERSION) {
+    if (vmst->hdr.version != MINIDUMP_VERSION && vmst->hdr.version != 1) {
         _kvm_err(kd, kd->program, "wrong minidump version. "
             "Expected %d got %d", MINIDUMP_VERSION, vmst->hdr.version);
         return (-1);
@@ -115,15 +115,21 @@ _riscv_minidump_initvtop(kvm_t *kd)
     vmst->hdr.dmapphys = le64toh(vmst->hdr.dmapphys);
     vmst->hdr.dmapbase = le64toh(vmst->hdr.dmapbase);
     vmst->hdr.dmapend = le64toh(vmst->hdr.dmapend);
+    vmst->hdr.dumpavailsize = vmst->hdr.version == MINIDUMP_VERSION ?
+        le32toh(vmst->hdr.dumpavailsize) : 0;

     /* Skip header and msgbuf */
-    off = RISCV_PAGE_SIZE + riscv_round_page(vmst->hdr.msgbufsize);
+    dump_avail_off = RISCV_PAGE_SIZE + riscv_round_page(vmst->hdr.msgbufsize);
+
+    /* Skip dump_avail */
+    off = dump_avail_off + riscv_round_page(vmst->hdr.dumpavailsize);

     /* build physical address lookup table for sparse pages */
     sparse_off = off + riscv_round_page(vmst->hdr.bitmapsize) +
         riscv_round_page(vmst->hdr.pmapsize);
-    if (_kvm_pt_init(kd, vmst->hdr.bitmapsize, off, sparse_off,
-        RISCV_PAGE_SIZE, sizeof(uint64_t)) == -1) {
+    if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
+        vmst->hdr.bitmapsize, off, sparse_off, RISCV_PAGE_SIZE,
+        sizeof(uint64_t)) == -1) {
         return (-1);
     }
     off += riscv_round_page(vmst->hdr.bitmapsize);
@@ -258,7 +264,9 @@ _riscv_minidump_walk_pages(kvm_t *kd, kvm_walk_pages_cb_t *cb, void *arg)
     }

     while (_kvm_bitmap_next(&bm, &bmindex)) {
-        pa = bmindex * RISCV_PAGE_SIZE;
+        pa = _kvm_bit_id_pa(kd, bmindex, RISCV_PAGE_SIZE);
+        if (pa == _KVM_PA_INVALID)
+            break;
         dva = vm->hdr.dmapbase + pa;
         if (vm->hdr.dmapend < (dva + RISCV_PAGE_SIZE))
             break;

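All of the per-architecture libkvm backends above apply the same layout change: a copy of dump_avail[] now sits between the message buffer and the page bitmap, and its page-rounded size comes from the new dumpavailsize header field, which is treated as 0 for old-format dumps. The following sketch of the offset arithmetic uses hypothetical names and a fixed 4 KiB page size; it is not a libkvm interface, just the same arithmetic written out once:

#include <stdint.h>

#define PAGE_SIZE	4096
#define round_page(x)	(((uint64_t)(x) + PAGE_SIZE - 1) & ~(uint64_t)(PAGE_SIZE - 1))

struct vmcore_layout {
    uint64_t dump_avail_off;	/* copy of dump_avail[] */
    uint64_t bitmap_off;	/* sparse page bitmap */
    uint64_t pmap_off;		/* page table pages */
    uint64_t sparse_off;	/* dumped page contents */
};

struct vmcore_layout
minidump_layout(uint32_t msgbufsize, uint32_t dumpavailsize,
    uint32_t bitmapsize, uint32_t pmapsize)
{
    struct vmcore_layout l;

    /* The first page holds the minidump header, then the msgbuf follows. */
    l.dump_avail_off = PAGE_SIZE + round_page(msgbufsize);
    l.bitmap_off = l.dump_avail_off + round_page(dumpavailsize);
    l.pmap_off = l.bitmap_off + round_page(bitmapsize);
    l.sparse_off = l.pmap_off + round_page(pmapsize);
    return (l);
}

For an old dump dumpavailsize is 0, so dump_avail_off and bitmap_off coincide and the layout degenerates to the previous format.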
@@ -290,8 +290,9 @@ _kvm_map_get(kvm_t *kd, u_long pa, unsigned int page_size)
 }

 int
-_kvm_pt_init(kvm_t *kd, size_t map_len, off_t map_off, off_t sparse_off,
-    int page_size, int word_size)
+_kvm_pt_init(kvm_t *kd, size_t dump_avail_size, off_t dump_avail_off,
+    size_t map_len, off_t map_off, off_t sparse_off, int page_size,
+    int word_size)
 {
     uint64_t *addr;
     uint32_t *popcount_bin;
@@ -299,6 +300,27 @@ _kvm_pt_init(kvm_t *kd, size_t map_len, off_t map_off, off_t sparse_off,
     uint64_t pc_bins, res;
     ssize_t rd;

+    kd->dump_avail_size = dump_avail_size;
+    if (dump_avail_size > 0) {
+        kd->dump_avail = mmap(NULL, kd->dump_avail_size, PROT_READ,
+            MAP_PRIVATE, kd->pmfd, dump_avail_off);
+    } else {
+        /*
+         * Older version minidumps don't provide dump_avail[],
+         * so the bitmap is fully populated from 0 to
+         * last_pa. Create an implied dump_avail that
+         * expresses this.
+         */
+        kd->dump_avail = calloc(4, word_size);
+        if (word_size == sizeof(uint32_t)) {
+            ((uint32_t *)kd->dump_avail)[1] = _kvm32toh(kd,
+                map_len * 8 * page_size);
+        } else {
+            kd->dump_avail[1] = _kvm64toh(kd,
+                map_len * 8 * page_size);
+        }
+    }
+
     /*
      * Map the bitmap specified by the arguments.
      */
@@ -394,6 +416,55 @@ _kvm_pmap_init(kvm_t *kd, uint32_t pmap_size, off_t pmap_off)
     return (0);
 }

+static inline uint64_t
+dump_avail_n(kvm_t *kd, long i)
+{
+    uint32_t *d32;
+
+    if (kd->pt_word_size == sizeof(uint32_t)) {
+        d32 = (uint32_t *)kd->dump_avail;
+        return (_kvm32toh(kd, d32[i]));
+    } else
+        return (_kvm64toh(kd, kd->dump_avail[i]));
+}
+
+uint64_t
+_kvm_pa_bit_id(kvm_t *kd, uint64_t pa, unsigned int page_size)
+{
+    uint64_t adj;
+    long i;
+
+    adj = 0;
+    for (i = 0; dump_avail_n(kd, i + 1) != 0; i += 2) {
+        if (pa >= dump_avail_n(kd, i + 1)) {
+            adj += howmany(dump_avail_n(kd, i + 1), page_size) -
+                dump_avail_n(kd, i) / page_size;
+        } else {
+            return (pa / page_size -
+                dump_avail_n(kd, i) / page_size + adj);
+        }
+    }
+    return (_KVM_BIT_ID_INVALID);
+}
+
+uint64_t
+_kvm_bit_id_pa(kvm_t *kd, uint64_t bit_id, unsigned int page_size)
+{
+    uint64_t sz;
+    long i;
+
+    for (i = 0; dump_avail_n(kd, i + 1) != 0; i += 2) {
+        sz = howmany(dump_avail_n(kd, i + 1), page_size) -
+            dump_avail_n(kd, i) / page_size;
+        if (bit_id < sz) {
+            return (rounddown2(dump_avail_n(kd, i), page_size) +
+                bit_id * page_size);
+        }
+        bit_id -= sz;
+    }
+    return (_KVM_PA_INVALID);
+}
+
 /*
  * Find the offset for the given physical page address; returns -1 otherwise.
  *
@@ -412,7 +483,7 @@ off_t
 _kvm_pt_find(kvm_t *kd, uint64_t pa, unsigned int page_size)
 {
     uint64_t *bitmap = kd->pt_map;
-    uint64_t pte_bit_id = pa / page_size;
+    uint64_t pte_bit_id = _kvm_pa_bit_id(kd, pa, page_size);
     uint64_t pte_u64 = pte_bit_id / BITS_IN(*bitmap);
     uint64_t popcount_id = pte_bit_id / POPCOUNT_BITS;
     uint64_t pte_mask = 1ULL << (pte_bit_id % BITS_IN(*bitmap));
@@ -420,7 +491,8 @@ _kvm_pt_find(kvm_t *kd, uint64_t pa, unsigned int page_size)
     uint32_t count;

     /* Check whether the page address requested is in the dump. */
-    if (pte_bit_id >= (kd->pt_map_size * NBBY) ||
+    if (pte_bit_id == _KVM_BIT_ID_INVALID ||
+        pte_bit_id >= (kd->pt_map_size * NBBY) ||
         (bitmap[pte_u64] & pte_mask) == 0)
         return (-1);

@@ -714,12 +786,12 @@ _kvm_bitmap_init(struct kvm_bitmap *bm, u_long bitmapsize, u_long *idx)
 }

 void
-_kvm_bitmap_set(struct kvm_bitmap *bm, u_long pa, unsigned int page_size)
+_kvm_bitmap_set(struct kvm_bitmap *bm, u_long bm_index)
 {
-    u_long bm_index = pa / page_size;
     uint8_t *byte = &bm->map[bm_index / 8];

-    *byte |= (1UL << (bm_index % 8));
+    if (bm_index / 8 < bm->size)
+        *byte |= (1UL << (bm_index % 8));
 }

 int

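One detail worth calling out from _kvm_pt_init() above: when libkvm opens a minidump that predates this format change, there is no dump_avail[] in the file, so it fabricates one describing a single range covering everything the old dense bitmap could address. A rough standalone equivalent (hypothetical helper name; the real code stores the value in the dump's byte order via _kvm32toh()/_kvm64toh() and sizes the words to match the dump's word size):

#include <stdint.h>
#include <stdlib.h>

/* Fabricate { 0, last_pa, 0, 0 }: one range plus the terminating pair. */
uint64_t *
implied_dump_avail(size_t bitmap_len, unsigned int page_size)
{
    uint64_t *avail;

    avail = calloc(4, sizeof(uint64_t));
    if (avail == NULL)
        return (NULL);
    /* Each bitmap byte covers 8 pages, so this is the old last_pa. */
    avail[1] = (uint64_t)bitmap_len * 8 * page_size;
    return (avail);
}

With that single implied range, _kvm_pa_bit_id() reduces to pa / page_size and _kvm_bit_id_pa() to bit_id * page_size, which is exactly the old dense behaviour.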
@@ -106,6 +106,8 @@ struct __kvm {
     /* Page table lookup structures. */
     uint64_t *pt_map;
     size_t pt_map_size;
+    uint64_t *dump_avail;	/* actually word sized */
+    size_t dump_avail_size;
     off_t pt_sparse_off;
     uint64_t pt_sparse_size;
     uint32_t *pt_popcounts;
@@ -152,8 +154,13 @@ _kvm64toh(kvm_t *kd, uint64_t val)
         return (be64toh(val));
 }

+uint64_t _kvm_pa_bit_id(kvm_t *kd, uint64_t pa, unsigned int page_size);
+uint64_t _kvm_bit_id_pa(kvm_t *kd, uint64_t bit_id, unsigned int page_size);
+#define _KVM_PA_INVALID		ULONG_MAX
+#define _KVM_BIT_ID_INVALID	ULONG_MAX
+
 int _kvm_bitmap_init(struct kvm_bitmap *, u_long, u_long *);
-void _kvm_bitmap_set(struct kvm_bitmap *, u_long, unsigned int);
+void _kvm_bitmap_set(struct kvm_bitmap *, u_long);
 int _kvm_bitmap_next(struct kvm_bitmap *, u_long *);
 void _kvm_bitmap_deinit(struct kvm_bitmap *);

@@ -173,7 +180,7 @@ kvaddr_t _kvm_dpcpu_validaddr(kvm_t *, kvaddr_t);
 int _kvm_probe_elf_kernel(kvm_t *, int, int);
 int _kvm_is_minidump(kvm_t *);
 int _kvm_read_core_phdrs(kvm_t *, size_t *, GElf_Phdr **);
-int _kvm_pt_init(kvm_t *, size_t, off_t, off_t, int, int);
+int _kvm_pt_init(kvm_t *, size_t, off_t, size_t, off_t, off_t, int, int);
 off_t _kvm_pt_find(kvm_t *, uint64_t, unsigned int);
 int _kvm_visit_cb(kvm_t *, kvm_walk_pages_cb_t *, void *, u_long,
     u_long, u_long, vm_prot_t, size_t, unsigned int);

@@ -299,6 +299,7 @@ minidumpsys(struct dumperinfo *di)
     /* Calculate dump size. */
     dumpsize = pmapsize;
     dumpsize += round_page(msgbufp->msg_size);
+    dumpsize += round_page(sizeof(dump_avail));
     dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
     VM_PAGE_DUMP_FOREACH(pa) {
         /* Clear out undumpable pages now if needed */
@@ -322,6 +323,7 @@ minidumpsys(struct dumperinfo *di)
     mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
     mdhdr.dmapbase = DMAP_MIN_ADDRESS;
     mdhdr.dmapend = DMAP_MAX_ADDRESS;
+    mdhdr.dumpavailsize = round_page(sizeof(dump_avail));

     dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_AMD64_VERSION,
         dumpsize);
@@ -345,6 +347,15 @@ minidumpsys(struct dumperinfo *di)
     if (error)
         goto fail;

+    /* Dump dump_avail */
+    _Static_assert(sizeof(dump_avail) <= sizeof(fakepd),
+        "Large dump_avail not handled");
+    bzero(&fakepd, sizeof(fakepd));
+    memcpy(fakepd, dump_avail, sizeof(dump_avail));
+    error = blk_write(di, (char *)fakepd, 0, PAGE_SIZE);
+    if (error)
+        goto fail;
+
     /* Dump bitmap */
     error = blk_write(di, (char *)vm_page_dump, 0,
         round_page(BITSET_SIZE(vm_page_dump_pages)));

@@ -32,7 +32,7 @@
 #define _MACHINE_MINIDUMP_H_ 1

 #define MINIDUMP_MAGIC "minidump FreeBSD/amd64"
-#define MINIDUMP_VERSION 2
+#define MINIDUMP_VERSION 3

 struct minidumphdr {
     char magic[24];
@@ -43,6 +43,7 @@ struct minidumphdr {
     uint64_t kernbase;
     uint64_t dmapbase;
     uint64_t dmapend;
+    uint32_t dumpavailsize;
 };

 #endif /* _MACHINE_MINIDUMP_H_ */

@@ -206,6 +206,7 @@ minidumpsys(struct dumperinfo *di)
     /* Calculate dump size. */
     dumpsize = ptesize;
     dumpsize += round_page(msgbufp->msg_size);
+    dumpsize += round_page(sizeof(dump_avail));
     dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
     VM_PAGE_DUMP_FOREACH(pa) {
         /* Clear out undumpable pages now if needed */
@@ -232,6 +233,7 @@ minidumpsys(struct dumperinfo *di)
 #else
     mdhdr.mmuformat = MINIDUMP_MMU_FORMAT_V4;
 #endif
+    mdhdr.dumpavailsize = round_page(sizeof(dump_avail));
     dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_ARM_VERSION,
         dumpsize);

@@ -255,6 +257,15 @@ minidumpsys(struct dumperinfo *di)
     if (error)
         goto fail;

+    /* Dump dump_avail */
+    _Static_assert(sizeof(dump_avail) <= sizeof(dumpbuf),
+        "Large dump_avail not handled");
+    bzero(dumpbuf, sizeof(dumpbuf));
+    memcpy(dumpbuf, dump_avail, sizeof(dump_avail));
+    error = blk_write(di, dumpbuf, 0, PAGE_SIZE);
+    if (error)
+        goto fail;
+
     /* Dump bitmap */
     error = blk_write(di, (char *)vm_page_dump, 0,
         round_page(BITSET_SIZE(vm_page_dump_pages)));

@@ -33,7 +33,7 @@
 #define _MACHINE_MINIDUMP_H_

 #define MINIDUMP_MAGIC "minidump FreeBSD/arm"
-#define MINIDUMP_VERSION 1
+#define MINIDUMP_VERSION 2

 /*
  * The first page of vmcore is dedicated to the following header.
@@ -51,6 +51,7 @@ struct minidumphdr {
     uint32_t kernbase;
     uint32_t arch;
     uint32_t mmuformat;
+    uint32_t dumpavailsize;
 };

 #define MINIDUMP_MMU_FORMAT_UNKNOWN 0

@@ -249,6 +249,7 @@ minidumpsys(struct dumperinfo *di)
     /* Calculate dump size. */
     dumpsize = pmapsize;
     dumpsize += round_page(msgbufp->msg_size);
+    dumpsize += round_page(sizeof(dump_avail));
     dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
     VM_PAGE_DUMP_FOREACH(pa) {
         if (is_dumpable(pa))
@@ -271,6 +272,7 @@ minidumpsys(struct dumperinfo *di)
     mdhdr.dmapphys = DMAP_MIN_PHYSADDR;
     mdhdr.dmapbase = DMAP_MIN_ADDRESS;
     mdhdr.dmapend = DMAP_MAX_ADDRESS;
+    mdhdr.dumpavailsize = round_page(sizeof(dump_avail));

     dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_AARCH64_VERSION,
         dumpsize);
@@ -295,6 +297,15 @@ minidumpsys(struct dumperinfo *di)
     if (error)
         goto fail;

+    /* Dump dump_avail */
+    _Static_assert(sizeof(dump_avail) <= sizeof(tmpbuffer),
+        "Large dump_avail not handled");
+    bzero(tmpbuffer, sizeof(tmpbuffer));
+    memcpy(tmpbuffer, dump_avail, sizeof(dump_avail));
+    error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
+    if (error)
+        goto fail;
+
     /* Dump bitmap */
     error = blk_write(di, (char *)vm_page_dump, 0,
         round_page(BITSET_SIZE(vm_page_dump_pages)));

@@ -31,7 +31,7 @@
 #define _MACHINE_MINIDUMP_H_ 1

 #define MINIDUMP_MAGIC "minidump FreeBSD/arm64"
-#define MINIDUMP_VERSION 1
+#define MINIDUMP_VERSION 2

 struct minidumphdr {
     char magic[24];
@@ -43,6 +43,7 @@ struct minidumphdr {
     uint64_t dmapphys;
     uint64_t dmapbase;
     uint64_t dmapend;
+    uint32_t dumpavailsize;
 };

 #endif /* _MACHINE_MINIDUMP_H_ */

@@ -224,6 +224,7 @@ minidumpsys(struct dumperinfo *di)
     /* Calculate dump size. */
     dumpsize = ptesize;
     dumpsize += round_page(msgbufp->msg_size);
+    dumpsize += round_page(sizeof(dump_avail));
     dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
     VM_PAGE_DUMP_FOREACH(pa) {
         /* Clear out undumpable pages now if needed */
@@ -246,6 +247,7 @@ minidumpsys(struct dumperinfo *di)
     mdhdr.ptesize = ptesize;
     mdhdr.kernbase = KERNBASE;
     mdhdr.paemode = pae_mode;
+    mdhdr.dumpavailsize = round_page(sizeof(dump_avail));

     dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_I386_VERSION,
         dumpsize);
@@ -269,6 +271,15 @@ minidumpsys(struct dumperinfo *di)
     if (error)
         goto fail;

+    /* Dump dump_avail */
+    _Static_assert(sizeof(dump_avail) <= sizeof(fakept),
+        "Large dump_avail not handled");
+    bzero(fakept, sizeof(fakept));
+    memcpy(fakept, dump_avail, sizeof(dump_avail));
+    error = blk_write(di, (char *)&fakept, 0, PAGE_SIZE);
+    if (error)
+        goto fail;
+
     /* Dump bitmap */
     error = blk_write(di, (char *)vm_page_dump, 0,
         round_page(BITSET_SIZE(vm_page_dump_pages)));

@@ -32,7 +32,7 @@
 #define _MACHINE_MINIDUMP_H_ 1

 #define MINIDUMP_MAGIC "minidump FreeBSD/i386"
-#define MINIDUMP_VERSION 1
+#define MINIDUMP_VERSION 2

 struct minidumphdr {
     char magic[24];
@@ -42,6 +42,7 @@ struct minidumphdr {
     uint32_t ptesize;
     uint32_t kernbase;
     uint32_t paemode;
+    uint32_t dumpavailsize;
 };

 #endif /* _MACHINE_MINIDUMP_H_ */

@@ -32,7 +32,7 @@
 #define _MACHINE_MINIDUMP_H_ 1

 #define MINIDUMP_MAGIC "minidump FreeBSD/mips"
-#define MINIDUMP_VERSION 1
+#define MINIDUMP_VERSION 2

 struct minidumphdr {
     char magic[24];
@@ -43,6 +43,7 @@ struct minidumphdr {
     uint64_t kernbase;
     uint64_t dmapbase;
     uint64_t dmapend;
+    uint32_t dumpavailsize;
 };

 #endif /* _MACHINE_MINIDUMP_H_ */

@@ -205,6 +205,7 @@ minidumpsys(struct dumperinfo *di)
     /* Calculate dump size. */
     dumpsize = ptesize;
     dumpsize += round_page(msgbufp->msg_size);
+    dumpsize += round_page(sizeof(dump_avail));
     dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
     VM_PAGE_DUMP_FOREACH(pa) {
         /* Clear out undumpable pages now if needed */
@@ -225,6 +226,7 @@ minidumpsys(struct dumperinfo *di)
     mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages));
     mdhdr.ptesize = ptesize;
     mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
+    mdhdr.dumpavailsize = round_page(sizeof(dump_avail));

     dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_MIPS_VERSION,
         dumpsize);
@@ -249,6 +251,15 @@ minidumpsys(struct dumperinfo *di)
     if (error)
         goto fail;

+    /* Dump dump_avail */
+    _Static_assert(sizeof(dump_avail) <= sizeof(tmpbuffer),
+        "Large dump_avail not handled");
+    bzero(tmpbuffer, sizeof(tmpbuffer));
+    memcpy(tmpbuffer, dump_avail, sizeof(dump_avail));
+    error = write_buffer(di, tmpbuffer, PAGE_SIZE);
+    if (error)
+        goto fail;
+
     /* Dump bitmap */
     error = write_buffer(di, (char *)vm_page_dump,
         round_page(BITSET_SIZE(vm_page_dump_pages)));

@@ -31,7 +31,7 @@
 #define _MACHINE_MINIDUMP_H_ 1

 #define MINIDUMP_MAGIC "minidump FreeBSD/powerpc64"
-#define MINIDUMP_VERSION 1
+#define MINIDUMP_VERSION 2

 struct minidumphdr {
     char magic[32];
@@ -47,6 +47,7 @@ struct minidumphdr {
     int hw_direct_map;
     uint64_t startkernel;
     uint64_t endkernel;
+    uint32_t dumpavailsize;
 };

 #endif /* _MACHINE_MINIDUMP_H_ */

@@ -270,6 +270,7 @@ minidumpsys(struct dumperinfo *di)
     /* Calculate dump size */
     dumpsize = PAGE_SIZE; /* header */
     dumpsize += round_page(msgbufp->msg_size);
+    dumpsize += round_page(sizeof(dump_avail));
     dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
     dumpsize += pmapsize;
     VM_PAGE_DUMP_FOREACH(pa) {
@@ -296,6 +297,7 @@ minidumpsys(struct dumperinfo *di)
     mdhdr.hw_direct_map = hw_direct_map;
     mdhdr.startkernel = __startkernel;
     mdhdr.endkernel = __endkernel;
+    mdhdr.dumpavailsize = round_page(sizeof(dump_avail));

     dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_POWERPC_VERSION,
         dumpsize);
@@ -320,6 +322,16 @@ minidumpsys(struct dumperinfo *di)
         round_page(msgbufp->msg_size));
     dump_total("msgbuf", round_page(msgbufp->msg_size));

+    /* Dump dump_avail */
+    _Static_assert(sizeof(dump_avail) <= sizeof(pgbuf),
+        "Large dump_avail not handled");
+    bzero(pgbuf, sizeof(mdhdr));
+    memcpy(pgbuf, dump_avail, sizeof(dump_avail));
+    error = blk_write(di, pgbuf, 0, PAGE_SIZE);
+    if (error)
+        goto fail;
+    dump_total("dump_avail", round_page(sizeof(dump_avail)));
+
     /* Dump bitmap */
     error = blk_write(di, (char *)vm_page_dump, 0,
         round_page(BITSET_SIZE(vm_page_dump_pages)));

@@ -31,7 +31,7 @@
 #define _MACHINE_MINIDUMP_H_ 1

 #define MINIDUMP_MAGIC "minidump FreeBSD/riscv"
-#define MINIDUMP_VERSION 1
+#define MINIDUMP_VERSION 2

 struct minidumphdr {
     char magic[24];
@@ -43,6 +43,7 @@ struct minidumphdr {
     uint64_t dmapphys;
     uint64_t dmapbase;
     uint64_t dmapend;
+    uint32_t dumpavailsize;
 };

 #endif /* _MACHINE_MINIDUMP_H_ */

@@ -257,6 +257,7 @@ minidumpsys(struct dumperinfo *di)
     /* Calculate dump size */
     dumpsize = pmapsize;
     dumpsize += round_page(msgbufp->msg_size);
+    dumpsize += round_page(sizeof(dump_avail));
     dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
     VM_PAGE_DUMP_FOREACH(pa) {
         /* Clear out undumpable pages now if needed */
@@ -280,6 +281,7 @@ minidumpsys(struct dumperinfo *di)
     mdhdr.dmapphys = DMAP_MIN_PHYSADDR;
     mdhdr.dmapbase = DMAP_MIN_ADDRESS;
     mdhdr.dmapend = DMAP_MAX_ADDRESS;
+    mdhdr.dumpavailsize = round_page(sizeof(dump_avail));

     dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_RISCV_VERSION,
         dumpsize);
@@ -304,6 +306,15 @@ minidumpsys(struct dumperinfo *di)
     if (error)
         goto fail;

+    /* Dump dump_avail */
+    _Static_assert(sizeof(dump_avail) <= sizeof(tmpbuffer),
+        "Large dump_avail not handled");
+    bzero(tmpbuffer, sizeof(tmpbuffer));
+    memcpy(tmpbuffer, dump_avail, sizeof(dump_avail));
+    error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
+    if (error)
+        goto fail;
+
     /* Dump bitmap */
     error = blk_write(di, (char *)vm_page_dump, 0,
         round_page(BITSET_SIZE(vm_page_dump_pages)));

@@ -607,10 +607,13 @@ vm_page_startup(vm_offset_t vaddr)
      * included should the sf_buf code decide to use them.
      */
     last_pa = 0;
-    for (i = 0; dump_avail[i + 1] != 0; i += 2)
+    vm_page_dump_pages = 0;
+    for (i = 0; dump_avail[i + 1] != 0; i += 2) {
+        vm_page_dump_pages += howmany(dump_avail[i + 1], PAGE_SIZE) -
+            dump_avail[i] / PAGE_SIZE;
         if (dump_avail[i + 1] > last_pa)
             last_pa = dump_avail[i + 1];
-    vm_page_dump_pages = last_pa / PAGE_SIZE;
+    }
     vm_page_dump_size = round_page(BITSET_SIZE(vm_page_dump_pages));
     new_end -= vm_page_dump_size;
     vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,

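To put the commit message's numbers in context (assuming 4 KiB pages; the exact Altra memory map is not spelled out here): a dense bitmap needs one bit per page of physical address span, so the reported >2 GiB bitmap corresponds to a span of more than 2 GiB x 8 x 4 KiB = 64 TiB between address 0 and the last populated page, while the ~8 MiB figure corresponds to roughly 8 MiB x 8 x 4 KiB = 256 GiB of actual RAM. The loop above now sizes vm_page_dump_pages from the sum of the dump_avail ranges, so the bitmap tracks the second number rather than the first.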
@@ -589,22 +589,65 @@ malloc2vm_flags(int malloc_flags)

 extern struct bitset *vm_page_dump;
 extern long vm_page_dump_pages;
+extern vm_paddr_t dump_avail[];

 static inline void
 dump_add_page(vm_paddr_t pa)
 {
-    BIT_SET_ATOMIC(vm_page_dump_pages, pa >> PAGE_SHIFT, vm_page_dump);
+    vm_pindex_t adj;
+    int i;
+
+    adj = 0;
+    for (i = 0; dump_avail[i + 1] != 0; i += 2) {
+        if (pa >= dump_avail[i] && pa < dump_avail[i + 1]) {
+            BIT_SET_ATOMIC(vm_page_dump_pages,
+                (pa >> PAGE_SHIFT) - (dump_avail[i] >> PAGE_SHIFT) +
+                adj, vm_page_dump);
+            return;
+        }
+        adj += howmany(dump_avail[i + 1], PAGE_SIZE) -
+            dump_avail[i] / PAGE_SIZE;
+    }
 }

 static inline void
 dump_drop_page(vm_paddr_t pa)
 {
-    BIT_CLR_ATOMIC(vm_page_dump_pages, pa >> PAGE_SHIFT, vm_page_dump);
+    vm_pindex_t adj;
+    int i;
+
+    adj = 0;
+    for (i = 0; dump_avail[i + 1] != 0; i += 2) {
+        if (pa >= dump_avail[i] && pa < dump_avail[i + 1]) {
+            BIT_CLR_ATOMIC(vm_page_dump_pages,
+                (pa >> PAGE_SHIFT) - (dump_avail[i] >> PAGE_SHIFT) +
+                adj, vm_page_dump);
+            return;
+        }
+        adj += howmany(dump_avail[i + 1], PAGE_SIZE) -
+            dump_avail[i] / PAGE_SIZE;
+    }
 }

+static inline vm_paddr_t
+vm_page_dump_index_to_pa(int bit)
+{
+    int i, tot;
+
+    for (i = 0; dump_avail[i + 1] != 0; i += 2) {
+        tot = howmany(dump_avail[i + 1], PAGE_SIZE) -
+            dump_avail[i] / PAGE_SIZE;
+        if (bit < tot)
+            return ((vm_paddr_t)bit * PAGE_SIZE +
+                dump_avail[i] & ~PAGE_MASK);
+        bit -= tot;
+    }
+    return ((vm_paddr_t)NULL);
+}
+
 #define VM_PAGE_DUMP_FOREACH(pa) \
     for (vm_pindex_t __b = BIT_FFS(vm_page_dump_pages, vm_page_dump); \
-        (pa) = (__b - 1) * PAGE_SIZE, __b != 0; \
+        (pa) = vm_page_dump_index_to_pa(__b - 1), __b != 0; \
         __b = BIT_FFS_AT(vm_page_dump_pages, vm_page_dump, __b))

 bool vm_page_busy_acquire(vm_page_t m, int allocflags);

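The dump_add_page()/dump_drop_page() index math and vm_page_dump_index_to_pa() above are inverses of each other over the dumpable ranges. The small user-space program below (hypothetical layout and helper names, written only to illustrate the arithmetic, including the howmany() rounding that counts a trailing partial page) checks that round trip:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096
#define PAGE_MASK	(PAGE_SIZE - 1)
#ifndef howmany
#define howmany(x, y)	(((x) + ((y) - 1)) / (y))
#endif

/* Hypothetical dump_avail: 1 MiB at 0, 1 MiB at 16 MiB, terminator. */
static const uint64_t avail[] = { 0, 1 << 20, 16 << 20, 17 << 20, 0, 0 };

static uint64_t
pa_to_bit(uint64_t pa)			/* mirrors dump_add_page() */
{
    uint64_t adj = 0;
    int i;

    for (i = 0; avail[i + 1] != 0; i += 2) {
        if (pa >= avail[i] && pa < avail[i + 1])
            return ((pa / PAGE_SIZE) - (avail[i] / PAGE_SIZE) + adj);
        adj += howmany(avail[i + 1], PAGE_SIZE) - avail[i] / PAGE_SIZE;
    }
    return (UINT64_MAX);
}

static uint64_t
bit_to_pa(uint64_t bit)			/* mirrors vm_page_dump_index_to_pa() */
{
    uint64_t tot;
    int i;

    for (i = 0; avail[i + 1] != 0; i += 2) {
        tot = howmany(avail[i + 1], PAGE_SIZE) - avail[i] / PAGE_SIZE;
        if (bit < tot)
            return ((avail[i] & ~(uint64_t)PAGE_MASK) +
                bit * PAGE_SIZE);
        bit -= tot;
    }
    return (UINT64_MAX);
}

int
main(void)
{
    uint64_t pa;

    for (pa = 16 << 20; pa < (17 << 20); pa += PAGE_SIZE)
        assert(bit_to_pa(pa_to_bit(pa)) == pa);
    /* The 256 pages of the high range occupy bits 256..511. */
    printf("bits %ju..%ju\n", (uintmax_t)pa_to_bit(16 << 20),
        (uintmax_t)pa_to_bit((17 << 20) - PAGE_SIZE));
    return (0);
}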
@@ -46,8 +46,8 @@
 #define VM_NFREEORDER_MAX VM_NFREEORDER
 #endif

-extern vm_paddr_t phys_avail[];
-extern vm_paddr_t dump_avail[];
+extern vm_paddr_t phys_avail[PHYS_AVAIL_COUNT];
+extern vm_paddr_t dump_avail[PHYS_AVAIL_COUNT];

 /* Domains must be dense (non-sparse) and zero-based. */
 struct mem_affinity {