Always use 64-bit physical addresses for dump_avail[] in minidumps
As of r365978, minidumps include a copy of dump_avail[].  This is an array of
vm_paddr_t ranges.  libkvm walks the array assuming that sizeof(vm_paddr_t) is
equal to the platform "word size", but that's not correct on some platforms.
For instance, i386 uses a 64-bit vm_paddr_t.

Fix the problem by always dumping 64-bit addresses.  On platforms where
vm_paddr_t is 32 bits wide, namely arm and mips (sometimes), translate
dump_avail[] to an array of uint64_t ranges.  With this change, libkvm no
longer needs to maintain a notion of the target word size, so get rid of it.

This is a no-op on platforms where sizeof(vm_paddr_t) == 8.

Reviewed by:    alc, kib
Sponsored by:   The FreeBSD Foundation
Differential Revision:  https://reviews.freebsd.org/D27082
This commit is contained in:
parent 7be2770a42
commit b957b18594
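
To make the kernel-side translation below easier to follow, here is a minimal
standalone sketch (not the committed code) of the widening step that the arm
and mips minidump paths now perform before writing dump_avail[] out: a
zero-terminated array of 32-bit physical-address ranges is copied into
uint64_t pairs so that libkvm can always parse the dumped copy as 64-bit
ranges.  The phys32_t typedef, the widen_dump_avail() helper, and the sample
ranges are illustrative assumptions, not names from the tree.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t phys32_t;              /* stand-in for a 32-bit vm_paddr_t */

/* Widen a zero-terminated list of [start, end] pairs to 64-bit entries. */
static void
widen_dump_avail(const phys32_t *avail, uint64_t *out)
{
        int i;

        for (i = 0; avail[i] != 0 || avail[i + 1] != 0; i += 2) {
                out[i] = avail[i];
                out[i + 1] = avail[i + 1];
        }
        out[i] = out[i + 1] = 0;        /* preserve the terminating pair */
}

int
main(void)
{
        phys32_t avail[] = { 0x00100000, 0x3fffffff, 0x40000000, 0x7fffffff,
            0, 0 };
        uint64_t widened[6];
        int i;

        widen_dump_avail(avail, widened);
        for (i = 0; widened[i] != 0 || widened[i + 1] != 0; i += 2)
                printf("0x%016jx - 0x%016jx\n", (uintmax_t)widened[i],
                    (uintmax_t)widened[i + 1]);
        return (0);
}

On 64-bit platforms the in-memory layout is already uint64_t pairs, so the
kernel keeps using memcpy() and the change is a no-op there, which is why
libkvm can drop pt_word_size and always read the dumped array with
_kvm64toh(), as the hunks below show.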
@@ -127,8 +127,7 @@ _aarch64_minidump_initvtop(kvm_t *kd)
         sparse_off = off + aarch64_round_page(vmst->hdr.bitmapsize) +
             aarch64_round_page(vmst->hdr.pmapsize);
         if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
-            vmst->hdr.bitmapsize, off, sparse_off, AARCH64_PAGE_SIZE,
-            sizeof(uint64_t)) == -1) {
+            vmst->hdr.bitmapsize, off, sparse_off, AARCH64_PAGE_SIZE) == -1) {
                 return (-1);
         }
         off += aarch64_round_page(vmst->hdr.bitmapsize);
@@ -169,8 +169,7 @@ _amd64_minidump_initvtop(kvm_t *kd)
         sparse_off = off + amd64_round_page(vmst->hdr.bitmapsize) +
             amd64_round_page(vmst->hdr.pmapsize);
         if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
-            vmst->hdr.bitmapsize, off, sparse_off, AMD64_PAGE_SIZE,
-            sizeof(uint64_t)) == -1) {
+            vmst->hdr.bitmapsize, off, sparse_off, AMD64_PAGE_SIZE) == -1) {
                 return (-1);
         }
         off += amd64_round_page(vmst->hdr.bitmapsize);
@@ -135,8 +135,7 @@ _arm_minidump_initvtop(kvm_t *kd)
         sparse_off = off + arm_round_page(vmst->hdr.bitmapsize) +
             arm_round_page(vmst->hdr.ptesize);
         if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
-            vmst->hdr.bitmapsize, off, sparse_off, ARM_PAGE_SIZE,
-            sizeof(uint32_t)) == -1) {
+            vmst->hdr.bitmapsize, off, sparse_off, ARM_PAGE_SIZE) == -1) {
                 return (-1);
         }
         off += arm_round_page(vmst->hdr.bitmapsize);
@@ -131,8 +131,7 @@ _i386_minidump_initvtop(kvm_t *kd)
         sparse_off = off + i386_round_page(vmst->hdr.bitmapsize) +
             i386_round_page(vmst->hdr.ptesize);
         if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
-            vmst->hdr.bitmapsize, off, sparse_off, I386_PAGE_SIZE,
-            sizeof(uint32_t)) == -1) {
+            vmst->hdr.bitmapsize, off, sparse_off, I386_PAGE_SIZE) == -1) {
                 return (-1);
         }
         off += i386_round_page(vmst->hdr.bitmapsize);
@@ -136,8 +136,7 @@ _mips_minidump_initvtop(kvm_t *kd)
         sparse_off = off + mips_round_page(vmst->hdr.bitmapsize) +
             mips_round_page(vmst->hdr.ptesize);
         if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
-            vmst->hdr.bitmapsize, off, sparse_off, MIPS_PAGE_SIZE,
-            sizeof(uint32_t)) == -1) {
+            vmst->hdr.bitmapsize, off, sparse_off, MIPS_PAGE_SIZE) == -1) {
                 return (-1);
         }
         off += mips_round_page(vmst->hdr.bitmapsize);
@@ -155,8 +155,7 @@ _powerpc64_minidump_initvtop(kvm_t *kd)

         /* build physical address lookup table for sparse pages */
         if (_kvm_pt_init(kd, hdr->dumpavailsize, dump_avail_off,
-            hdr->bitmapsize, bitmap_off, sparse_off, PPC64_PAGE_SIZE,
-            sizeof(uint64_t)) == -1)
+            hdr->bitmapsize, bitmap_off, sparse_off, PPC64_PAGE_SIZE) == -1)
                 goto failed;

         if (_kvm_pmap_init(kd, hdr->pmapsize, pmap_off) == -1)
@@ -128,8 +128,7 @@ _riscv_minidump_initvtop(kvm_t *kd)
         sparse_off = off + riscv_round_page(vmst->hdr.bitmapsize) +
             riscv_round_page(vmst->hdr.pmapsize);
         if (_kvm_pt_init(kd, vmst->hdr.dumpavailsize, dump_avail_off,
-            vmst->hdr.bitmapsize, off, sparse_off, RISCV_PAGE_SIZE,
-            sizeof(uint64_t)) == -1) {
+            vmst->hdr.bitmapsize, off, sparse_off, RISCV_PAGE_SIZE) == -1) {
                 return (-1);
         }
         off += riscv_round_page(vmst->hdr.bitmapsize);
@@ -291,8 +291,7 @@ _kvm_map_get(kvm_t *kd, u_long pa, unsigned int page_size)

 int
 _kvm_pt_init(kvm_t *kd, size_t dump_avail_size, off_t dump_avail_off,
-    size_t map_len, off_t map_off, off_t sparse_off, int page_size,
-    int word_size)
+    size_t map_len, off_t map_off, off_t sparse_off, int page_size)
 {
         uint64_t *addr;
         uint32_t *popcount_bin;
@@ -311,14 +310,8 @@ _kvm_pt_init(kvm_t *kd, size_t dump_avail_size, off_t dump_avail_off,
                  * last_pa. Create an implied dump_avail that
                  * expresses this.
                  */
-                kd->dump_avail = calloc(4, word_size);
-                if (word_size == sizeof(uint32_t)) {
-                        ((uint32_t *)kd->dump_avail)[1] = _kvm32toh(kd,
-                            map_len * 8 * page_size);
-                } else {
-                        kd->dump_avail[1] = _kvm64toh(kd,
-                            map_len * 8 * page_size);
-                }
+                kd->dump_avail = calloc(4, sizeof(uint64_t));
+                kd->dump_avail[1] = _kvm64toh(kd, map_len * 8 * page_size);
         }

         /*
@@ -375,7 +368,6 @@ _kvm_pt_init(kvm_t *kd, size_t dump_avail_size, off_t dump_avail_off,
         kd->pt_sparse_off = sparse_off;
         kd->pt_sparse_size = (uint64_t)*popcount_bin * page_size;
         kd->pt_page_size = page_size;
-        kd->pt_word_size = word_size;

         /*
          * Map the sparse page array. This is useful for performing point
@@ -419,13 +411,7 @@ _kvm_pmap_init(kvm_t *kd, uint32_t pmap_size, off_t pmap_off)
 static inline uint64_t
 dump_avail_n(kvm_t *kd, long i)
 {
-        uint32_t *d32;
-
-        if (kd->pt_word_size == sizeof(uint32_t)) {
-                d32 = (uint32_t *)kd->dump_avail;
-                return (_kvm32toh(kd, d32[i]));
-        } else
-                return (_kvm64toh(kd, kd->dump_avail[i]));
+        return (_kvm64toh(kd, kd->dump_avail[i]));
 }

 uint64_t
@@ -112,7 +112,6 @@ struct __kvm {
         uint64_t pt_sparse_size;
         uint32_t *pt_popcounts;
         unsigned int pt_page_size;
-        unsigned int pt_word_size;

         /* Page & sparse map structures. */
         void *page_map;
@@ -190,7 +189,7 @@ kvaddr_t _kvm_dpcpu_validaddr(kvm_t *, kvaddr_t);
 int _kvm_probe_elf_kernel(kvm_t *, int, int);
 int _kvm_is_minidump(kvm_t *);
 int _kvm_read_core_phdrs(kvm_t *, size_t *, GElf_Phdr **);
-int _kvm_pt_init(kvm_t *, size_t, off_t, size_t, off_t, off_t, int, int);
+int _kvm_pt_init(kvm_t *, size_t, off_t, size_t, off_t, off_t, int);
 off_t _kvm_pt_find(kvm_t *, uint64_t, unsigned int);
 int _kvm_visit_cb(kvm_t *, kvm_walk_pages_cb_t *, void *, u_long,
     u_long, u_long, vm_prot_t, size_t, unsigned int);
@@ -169,18 +169,18 @@ blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
 }

 /* A buffer for general use. Its size must be one page at least. */
-static char dumpbuf[PAGE_SIZE];
+static char dumpbuf[PAGE_SIZE] __aligned(sizeof(uint64_t));
 CTASSERT(sizeof(dumpbuf) % sizeof(pt2_entry_t) == 0);

 int
 minidumpsys(struct dumperinfo *di)
 {
         struct minidumphdr mdhdr;
-        uint64_t dumpsize;
+        uint64_t dumpsize, *dump_avail_buf;
         uint32_t ptesize;
         uint32_t pa, prev_pa = 0, count = 0;
         vm_offset_t va;
-        int error;
+        int error, i;
         char *addr;

         /*
@@ -207,7 +207,7 @@ minidumpsys(struct dumperinfo *di)
         /* Calculate dump size. */
         dumpsize = ptesize;
         dumpsize += round_page(msgbufp->msg_size);
-        dumpsize += round_page(sizeof(dump_avail));
+        dumpsize += round_page(nitems(dump_avail) * sizeof(uint64_t));
         dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
         VM_PAGE_DUMP_FOREACH(pa) {
                 /* Clear out undumpable pages now if needed */
@@ -230,7 +230,8 @@ minidumpsys(struct dumperinfo *di)
         mdhdr.kernbase = KERNBASE;
         mdhdr.arch = __ARM_ARCH;
         mdhdr.mmuformat = MINIDUMP_MMU_FORMAT_V6;
-        mdhdr.dumpavailsize = round_page(sizeof(dump_avail));
+        mdhdr.dumpavailsize = round_page(nitems(dump_avail) *
+            sizeof(uint64_t));

         dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_ARM_VERSION,
             dumpsize);
@@ -254,11 +255,15 @@ minidumpsys(struct dumperinfo *di)
         if (error)
                 goto fail;

-        /* Dump dump_avail */
-        _Static_assert(sizeof(dump_avail) <= sizeof(dumpbuf),
+        /* Dump dump_avail. Make a copy using 64-bit physical addresses. */
+        _Static_assert(nitems(dump_avail) * sizeof(uint64_t) <= sizeof(dumpbuf),
             "Large dump_avail not handled");
         bzero(dumpbuf, sizeof(dumpbuf));
-        memcpy(dumpbuf, dump_avail, sizeof(dump_avail));
+        dump_avail_buf = (uint64_t *)dumpbuf;
+        for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
+                dump_avail_buf[i] = dump_avail[i];
+                dump_avail_buf[i + 1] = dump_avail[i + 1];
+        }
         error = blk_write(di, dumpbuf, 0, PAGE_SIZE);
         if (error)
                 goto fail;
@@ -290,7 +290,7 @@ dumpsys_generic(struct dumperinfo *di)
         size_t hdrsz;
         int error;

-#if !defined(__powerpc__) || defined(__powerpc64__)
+#if MINIDUMP_PAGE_TRACKING == 1
         if (do_minidump)
                 return (minidumpsys(di));
 #endif
@@ -60,7 +60,7 @@ static struct kerneldumpheader kdh;
 /* Handle chunked writes. */
 static uint64_t counter, progress, dumpsize;
 /* Just auxiliary bufffer */
-static char tmpbuffer[PAGE_SIZE];
+static char tmpbuffer[PAGE_SIZE] __aligned(sizeof(uint64_t));

 extern pd_entry_t *kernel_segmap;

@@ -165,6 +165,7 @@ int
 minidumpsys(struct dumperinfo *di)
 {
         struct minidumphdr mdhdr;
+        uint64_t *dump_avail_buf;
         uint32_t ptesize;
         vm_paddr_t pa;
         vm_offset_t prev_pte = 0;
@@ -206,7 +207,7 @@ minidumpsys(struct dumperinfo *di)
         /* Calculate dump size. */
         dumpsize = ptesize;
         dumpsize += round_page(msgbufp->msg_size);
-        dumpsize += round_page(sizeof(dump_avail));
+        dumpsize += round_page(nitems(dump_avail) * sizeof(uint64_t));
         dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
         VM_PAGE_DUMP_FOREACH(pa) {
                 /* Clear out undumpable pages now if needed */
@@ -227,7 +228,7 @@ minidumpsys(struct dumperinfo *di)
         mdhdr.bitmapsize = round_page(BITSET_SIZE(vm_page_dump_pages));
         mdhdr.ptesize = ptesize;
         mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
-        mdhdr.dumpavailsize = round_page(sizeof(dump_avail));
+        mdhdr.dumpavailsize = round_page(nitems(dump_avail) * sizeof(uint64_t));

         dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_MIPS_VERSION,
             dumpsize);
@@ -252,11 +253,19 @@ minidumpsys(struct dumperinfo *di)
         if (error)
                 goto fail;

-        /* Dump dump_avail */
-        _Static_assert(sizeof(dump_avail) <= sizeof(tmpbuffer),
-            "Large dump_avail not handled");
+        /* Dump dump_avail. Make a copy using 64-bit physical addresses. */
+        _Static_assert(nitems(dump_avail) * sizeof(uint64_t) <=
+            sizeof(tmpbuffer), "Large dump_avail not handled");
         bzero(tmpbuffer, sizeof(tmpbuffer));
-        memcpy(tmpbuffer, dump_avail, sizeof(dump_avail));
+        if (sizeof(dump_avail[0]) != sizeof(uint64_t)) {
+                dump_avail_buf = (uint64_t *)tmpbuffer;
+                for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i++) {
+                        dump_avail_buf[i] = dump_avail[i];
+                        dump_avail_buf[i + 1] = dump_avail[i + 1];
+                }
+        } else {
+                memcpy(tmpbuffer, dump_avail, sizeof(dump_avail));
+        }
         error = write_buffer(di, tmpbuffer, PAGE_SIZE);
         if (error)
                 goto fail;