Add kernel support for minidumps on arm64.

Obtained from:	ABT Systems Ltd
Sponsored by:	The FreeBSD Foundation
Differential Revision:	https://reviews.freebsd.org/D3318
andrew 2015-08-20 12:49:56 +00:00
parent 04ea917e53
commit cf78c76d8f
3 changed files with 429 additions and 9 deletions
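
The core of the change is a bitmap with one bit per physical page of RAM:
pages to be written are marked with dump_add_page(), excluded pages are
cleared with dump_drop_page(), and minidumpsys() sizes and writes the dump
by walking the set bits. As a stand-alone illustration of that bookkeeping
(not part of the commit; the 4 KiB page size and the names mark_page() and
bitmap[] are assumptions made for the example), the index/bit arithmetic
looks like this:

/*
 * Illustrative sketch, not part of this commit: the same index/bit split
 * used by dump_add_page()/dump_drop_page() below, assuming 4 KiB pages.
 */
#include <stdint.h>
#include <stdio.h>

#define	PAGE_SHIFT	12		/* assumed: 4 KiB pages, as on arm64 */

static uint64_t bitmap[4];		/* covers 4 * 64 = 256 pages */

static void
mark_page(uint64_t pa)
{
	uint64_t page = pa >> PAGE_SHIFT;	/* physical page number */

	/* One 64-bit word tracks 64 pages: word = page / 64, bit = page % 64. */
	bitmap[page >> 6] |= 1ULL << (page & 63);
}

int
main(void)
{
	mark_page(0x1000);	/* page 1  -> word 0, bit 1 */
	mark_page(0x41000);	/* page 65 -> word 1, bit 1 */
	printf("word 0 = %#jx, word 1 = %#jx\n",
	    (uintmax_t)bitmap[0], (uintmax_t)bitmap[1]);
	return (0);
}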


@@ -1,4 +1,5 @@
/*-
 * Copyright (c) 2006 Peter Wemm
 * Copyright (c) 2015 The FreeBSD Foundation
 * All rights reserved.
 *
@@ -8,6 +9,7 @@
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
@@ -32,19 +34,434 @@ __FBSDID("$FreeBSD$");
#include "opt_watchdog.h"
#include "opt_watchdog.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/msgbuf.h>
#include <sys/watchdog.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/pmap.h>

#include <machine/md_var.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <machine/minidump.h>

CTASSERT(sizeof(struct kerneldumpheader) == 512);

/*
 * Don't touch the first SIZEOF_METADATA bytes on the dump device. This
 * is to protect us from metadata and to protect metadata from us.
 */
#define	SIZEOF_METADATA		(64*1024)
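
/*
 * Bitmap with one bit per physical page: a set bit marks the page for
 * inclusion in the minidump. vm_page_dump_size is the bitmap size in bytes.
 */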
uint64_t *vm_page_dump;
int vm_page_dump_size;

static struct kerneldumpheader kdh;
static off_t dumplo;

/* Handle chunked writes. */
static size_t fragsz;
static void *dump_va;
static size_t counter, progress, dumpsize;

static uint64_t tmpbuffer[PAGE_SIZE / sizeof(uint64_t)];

CTASSERT(sizeof(*vm_page_dump) == 8);
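
/*
 * A page is dumpable if its vm_page is not flagged PG_NODUMP or, when it
 * has no vm_page, if it lies inside one of the dump_avail ranges.
 */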
static int
is_dumpable(vm_paddr_t pa)
{
	vm_page_t m;
	int i;

	if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
		return ((m->flags & PG_NODUMP) == 0);
	for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
		if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
			return (1);
	}
	return (0);
}

static int
blk_flush(struct dumperinfo *di)
{
	int error;

	if (fragsz == 0)
		return (0);

	error = dump_write(di, dump_va, 0, dumplo, fragsz);
	dumplo += fragsz;
	fragsz = 0;
	return (error);
}
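
/*
 * Track which progress deciles have already been reported, so that each
 * "..N%" message is printed at most once.
 */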
static struct {
	int min_per;
	int max_per;
	int visited;
} progress_track[10] = {
	{  0,  10, 0},
	{ 10,  20, 0},
	{ 20,  30, 0},
	{ 30,  40, 0},
	{ 40,  50, 0},
	{ 50,  60, 0},
	{ 60,  70, 0},
	{ 70,  80, 0},
	{ 80,  90, 0},
	{ 90, 100, 0}
};

static void
report_progress(size_t progress, size_t dumpsize)
{
	int sofar, i;

	sofar = 100 - ((progress * 100) / dumpsize);
	for (i = 0; i < nitems(progress_track); i++) {
		if (sofar < progress_track[i].min_per ||
		    sofar > progress_track[i].max_per)
			continue;
		if (progress_track[i].visited)
			return;
		progress_track[i].visited = 1;
		printf("..%d%%", sofar);
		return;
	}
}
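
/*
 * Write a block of data to the dump device. At most one of ptr (a kernel
 * virtual address, written out directly) and pa (a physical address,
 * reached through the direct map and streamed out via blk_flush()) should
 * be supplied per call.
 */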
static int
blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
{
	size_t len;
	int error, c;
	u_int maxdumpsz;

	maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
	if (maxdumpsz == 0)	/* seatbelt */
		maxdumpsz = PAGE_SIZE;
	error = 0;
	if ((sz % PAGE_SIZE) != 0) {
		printf("size not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL && pa != 0) {
		printf("can't have both va and pa!\n");
		return (EINVAL);
	}
	if ((((uintptr_t)pa) % PAGE_SIZE) != 0) {
		printf("address not page aligned %p\n", ptr);
		return (EINVAL);
	}
	if (ptr != NULL) {
		/*
		 * If we're doing a virtual dump, flush any
		 * pre-existing pa pages.
		 */
		error = blk_flush(di);
		if (error)
			return (error);
	}
	while (sz) {
		len = maxdumpsz - fragsz;
		if (len > sz)
			len = sz;
		counter += len;
		progress -= len;
		if (counter >> 22) {
			report_progress(progress, dumpsize);
			counter &= (1 << 22) - 1;
		}

		wdog_kern_pat(WD_LASTVAL);

		if (ptr) {
			error = dump_write(di, ptr, 0, dumplo, len);
			if (error)
				return (error);
			dumplo += len;
			ptr += len;
			sz -= len;
		} else {
			dump_va = (void *)PHYS_TO_DMAP(pa);
			fragsz += len;
			pa += len;
			sz -= len;
			error = blk_flush(di);
			if (error)
				return (error);
		}

		/* Check for user abort. */
		c = cncheckc();
		if (c == 0x03)
			return (ECANCELED);
		if (c != -1)
			printf(" (CTRL-C to abort) ");
	}

	return (0);
}

int
minidumpsys(struct dumperinfo *di)
{
	pd_entry_t *l1, *l2;
	pt_entry_t *l3;
	uint32_t pmapsize;
	vm_offset_t va;
	vm_paddr_t pa;
	int error;
	uint64_t bits;
	int i, bit;
	int retry_count;
	struct minidumphdr mdhdr;
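
	/*
	 * On disk the minidump is laid out as: the kernel dump header, a
	 * page holding struct minidumphdr, the message buffer, the page
	 * bitmap, one (possibly faked) L3 page-table page per L2_SIZE of
	 * kernel VA, the data pages themselves, and a trailing copy of
	 * the dump header.
	 */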
printf("minidumpsys\n");
while (1);
	retry_count = 0;
retry:
	retry_count++;
	error = 0;
	pmapsize = 0;
	for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
		pmapsize += PAGE_SIZE;
		if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3))
			continue;

		/* We should always be using the l2 table for kvm */
		if (l2 == NULL)
			continue;

		if ((*l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
			pa = *l2 & ~ATTR_MASK;
			for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) {
				if (is_dumpable(pa))
					dump_add_page(pa);
			}
		} else if ((*l2 & ATTR_DESCR_MASK) == L2_TABLE) {
			for (i = 0; i < Ln_ENTRIES; i++) {
				if ((l3[i] & ATTR_DESCR_MASK) != L3_PAGE)
					continue;
				pa = l3[i] & ~ATTR_MASK;
				if (is_dumpable(pa))
					dump_add_page(pa);
			}
		}
	}

	/* Calculate dump size. */
	dumpsize = pmapsize;
	dumpsize += round_page(msgbufp->msg_size);
	dumpsize += round_page(vm_page_dump_size);
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = ffsl(bits) - 1;
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
			    bit) * PAGE_SIZE;
			/* Clear out undumpable pages now if needed */
			if (is_dumpable(pa))
				dumpsize += PAGE_SIZE;
			else
				dump_drop_page(pa);
			bits &= ~(1ul << bit);
		}
	}
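	/* Account for the page occupied by the minidump header itself. */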
	dumpsize += PAGE_SIZE;

	/* Determine dump offset on device. */
	if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
		error = E2BIG;
		goto fail;
	}
	dumplo = di->mediaoffset + di->mediasize - dumpsize;
	dumplo -= sizeof(kdh) * 2;
	progress = dumpsize;

	/* Initialize mdhdr */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	mdhdr.version = MINIDUMP_VERSION;
	mdhdr.msgbufsize = msgbufp->msg_size;
	mdhdr.bitmapsize = vm_page_dump_size;
	mdhdr.pmapsize = pmapsize;
	mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
	mdhdr.dmapphys = DMAP_MIN_PHYSADDR;
	mdhdr.dmapbase = DMAP_MIN_ADDRESS;
	mdhdr.dmapend = DMAP_MAX_ADDRESS;

	mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_AARCH64_VERSION,
	    dumpsize, di->blocksize);

	printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20,
	    ptoa((uintmax_t)physmem) / 1048576);

	/* Dump leader */
	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
	if (error)
		goto fail;
	dumplo += sizeof(kdh);

	/* Dump my header */
	bzero(&tmpbuffer, sizeof(tmpbuffer));
	bcopy(&mdhdr, &tmpbuffer, sizeof(mdhdr));
	error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
	if (error)
		goto fail;

	/* Dump msgbuf up front */
	error = blk_write(di, (char *)msgbufp->msg_ptr, 0,
	    round_page(msgbufp->msg_size));
	if (error)
		goto fail;

	/* Dump bitmap */
	error = blk_write(di, (char *)vm_page_dump, 0,
	    round_page(vm_page_dump_size));
	if (error)
		goto fail;

	/* Dump kernel page directory pages */
	bzero(&tmpbuffer, sizeof(tmpbuffer));
	for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) {
		if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3)) {
			/* We always write a page, even if it is zero */
			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse tmpbuffer in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
		} else if (l2 == NULL) {
			pa = (*l1 & ~ATTR_MASK) | (va & L1_OFFSET);

			/* Generate fake l3 entries based upon the l1 entry */
			for (i = 0; i < Ln_ENTRIES; i++) {
				tmpbuffer[i] = pa + (i * PAGE_SIZE) |
				    ATTR_DEFAULT | L3_PAGE;
			}
			/* We always write a page, even if it is zero */
			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse tmpbuffer in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			bzero(&tmpbuffer, sizeof(tmpbuffer));
		} else if ((*l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
			/* TODO: Handle an invalid L2 entry */
			pa = (*l2 & ~ATTR_MASK) | (va & L2_OFFSET);

			/* Generate fake l3 entries based upon the l2 entry */
			for (i = 0; i < Ln_ENTRIES; i++) {
				tmpbuffer[i] = pa + (i * PAGE_SIZE) |
				    ATTR_DEFAULT | L3_PAGE;
			}
			/* We always write a page, even if it is zero */
			error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE);
			if (error)
				goto fail;
			/* flush, in case we reuse tmpbuffer in the same block */
			error = blk_flush(di);
			if (error)
				goto fail;
			bzero(&tmpbuffer, sizeof(tmpbuffer));
			continue;
		} else {
			pa = *l2 & ~ATTR_MASK;

			/* We always write a page, even if it is zero */
			error = blk_write(di, NULL, pa, PAGE_SIZE);
			if (error)
				goto fail;
		}
	}

	/* Dump memory chunks */
	/* XXX cluster it up and use blk_dump() */
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		while (bits) {
			bit = ffsl(bits) - 1;
			pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) +
			    bit) * PAGE_SIZE;
			error = blk_write(di, 0, pa, PAGE_SIZE);
			if (error)
				goto fail;
			bits &= ~(1ul << bit);
		}
	}

	error = blk_flush(di);
	if (error)
		goto fail;

	/* Dump trailer */
	error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
	if (error)
		goto fail;
	dumplo += sizeof(kdh);

	/* Signal completion, signoff and exit stage left. */
	dump_write(di, NULL, 0, 0, 0);

	printf("\nDump complete\n");
	return (0);

fail:
	if (error < 0)
		error = -error;

	printf("\n");
	if (error == ENOSPC) {
		printf("Dump map grown while dumping. ");
		if (retry_count < 5) {
			printf("Retrying...\n");
			goto retry;
		}
		printf("Dump failed.\n");
	} else if (error == ECANCELED)
		printf("Dump aborted\n");
	else if (error == E2BIG)
		printf("Dump failed. Partition too small.\n");
	else
		printf("** DUMP FAILED (ERROR %d) **\n", error);
	return (error);
}
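
/* Mark a physical page for inclusion in the minidump. */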
void
dump_add_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 6;		/* 2^6 = 64 */
	bit = pa & 63;
	atomic_set_long(&vm_page_dump[idx], 1ul << bit);
}
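
/* Remove a physical page from the minidump. */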
void
dump_drop_page(vm_paddr_t pa)
{
	int idx, bit;

	pa >>= PAGE_SHIFT;
	idx = pa >> 6;		/* 2^6 = 64 */
	bit = pa & 63;
	atomic_clear_long(&vm_page_dump[idx], 1ul << bit);
}


@@ -61,6 +61,8 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
			break;
	}
	pa = m->phys_addr;
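	/* Pages allocated with M_NODUMP stay out of the minidump bitmap. */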
	if ((wait & M_NODUMP) == 0)
		dump_add_page(pa);
	va = (void *)PHYS_TO_DMAP(pa);
	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);
@@ -74,6 +76,7 @@ uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
	vm_paddr_t pa;

	pa = DMAP_TO_PHYS((vm_offset_t)mem);
	dump_drop_page(pa);
	m = PHYS_TO_VM_PAGE(pa);
	m->wire_count--;
	vm_page_free(m);


@@ -479,8 +479,8 @@ vm_page_startup(vm_offset_t vaddr)
	bzero((void *)mapped, end - new_end);
	uma_startup((void *)mapped, boot_pages);
-#if defined(__amd64__) || defined(__i386__) || defined(__arm__) || \
-    defined(__mips__)
+#if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \
+    defined(__i386__) || defined(__mips__)
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
@@ -557,12 +557,12 @@ vm_page_startup(vm_offset_t vaddr)
	 */
	new_end = vm_reserv_startup(&vaddr, new_end, high_water);
#endif
-#if defined(__amd64__) || defined(__mips__)
+#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__)
	/*
-	 * pmap_map on amd64 and mips can come out of the direct-map, not kvm
-	 * like i386, so the pages must be tracked for a crashdump to include
-	 * this data. This includes the vm_page_array and the early UMA
-	 * bootstrap pages.
+	 * pmap_map on arm64, amd64, and mips can come out of the direct-map,
+	 * not kvm like i386, so the pages must be tracked for a crashdump to
+	 * include this data. This includes the vm_page_array and the early
+	 * UMA bootstrap pages.
	 */
	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
		dump_add_page(pa);