Factor out duplicated code from dumpsys() on each architecture into generic
code in sys/kern/kern_dump.c. Most dumpsys() implementations are nearly
identical and simply redefine a number of constants and helper subroutines;
a generic implementation will make it easier to implement features around
kernel core dumps. This change does not alter any minidump code and should
have no functional impact.

PR:		193873
Differential Revision:	https://reviews.freebsd.org/D904
Submitted by:	Conrad Meyer <conrad.meyer@isilon.com>
Reviewed by:	jhibbits (earlier version)
Sponsored by:	EMC / Isilon Storage Division
Mark Johnston 2015-01-07 01:01:39 +00:00
parent ff7c6d42f3
commit bdb9ab0dd9
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=276772
25 changed files with 1143 additions and 1690 deletions
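The heart of the change is the machine-independent chunk walk that now lives in sys/kern/kern_dump.c: each architecture fills a dump_map[] table of physical-address ranges and supplies a handful of hooks via <machine/dump.h>, and dumpsys_generic() drives everything through dumpsys_foreach_chunk(). The stand-alone user-space sketch below is not part of this commit; it only mimics that iteration/callback pattern, with simplified stand-ins for vm_paddr_t and struct dumperinfo and a made-up dump_map table.

#include <stdint.h>
#include <stdio.h>

struct dump_pa {
        uint64_t pa_start;
        uint64_t pa_size;               /* a zero size terminates the table */
};

typedef int dumpsys_callback_t(struct dump_pa *, int, void *);

/* Hypothetical physical ranges; the kernel fills this from dump_avail[]. */
static struct dump_pa dump_map[] = {
        { 0x00100000, 0x07f00000 },
        { 0x10000000, 0x10000000 },
        { 0, 0 },
};

static struct dump_pa *
dumpsys_pa_next(struct dump_pa *mdp)
{

        if (mdp == NULL)
                return (&dump_map[0]);
        mdp++;
        return (mdp->pa_size == 0 ? NULL : mdp);
}

/* Visit every chunk; return the chunk count or a negated error. */
static int
dumpsys_foreach_chunk(dumpsys_callback_t *cb, void *arg)
{
        struct dump_pa *mdp;
        int error, seqnr;

        seqnr = 0;
        for (mdp = dumpsys_pa_next(NULL); mdp != NULL;
            mdp = dumpsys_pa_next(mdp)) {
                error = (*cb)(mdp, seqnr++, arg);
                if (error != 0)
                        return (-error);
        }
        return (seqnr);
}

/* Analogue of cb_size(): accumulate the total dump size. */
static int
cb_size(struct dump_pa *mdp, int seqnr, void *arg)
{

        *(uint64_t *)arg += mdp->pa_size;
        return (0);
}

int
main(void)
{
        uint64_t dumpsize;
        int nchunks;

        dumpsize = 0;
        nchunks = dumpsys_foreach_chunk(cb_size, &dumpsize);
        printf("%d chunks, %ju MB\n", nchunks, (uintmax_t)(dumpsize >> 20));
        return (0);
}

In the kernel version of this loop, dumpsys_generic() first runs it with cb_size to compute the dump size and e_phnum, then with cb_dumphdr and dumpsys_cb_dumpdata to write the ELF program headers and the memory contents themselves.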

sys/amd64/include/dump.h

@ -0,0 +1,6 @@
/*-
* This file is in the public domain.
*/
/* $FreeBSD$ */
#include <x86/dump.h>

sys/arm/arm/dump_machdep.c

@ -32,148 +32,26 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kerneldump.h>
#ifdef SW_WATCHDOG
#include <sys/watchdog.h>
#endif
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/dump.h>
#include <machine/elf.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/armreg.h>
CTASSERT(sizeof(struct kerneldumpheader) == 512);
int do_minidump = 1;
SYSCTL_INT(_debug, OID_AUTO, minidump, CTLFLAG_RWTUN, &do_minidump, 0,
"Enable mini crash dumps");
/*
* Don't touch the first SIZEOF_METADATA bytes on the dump device. This
* is to protect us from metadata and to protect metadata from us.
*/
#define SIZEOF_METADATA (64*1024)
#define MD_ALIGN(x) (((off_t)(x) + PAGE_MASK) & ~PAGE_MASK)
#define DEV_ALIGN(x) (((off_t)(x) + (DEV_BSIZE-1)) & ~(DEV_BSIZE-1))
extern struct pcb dumppcb;
struct md_pa {
vm_paddr_t md_start;
vm_paddr_t md_size;
};
typedef int callback_t(struct md_pa *, int, void *);
static struct kerneldumpheader kdh;
static off_t dumplo, fileofs;
/* Handle buffered writes. */
static char buffer[DEV_BSIZE];
static size_t fragsz;
/* XXX: I suppose 20 should be enough. */
static struct md_pa dump_map[20];
static void
md_pa_init(void)
void
dumpsys_wbinv_all(void)
{
int n, idx;
bzero(dump_map, sizeof(dump_map));
for (n = 0; n < sizeof(dump_map) / sizeof(dump_map[0]); n++) {
idx = n * 2;
if (dump_avail[idx] == 0 && dump_avail[idx + 1] == 0)
break;
dump_map[n].md_start = dump_avail[idx];
dump_map[n].md_size = dump_avail[idx + 1] - dump_avail[idx];
}
}
static struct md_pa *
md_pa_first(void)
{
return (&dump_map[0]);
}
static struct md_pa *
md_pa_next(struct md_pa *mdp)
{
mdp++;
if (mdp->md_size == 0)
mdp = NULL;
return (mdp);
}
static int
buf_write(struct dumperinfo *di, char *ptr, size_t sz)
{
size_t len;
int error;
while (sz) {
len = DEV_BSIZE - fragsz;
if (len > sz)
len = sz;
bcopy(ptr, buffer + fragsz, len);
fragsz += len;
ptr += len;
sz -= len;
if (fragsz == DEV_BSIZE) {
error = dump_write(di, buffer, 0, dumplo,
DEV_BSIZE);
if (error)
return error;
dumplo += DEV_BSIZE;
fragsz = 0;
}
}
return (0);
}
static int
buf_flush(struct dumperinfo *di)
{
int error;
if (fragsz == 0)
return (0);
error = dump_write(di, buffer, 0, dumplo, DEV_BSIZE);
dumplo += DEV_BSIZE;
fragsz = 0;
return (error);
}
extern vm_offset_t kernel_l1kva;
extern char *pouet2;
static int
cb_dumpdata(struct md_pa *mdp, int seqnr, void *arg)
{
struct dumperinfo *di = (struct dumperinfo*)arg;
vm_paddr_t a, pa;
void *va;
uint32_t pgs;
size_t counter, sz, chunk;
int i, c, error;
va = 0;
error = 0; /* catch case in which chunk size is 0 */
counter = 0;
pgs = mdp->md_size / PAGE_SIZE;
pa = mdp->md_start;
printf(" chunk %d: %dMB (%d pages)", seqnr, pgs * PAGE_SIZE / (
1024*1024), pgs);
/*
* Make sure we write coherent data. Note that in the SMP case this
@ -186,70 +64,25 @@ cb_dumpdata(struct md_pa *mdp, int seqnr, void *arg)
#ifdef __XSCALE__
xscale_cache_clean_minidata();
#endif
while (pgs) {
chunk = pgs;
if (chunk > MAXDUMPPGS)
chunk = MAXDUMPPGS;
sz = chunk << PAGE_SHIFT;
counter += sz;
if (counter >> 24) {
printf(" %d", pgs * PAGE_SIZE);
counter &= (1<<24) - 1;
}
for (i = 0; i < chunk; i++) {
a = pa + i * PAGE_SIZE;
va = pmap_kenter_temporary(trunc_page(a), i);
}
#ifdef SW_WATCHDOG
wdog_kern_pat(WD_LASTVAL);
#endif
error = dump_write(di, va, 0, dumplo, sz);
if (error)
break;
dumplo += sz;
pgs -= chunk;
pa += sz;
/* Check for user abort. */
c = cncheckc();
if (c == 0x03)
return (ECANCELED);
if (c != -1)
printf(" (CTRL-C to abort) ");
}
printf(" ... %s\n", (error) ? "fail" : "ok");
return (error);
}
static int
cb_dumphdr(struct md_pa *mdp, int seqnr, void *arg)
void
dumpsys_map_chunk(vm_paddr_t pa, size_t chunk, void **va)
{
struct dumperinfo *di = (struct dumperinfo*)arg;
Elf_Phdr phdr;
uint64_t size;
int error;
vm_paddr_t a;
int i;
size = mdp->md_size;
bzero(&phdr, sizeof(phdr));
phdr.p_type = PT_LOAD;
phdr.p_flags = PF_R; /* XXX */
phdr.p_offset = fileofs;
phdr.p_vaddr = mdp->md_start;
phdr.p_paddr = mdp->md_start;
phdr.p_filesz = size;
phdr.p_memsz = size;
phdr.p_align = PAGE_SIZE;
error = buf_write(di, (char*)&phdr, sizeof(phdr));
fileofs += phdr.p_filesz;
return (error);
for (i = 0; i < chunk; i++) {
a = pa + i * PAGE_SIZE;
*va = pmap_kenter_temporary(trunc_page(a), i);
}
}
/*
* Add a header to be used by libkvm to get the va to pa delta
*/
static int
dump_os_header(struct dumperinfo *di)
int
dumpsys_write_aux_headers(struct dumperinfo *di)
{
Elf_Phdr phdr;
int error;
@ -264,144 +97,6 @@ dump_os_header(struct dumperinfo *di)
phdr.p_memsz = 0;
phdr.p_align = PAGE_SIZE;
error = buf_write(di, (char*)&phdr, sizeof(phdr));
return (error);
}
static int
cb_size(struct md_pa *mdp, int seqnr, void *arg)
{
uint32_t *sz = (uint32_t*)arg;
*sz += (uint32_t)mdp->md_size;
return (0);
}
static int
foreach_chunk(callback_t cb, void *arg)
{
struct md_pa *mdp;
int error, seqnr;
seqnr = 0;
mdp = md_pa_first();
while (mdp != NULL) {
error = (*cb)(mdp, seqnr++, arg);
if (error)
return (-error);
mdp = md_pa_next(mdp);
}
return (seqnr);
}
int
dumpsys(struct dumperinfo *di)
{
Elf_Ehdr ehdr;
uint32_t dumpsize;
off_t hdrgap;
size_t hdrsz;
int error;
if (do_minidump)
return (minidumpsys(di));
bzero(&ehdr, sizeof(ehdr));
ehdr.e_ident[EI_MAG0] = ELFMAG0;
ehdr.e_ident[EI_MAG1] = ELFMAG1;
ehdr.e_ident[EI_MAG2] = ELFMAG2;
ehdr.e_ident[EI_MAG3] = ELFMAG3;
ehdr.e_ident[EI_CLASS] = ELF_CLASS;
#if BYTE_ORDER == LITTLE_ENDIAN
ehdr.e_ident[EI_DATA] = ELFDATA2LSB;
#else
ehdr.e_ident[EI_DATA] = ELFDATA2MSB;
#endif
ehdr.e_ident[EI_VERSION] = EV_CURRENT;
ehdr.e_ident[EI_OSABI] = ELFOSABI_STANDALONE; /* XXX big picture? */
ehdr.e_type = ET_CORE;
ehdr.e_machine = EM_ARM;
ehdr.e_phoff = sizeof(ehdr);
ehdr.e_flags = 0;
ehdr.e_ehsize = sizeof(ehdr);
ehdr.e_phentsize = sizeof(Elf_Phdr);
ehdr.e_shentsize = sizeof(Elf_Shdr);
md_pa_init();
/* Calculate dump size. */
dumpsize = 0L;
ehdr.e_phnum = foreach_chunk(cb_size, &dumpsize) + 1;
hdrsz = ehdr.e_phoff + ehdr.e_phnum * ehdr.e_phentsize;
fileofs = MD_ALIGN(hdrsz);
dumpsize += fileofs;
hdrgap = fileofs - DEV_ALIGN(hdrsz);
/* Determine dump offset on device. */
if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
error = ENOSPC;
goto fail;
}
dumplo = di->mediaoffset + di->mediasize - dumpsize;
dumplo -= sizeof(kdh) * 2;
mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_ARM_VERSION, dumpsize, di->blocksize);
printf("Dumping %llu MB (%d chunks)\n", (long long)dumpsize >> 20,
ehdr.e_phnum - 1);
/* Dump leader */
error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
if (error)
goto fail;
dumplo += sizeof(kdh);
/* Dump ELF header */
error = buf_write(di, (char*)&ehdr, sizeof(ehdr));
if (error)
goto fail;
/* Dump program headers */
error = foreach_chunk(cb_dumphdr, di);
if (error >= 0)
error = dump_os_header(di);
if (error < 0)
goto fail;
buf_flush(di);
/*
* All headers are written using blocked I/O, so we know the
* current offset is (still) block aligned. Skip the alignement
* in the file to have the segment contents aligned at page
* boundary. We cannot use MD_ALIGN on dumplo, because we don't
* care and may very well be unaligned within the dump device.
*/
dumplo += hdrgap;
/* Dump memory chunks (updates dumplo) */
error = foreach_chunk(cb_dumpdata, di);
if (error < 0)
goto fail;
/* Dump trailer */
error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
if (error)
goto fail;
/* Signal completion, signoff and exit stage left. */
dump_write(di, NULL, 0, 0, 0);
printf("\nDump complete\n");
return (0);
fail:
if (error < 0)
error = -error;
if (error == ECANCELED)
printf("\nDump aborted\n");
else if (error == ENOSPC)
printf("\nDump failed. Partition too small.\n");
else
printf("\n** DUMP FAILED (ERROR %d) **\n", error);
error = dumpsys_buf_write(di, (char*)&phdr, sizeof(phdr));
return (error);
}

sys/arm/include/dump.h

@ -0,0 +1,70 @@
/*-
* Copyright (c) 2014 EMC Corp.
* Author: Conrad Meyer <conrad.meyer@isilon.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_DUMP_H_
#define _MACHINE_DUMP_H_
#define KERNELDUMP_ARCH_VERSION KERNELDUMP_ARM_VERSION
#define EM_VALUE EM_ARM
/* XXX: I suppose 20 should be enough. */
#define DUMPSYS_MD_PA_NPAIRS 20
#define DUMPSYS_NUM_AUX_HDRS 1
void dumpsys_wbinv_all(void);
int dumpsys_write_aux_headers(struct dumperinfo *di);
static inline void
dumpsys_pa_init(void)
{
dumpsys_gen_pa_init();
}
static inline struct dump_pa *
dumpsys_pa_next(struct dump_pa *p)
{
return (dumpsys_gen_pa_next(p));
}
static inline void
dumpsys_unmap_chunk(vm_paddr_t pa, size_t s, void *va)
{
dumpsys_gen_unmap_chunk(pa, s, va);
}
static inline int
dumpsys(struct dumperinfo *di)
{
return (dumpsys_generic(di));
}
#endif /* !_MACHINE_DUMP_H_ */

sys/conf/files

@ -2947,6 +2947,7 @@ kern/kern_cpuset.c standard
kern/kern_context.c standard
kern/kern_descrip.c standard
kern/kern_dtrace.c optional kdtrace_hooks
kern/kern_dump.c standard
kern/kern_environment.c standard
kern/kern_et.c standard
kern/kern_event.c standard

sys/i386/include/dump.h

@ -0,0 +1,6 @@
/*-
* This file is in the public domain.
*/
/* $FreeBSD$ */
#include <x86/dump.h>

sys/kern/kern_dump.c

@ -0,0 +1,393 @@
/*-
* Copyright (c) 2002 Marcel Moolenaar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_watchdog.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kerneldump.h>
#ifdef SW_WATCHDOG
#include <sys/watchdog.h>
#endif
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <machine/dump.h>
#include <machine/elf.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
CTASSERT(sizeof(struct kerneldumpheader) == 512);
/*
* Don't touch the first SIZEOF_METADATA bytes on the dump device. This
* is to protect us from metadata and to protect metadata from us.
*/
#define SIZEOF_METADATA (64*1024)
#define MD_ALIGN(x) (((off_t)(x) + PAGE_MASK) & ~PAGE_MASK)
#define DEV_ALIGN(x) (((off_t)(x) + (DEV_BSIZE-1)) & ~(DEV_BSIZE-1))
off_t dumplo;
/* Handle buffered writes. */
static char buffer[DEV_BSIZE];
static size_t fragsz;
struct dump_pa dump_map[DUMPSYS_MD_PA_NPAIRS];
void
dumpsys_gen_pa_init(void)
{
#if !defined(__sparc__) && !defined(__powerpc__)
int n, idx;
bzero(dump_map, sizeof(dump_map));
for (n = 0; n < sizeof(dump_map) / sizeof(dump_map[0]); n++) {
idx = n * 2;
if (dump_avail[idx] == 0 && dump_avail[idx + 1] == 0)
break;
dump_map[n].pa_start = dump_avail[idx];
dump_map[n].pa_size = dump_avail[idx + 1] - dump_avail[idx];
}
#endif
}
struct dump_pa *
dumpsys_gen_pa_next(struct dump_pa *mdp)
{
if (mdp == NULL)
return (&dump_map[0]);
mdp++;
if (mdp->pa_size == 0)
mdp = NULL;
return (mdp);
}
void
dumpsys_gen_wbinv_all(void)
{
}
void
dumpsys_gen_unmap_chunk(vm_paddr_t pa __unused, size_t chunk __unused,
void *va __unused)
{
}
int
dumpsys_gen_write_aux_headers(struct dumperinfo *di)
{
return (0);
}
int
dumpsys_buf_write(struct dumperinfo *di, char *ptr, size_t sz)
{
size_t len;
int error;
while (sz) {
len = DEV_BSIZE - fragsz;
if (len > sz)
len = sz;
bcopy(ptr, buffer + fragsz, len);
fragsz += len;
ptr += len;
sz -= len;
if (fragsz == DEV_BSIZE) {
error = dump_write(di, buffer, 0, dumplo,
DEV_BSIZE);
if (error)
return (error);
dumplo += DEV_BSIZE;
fragsz = 0;
}
}
return (0);
}
int
dumpsys_buf_flush(struct dumperinfo *di)
{
int error;
if (fragsz == 0)
return (0);
error = dump_write(di, buffer, 0, dumplo, DEV_BSIZE);
dumplo += DEV_BSIZE;
fragsz = 0;
return (error);
}
CTASSERT(PAGE_SHIFT < 20);
#define PG2MB(pgs) ((pgs + (1 << (20 - PAGE_SHIFT)) - 1) >> (20 - PAGE_SHIFT))
int
dumpsys_cb_dumpdata(struct dump_pa *mdp, int seqnr, void *arg)
{
struct dumperinfo *di = (struct dumperinfo*)arg;
vm_paddr_t pa;
void *va;
uint64_t pgs;
size_t counter, sz, chunk;
int c, error;
u_int maxdumppgs;
error = 0; /* catch case in which chunk size is 0 */
counter = 0; /* Update twiddle every 16MB */
va = 0;
pgs = mdp->pa_size / PAGE_SIZE;
pa = mdp->pa_start;
maxdumppgs = min(di->maxiosize / PAGE_SIZE, MAXDUMPPGS);
if (maxdumppgs == 0) /* seatbelt */
maxdumppgs = 1;
printf(" chunk %d: %juMB (%ju pages)", seqnr, (uintmax_t)PG2MB(pgs),
(uintmax_t)pgs);
dumpsys_wbinv_all();
while (pgs) {
chunk = pgs;
if (chunk > maxdumppgs)
chunk = maxdumppgs;
sz = chunk << PAGE_SHIFT;
counter += sz;
if (counter >> 24) {
printf(" %ju", (uintmax_t)PG2MB(pgs));
counter &= (1 << 24) - 1;
}
dumpsys_map_chunk(pa, chunk, &va);
#ifdef SW_WATCHDOG
wdog_kern_pat(WD_LASTVAL);
#endif
error = dump_write(di, va, 0, dumplo, sz);
dumpsys_unmap_chunk(pa, chunk, va);
if (error)
break;
dumplo += sz;
pgs -= chunk;
pa += sz;
/* Check for user abort. */
c = cncheckc();
if (c == 0x03)
return (ECANCELED);
if (c != -1)
printf(" (CTRL-C to abort) ");
}
printf(" ... %s\n", (error) ? "fail" : "ok");
return (error);
}
int
dumpsys_foreach_chunk(dumpsys_callback_t cb, void *arg)
{
struct dump_pa *mdp;
int error, seqnr;
seqnr = 0;
mdp = dumpsys_pa_next(NULL);
while (mdp != NULL) {
error = (*cb)(mdp, seqnr++, arg);
if (error)
return (-error);
mdp = dumpsys_pa_next(mdp);
}
return (seqnr);
}
static off_t fileofs;
static int
cb_dumphdr(struct dump_pa *mdp, int seqnr, void *arg)
{
struct dumperinfo *di = (struct dumperinfo*)arg;
Elf_Phdr phdr;
uint64_t size;
int error;
size = mdp->pa_size;
bzero(&phdr, sizeof(phdr));
phdr.p_type = PT_LOAD;
phdr.p_flags = PF_R; /* XXX */
phdr.p_offset = fileofs;
#ifdef __powerpc__
phdr.p_vaddr = (do_minidump? mdp->pa_start : ~0L);
phdr.p_paddr = (do_minidump? ~0L : mdp->pa_start);
#else
phdr.p_vaddr = mdp->pa_start;
phdr.p_paddr = mdp->pa_start;
#endif
phdr.p_filesz = size;
phdr.p_memsz = size;
phdr.p_align = PAGE_SIZE;
error = dumpsys_buf_write(di, (char*)&phdr, sizeof(phdr));
fileofs += phdr.p_filesz;
return (error);
}
static int
cb_size(struct dump_pa *mdp, int seqnr, void *arg)
{
uint64_t *sz;
sz = (uint64_t *)arg;
*sz += (uint64_t)mdp->pa_size;
return (0);
}
int
dumpsys_generic(struct dumperinfo *di)
{
static struct kerneldumpheader kdh;
Elf_Ehdr ehdr;
uint64_t dumpsize;
off_t hdrgap;
size_t hdrsz;
int error;
#ifndef __powerpc__
if (do_minidump)
return (minidumpsys(di));
#endif
bzero(&ehdr, sizeof(ehdr));
ehdr.e_ident[EI_MAG0] = ELFMAG0;
ehdr.e_ident[EI_MAG1] = ELFMAG1;
ehdr.e_ident[EI_MAG2] = ELFMAG2;
ehdr.e_ident[EI_MAG3] = ELFMAG3;
ehdr.e_ident[EI_CLASS] = ELF_CLASS;
#if BYTE_ORDER == LITTLE_ENDIAN
ehdr.e_ident[EI_DATA] = ELFDATA2LSB;
#else
ehdr.e_ident[EI_DATA] = ELFDATA2MSB;
#endif
ehdr.e_ident[EI_VERSION] = EV_CURRENT;
ehdr.e_ident[EI_OSABI] = ELFOSABI_STANDALONE; /* XXX big picture? */
ehdr.e_type = ET_CORE;
ehdr.e_machine = EM_VALUE;
ehdr.e_phoff = sizeof(ehdr);
ehdr.e_flags = 0;
ehdr.e_ehsize = sizeof(ehdr);
ehdr.e_phentsize = sizeof(Elf_Phdr);
ehdr.e_shentsize = sizeof(Elf_Shdr);
dumpsys_pa_init();
/* Calculate dump size. */
dumpsize = 0L;
ehdr.e_phnum = dumpsys_foreach_chunk(cb_size, &dumpsize) +
DUMPSYS_NUM_AUX_HDRS;
hdrsz = ehdr.e_phoff + ehdr.e_phnum * ehdr.e_phentsize;
fileofs = MD_ALIGN(hdrsz);
dumpsize += fileofs;
hdrgap = fileofs - DEV_ALIGN(hdrsz);
/* Determine dump offset on device. */
if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
error = ENOSPC;
goto fail;
}
dumplo = di->mediaoffset + di->mediasize - dumpsize;
dumplo -= sizeof(kdh) * 2;
mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_ARCH_VERSION, dumpsize,
di->blocksize);
printf("Dumping %ju MB (%d chunks)\n", (uintmax_t)dumpsize >> 20,
ehdr.e_phnum - DUMPSYS_NUM_AUX_HDRS);
/* Dump leader */
error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
if (error)
goto fail;
dumplo += sizeof(kdh);
/* Dump ELF header */
error = dumpsys_buf_write(di, (char*)&ehdr, sizeof(ehdr));
if (error)
goto fail;
/* Dump program headers */
error = dumpsys_foreach_chunk(cb_dumphdr, di);
if (error < 0)
goto fail;
error = dumpsys_write_aux_headers(di);
if (error < 0)
goto fail;
dumpsys_buf_flush(di);
/*
* All headers are written using blocked I/O, so we know the
* current offset is (still) block aligned. Skip the alignement
* in the file to have the segment contents aligned at page
* boundary. We cannot use MD_ALIGN on dumplo, because we don't
* care and may very well be unaligned within the dump device.
*/
dumplo += hdrgap;
/* Dump memory chunks (updates dumplo) */
error = dumpsys_foreach_chunk(dumpsys_cb_dumpdata, di);
if (error < 0)
goto fail;
/* Dump trailer */
error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
if (error)
goto fail;
/* Signal completion, signoff and exit stage left. */
dump_write(di, NULL, 0, 0, 0);
printf("\nDump complete\n");
return (0);
fail:
if (error < 0)
error = -error;
if (error == ECANCELED)
printf("\nDump aborted\n");
else if (error == ENOSPC)
printf("\nDump failed. Partition too small.\n");
else
printf("\n** DUMP FAILED (ERROR %d) **\n", error);
return (error);
}

sys/kern/kern_shutdown.c

@ -73,6 +73,7 @@ __FBSDID("$FreeBSD$");
#include <ddb/ddb.h>
#include <machine/cpu.h>
#include <machine/dump.h>
#include <machine/pcb.h>
#include <machine/smp.h>

sys/mips/include/dump.h

@ -0,0 +1,76 @@
/*-
* Copyright (c) 2014 EMC Corp.
* Author: Conrad Meyer <conrad.meyer@isilon.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_DUMP_H_
#define _MACHINE_DUMP_H_
#define KERNELDUMP_ARCH_VERSION KERNELDUMP_MIPS_VERSION
#define EM_VALUE EM_MIPS
/* XXX: I suppose 20 should be enough. */
#define DUMPSYS_MD_PA_NPAIRS 20
#define DUMPSYS_NUM_AUX_HDRS 0
void dumpsys_wbinv_all(void);
static inline void
dumpsys_pa_init(void)
{
dumpsys_gen_pa_init();
}
static inline struct dump_pa *
dumpsys_pa_next(struct dump_pa *p)
{
return (dumpsys_gen_pa_next(p));
}
static inline void
dumpsys_unmap_chunk(vm_paddr_t pa, size_t s, void *va)
{
dumpsys_gen_unmap_chunk(pa, s, va);
}
static inline int
dumpsys_write_aux_headers(struct dumperinfo *di)
{
return (dumpsys_gen_write_aux_headers(di));
}
static inline int
dumpsys(struct dumperinfo *di)
{
return (dumpsys_generic(di));
}
#endif /* !_MACHINE_DUMP_H_ */

sys/mips/include/md_var.h

@ -80,4 +80,5 @@ struct dumperinfo;
void dump_add_page(vm_paddr_t);
void dump_drop_page(vm_paddr_t);
int minidumpsys(struct dumperinfo *);
#endif /* !_MACHINE_MD_VAR_H_ */

sys/mips/mips/dump_machdep.c

@ -27,342 +27,30 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_watchdog.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kerneldump.h>
#ifdef SW_WATCHDOG
#include <sys/watchdog.h>
#endif
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/elf.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/cache.h>
#include <sys/sysctl.h>
CTASSERT(sizeof(struct kerneldumpheader) == 512);
#include <machine/cache.h>
#include <machine/dump.h>
int do_minidump = 1;
SYSCTL_INT(_debug, OID_AUTO, minidump, CTLFLAG_RWTUN, &do_minidump, 0,
"Enable mini crash dumps");
/*
* Don't touch the first SIZEOF_METADATA bytes on the dump device. This
* is to protect us from metadata and to protect metadata from us.
*/
#define SIZEOF_METADATA (64*1024)
#define MD_ALIGN(x) (((off_t)(x) + PAGE_MASK) & ~PAGE_MASK)
#define DEV_ALIGN(x) (((off_t)(x) + (DEV_BSIZE-1)) & ~(DEV_BSIZE-1))
extern struct pcb dumppcb;
struct md_pa {
vm_paddr_t md_start;
vm_paddr_t md_size;
};
typedef int callback_t(struct md_pa *, int, void *);
static struct kerneldumpheader kdh;
static off_t dumplo, fileofs;
/* Handle buffered writes. */
static char buffer[DEV_BSIZE];
static size_t fragsz;
/* XXX: I suppose 20 should be enough. */
static struct md_pa dump_map[20];
static void
md_pa_init(void)
void
dumpsys_wbinv_all(void)
{
int n, idx;
bzero(dump_map, sizeof(dump_map));
for (n = 0; n < sizeof(dump_map) / sizeof(dump_map[0]); n++) {
idx = n * 2;
if (dump_avail[idx] == 0 && dump_avail[idx + 1] == 0)
break;
dump_map[n].md_start = dump_avail[idx];
dump_map[n].md_size = dump_avail[idx + 1] - dump_avail[idx];
}
}
static struct md_pa *
md_pa_first(void)
{
return (&dump_map[0]);
}
static struct md_pa *
md_pa_next(struct md_pa *mdp)
{
mdp++;
if (mdp->md_size == 0)
mdp = NULL;
return (mdp);
}
static int
buf_write(struct dumperinfo *di, char *ptr, size_t sz)
{
size_t len;
int error;
while (sz) {
len = DEV_BSIZE - fragsz;
if (len > sz)
len = sz;
bcopy(ptr, buffer + fragsz, len);
fragsz += len;
ptr += len;
sz -= len;
if (fragsz == DEV_BSIZE) {
error = dump_write(di, buffer, 0, dumplo,
DEV_BSIZE);
if (error)
return error;
dumplo += DEV_BSIZE;
fragsz = 0;
}
}
return (0);
}
static int
buf_flush(struct dumperinfo *di)
{
int error;
if (fragsz == 0)
return (0);
error = dump_write(di, buffer, 0, dumplo, DEV_BSIZE);
dumplo += DEV_BSIZE;
fragsz = 0;
return (error);
}
extern vm_offset_t kernel_l1kva;
extern char *pouet2;
static int
cb_dumpdata(struct md_pa *mdp, int seqnr, void *arg)
{
struct dumperinfo *di = (struct dumperinfo*)arg;
vm_paddr_t pa;
uint32_t pgs;
size_t counter, sz, chunk;
int c, error;
error = 0; /* catch case in which chunk size is 0 */
counter = 0;
pgs = mdp->md_size / PAGE_SIZE;
pa = mdp->md_start;
printf(" chunk %d: %dMB (%d pages)", seqnr, pgs * PAGE_SIZE / (
1024*1024), pgs);
/* Make sure we write coherent datas. */
mips_dcache_wbinv_all();
while (pgs) {
chunk = pgs;
if (chunk > MAXDUMPPGS)
chunk = MAXDUMPPGS;
sz = chunk << PAGE_SHIFT;
counter += sz;
if (counter >> 24) {
printf(" %d", pgs * PAGE_SIZE);
counter &= (1<<24) - 1;
}
#ifdef SW_WATCHDOG
wdog_kern_pat(WD_LASTVAL);
#endif
error = dump_write(di, (void *)(intptr_t)(pa),0, dumplo, sz); /* XXX fix PA */
if (error)
break;
dumplo += sz;
pgs -= chunk;
pa += sz;
/* Check for user abort. */
c = cncheckc();
if (c == 0x03)
return (ECANCELED);
if (c != -1)
printf(" (CTRL-C to abort) ");
}
printf(" ... %s\n", (error) ? "fail" : "ok");
return (error);
}
static int
cb_dumphdr(struct md_pa *mdp, int seqnr, void *arg)
void
dumpsys_map_chunk(vm_paddr_t pa, size_t chunk __unused, void **va)
{
struct dumperinfo *di = (struct dumperinfo*)arg;
Elf_Phdr phdr;
uint64_t size;
int error;
size = mdp->md_size;
bzero(&phdr, sizeof(phdr));
phdr.p_type = PT_LOAD;
phdr.p_flags = PF_R; /* XXX */
phdr.p_offset = fileofs;
phdr.p_vaddr = mdp->md_start;
phdr.p_paddr = mdp->md_start;
phdr.p_filesz = size;
phdr.p_memsz = size;
phdr.p_align = PAGE_SIZE;
error = buf_write(di, (char*)&phdr, sizeof(phdr));
fileofs += phdr.p_filesz;
return (error);
}
static int
cb_size(struct md_pa *mdp, int seqnr, void *arg)
{
uint32_t *sz = (uint32_t*)arg;
*sz += (uint32_t)mdp->md_size;
return (0);
}
static int
foreach_chunk(callback_t cb, void *arg)
{
struct md_pa *mdp;
int error, seqnr;
seqnr = 0;
mdp = md_pa_first();
while (mdp != NULL) {
error = (*cb)(mdp, seqnr++, arg);
if (error)
return (-error);
mdp = md_pa_next(mdp);
}
return (seqnr);
}
int
dumpsys(struct dumperinfo *di)
{
Elf_Ehdr ehdr;
uint32_t dumpsize;
off_t hdrgap;
size_t hdrsz;
int error;
if (do_minidump)
return (minidumpsys(di));
bzero(&ehdr, sizeof(ehdr));
ehdr.e_ident[EI_MAG0] = ELFMAG0;
ehdr.e_ident[EI_MAG1] = ELFMAG1;
ehdr.e_ident[EI_MAG2] = ELFMAG2;
ehdr.e_ident[EI_MAG3] = ELFMAG3;
ehdr.e_ident[EI_CLASS] = ELF_CLASS;
#if BYTE_ORDER == LITTLE_ENDIAN
ehdr.e_ident[EI_DATA] = ELFDATA2LSB;
#else
ehdr.e_ident[EI_DATA] = ELFDATA2MSB;
#endif
ehdr.e_ident[EI_VERSION] = EV_CURRENT;
ehdr.e_ident[EI_OSABI] = ELFOSABI_STANDALONE; /* XXX big picture? */
ehdr.e_type = ET_CORE;
ehdr.e_machine = EM_MIPS;
ehdr.e_phoff = sizeof(ehdr);
ehdr.e_flags = 0;
ehdr.e_ehsize = sizeof(ehdr);
ehdr.e_phentsize = sizeof(Elf_Phdr);
ehdr.e_shentsize = sizeof(Elf_Shdr);
md_pa_init();
/* Calculate dump size. */
dumpsize = 0L;
ehdr.e_phnum = foreach_chunk(cb_size, &dumpsize);
hdrsz = ehdr.e_phoff + ehdr.e_phnum * ehdr.e_phentsize;
fileofs = MD_ALIGN(hdrsz);
dumpsize += fileofs;
hdrgap = fileofs - DEV_ALIGN(hdrsz);
/* Determine dump offset on device. */
if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
error = ENOSPC;
goto fail;
}
dumplo = di->mediaoffset + di->mediasize - dumpsize;
dumplo -= sizeof(kdh) * 2;
mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_MIPS_VERSION, dumpsize, di->blocksize);
printf("Dumping %llu MB (%d chunks)\n", (long long)dumpsize >> 20,
ehdr.e_phnum);
/* Dump leader */
error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
if (error)
goto fail;
dumplo += sizeof(kdh);
/* Dump ELF header */
error = buf_write(di, (char*)&ehdr, sizeof(ehdr));
if (error)
goto fail;
/* Dump program headers */
error = foreach_chunk(cb_dumphdr, di);
if (error < 0)
goto fail;
buf_flush(di);
/*
* All headers are written using blocked I/O, so we know the
* current offset is (still) block aligned. Skip the alignement
* in the file to have the segment contents aligned at page
* boundary. We cannot use MD_ALIGN on dumplo, because we don't
* care and may very well be unaligned within the dump device.
*/
dumplo += hdrgap;
/* Dump memory chunks (updates dumplo) */
error = foreach_chunk(cb_dumpdata, di);
if (error < 0)
goto fail;
/* Dump trailer */
error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
if (error)
goto fail;
/* Signal completion, signoff and exit stage left. */
dump_write(di, NULL, 0, 0, 0);
printf("\nDump complete\n");
return (0);
fail:
if (error < 0)
error = -error;
if (error == ECANCELED)
printf("\nDump aborted\n");
else if (error == ENOSPC)
printf("\nDump failed. Partition too small.\n");
else
printf("\n** DUMP FAILED (ERROR %d) **\n", error);
return (error);
*va = (void *)(intptr_t)pa;
}

sys/pc98/include/dump.h

@ -0,0 +1,6 @@
/*-
* This file is in the public domain.
*/
/* $FreeBSD$ */
#include <x86/dump.h>

sys/powerpc/aim/mmu_oea.c

@ -106,8 +106,10 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/kerneldump.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
@ -163,8 +165,6 @@ struct ofw_map {
extern unsigned char _etext[];
extern unsigned char _end[];
extern int dumpsys_minidump;
/*
* Map of physical memory regions.
*/
@ -314,9 +314,8 @@ void moea_kenter(mmu_t, vm_offset_t, vm_paddr_t);
void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma);
boolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
vm_offset_t moea_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
vm_size_t *sz);
struct pmap_md * moea_scan_md(mmu_t mmu, struct pmap_md *prev);
void moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va);
void moea_scan_init(mmu_t mmu);
static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_clear_modify, moea_clear_modify),
@ -363,7 +362,7 @@ static mmu_method_t moea_methods[] = {
MMUMETHOD(mmu_kenter, moea_kenter),
MMUMETHOD(mmu_kenter_attr, moea_kenter_attr),
MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),
MMUMETHOD(mmu_scan_md, moea_scan_md),
MMUMETHOD(mmu_scan_init, moea_scan_init),
MMUMETHOD(mmu_dumpsys_map, moea_dumpsys_map),
{ 0, 0 }
@ -2628,100 +2627,74 @@ moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
PMAP_UNLOCK(pm);
}
vm_offset_t
moea_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
vm_size_t *sz)
void
moea_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
{
if (md->md_vaddr == ~0UL)
return (md->md_paddr + ofs);
else
return (md->md_vaddr + ofs);
*va = (void *)pa;
}
struct pmap_md *
moea_scan_md(mmu_t mmu, struct pmap_md *prev)
extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
void
moea_scan_init(mmu_t mmu)
{
static struct pmap_md md;
struct pvo_entry *pvo;
vm_offset_t va;
if (dumpsys_minidump) {
md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */
if (prev == NULL) {
/* 1st: kernel .data and .bss. */
md.md_index = 1;
md.md_vaddr = trunc_page((uintptr_t)_etext);
md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
return (&md);
}
switch (prev->md_index) {
case 1:
/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
md.md_index = 2;
md.md_vaddr = (vm_offset_t)msgbufp->msg_ptr;
md.md_size = round_page(msgbufp->msg_size);
break;
case 2:
/* 3rd: kernel VM. */
va = prev->md_vaddr + prev->md_size;
/* Find start of next chunk (from va). */
while (va < virtual_end) {
/* Don't dump the buffer cache. */
if (va >= kmi.buffer_sva &&
va < kmi.buffer_eva) {
va = kmi.buffer_eva;
continue;
}
pvo = moea_pvo_find_va(kernel_pmap,
va & ~ADDR_POFF, NULL);
if (pvo != NULL &&
(pvo->pvo_pte.pte.pte_hi & PTE_VALID))
break;
va += PAGE_SIZE;
}
if (va < virtual_end) {
md.md_vaddr = va;
va += PAGE_SIZE;
/* Find last page in chunk. */
while (va < virtual_end) {
/* Don't run into the buffer cache. */
if (va == kmi.buffer_sva)
break;
pvo = moea_pvo_find_va(kernel_pmap,
va & ~ADDR_POFF, NULL);
if (pvo == NULL ||
!(pvo->pvo_pte.pte.pte_hi & PTE_VALID))
break;
va += PAGE_SIZE;
}
md.md_size = va - md.md_vaddr;
break;
}
md.md_index = 3;
/* FALLTHROUGH */
default:
return (NULL);
}
} else { /* minidumps */
mem_regions(&pregions, &pregions_sz,
&regions, &regions_sz);
int i;
if (prev == NULL) {
/* first physical chunk. */
md.md_paddr = pregions[0].mr_start;
md.md_size = pregions[0].mr_size;
md.md_vaddr = ~0UL;
md.md_index = 1;
} else if (md.md_index < pregions_sz) {
md.md_paddr = pregions[md.md_index].mr_start;
md.md_size = pregions[md.md_index].mr_size;
md.md_vaddr = ~0UL;
md.md_index++;
} else {
/* There's no next physical chunk. */
return (NULL);
if (!do_minidump) {
/* Initialize phys. segments for dumpsys(). */
memset(&dump_map, 0, sizeof(dump_map));
mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
for (i = 0; i < pregions_sz; i++) {
dump_map[i].pa_start = pregions[i].mr_start;
dump_map[i].pa_size = pregions[i].mr_size;
}
return;
}
return (&md);
/* Virtual segments for minidumps: */
memset(&dump_map, 0, sizeof(dump_map));
/* 1st: kernel .data and .bss. */
dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
dump_map[0].pa_size =
round_page((uintptr_t)_end) - dump_map[0].pa_start;
/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
dump_map[1].pa_size = round_page(msgbufp->msg_size);
/* 3rd: kernel VM. */
va = dump_map[1].pa_start + dump_map[1].pa_size;
/* Find start of next chunk (from va). */
while (va < virtual_end) {
/* Don't dump the buffer cache. */
if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
va = kmi.buffer_eva;
continue;
}
pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID))
break;
va += PAGE_SIZE;
}
if (va < virtual_end) {
dump_map[2].pa_start = va;
va += PAGE_SIZE;
/* Find last page in chunk. */
while (va < virtual_end) {
/* Don't run into the buffer cache. */
if (va == kmi.buffer_sva)
break;
pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF,
NULL);
if (pvo == NULL ||
!(pvo->pvo_pte.pte.pte_hi & PTE_VALID))
break;
va += PAGE_SIZE;
}
dump_map[2].pa_size = va - dump_map[2].pa_start;
}
}

sys/powerpc/aim/mmu_oea64.c

@ -107,8 +107,10 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/kerneldump.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
@ -187,7 +189,6 @@ struct ofw_map {
extern unsigned char _etext[];
extern unsigned char _end[];
extern int dumpsys_minidump;
extern int ofw_real_mode;
/*
@ -328,9 +329,9 @@ void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
vm_offset_t moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
vm_size_t *sz);
struct pmap_md * moea64_scan_md(mmu_t mmu, struct pmap_md *prev);
void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
void **va);
void moea64_scan_init(mmu_t mmu);
static mmu_method_t moea64_methods[] = {
MMUMETHOD(mmu_clear_modify, moea64_clear_modify),
@ -376,7 +377,7 @@ static mmu_method_t moea64_methods[] = {
MMUMETHOD(mmu_kenter, moea64_kenter),
MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr),
MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
MMUMETHOD(mmu_scan_md, moea64_scan_md),
MMUMETHOD(mmu_scan_init, moea64_scan_init),
MMUMETHOD(mmu_dumpsys_map, moea64_dumpsys_map),
{ 0, 0 }
@ -2615,97 +2616,72 @@ moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
PMAP_UNLOCK(pm);
}
vm_offset_t
moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
vm_size_t *sz)
void
moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
{
if (md->md_vaddr == ~0UL)
return (md->md_paddr + ofs);
else
return (md->md_vaddr + ofs);
*va = (void *)pa;
}
struct pmap_md *
moea64_scan_md(mmu_t mmu, struct pmap_md *prev)
extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
void
moea64_scan_init(mmu_t mmu)
{
static struct pmap_md md;
struct pvo_entry *pvo;
vm_offset_t va;
if (dumpsys_minidump) {
md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */
if (prev == NULL) {
/* 1st: kernel .data and .bss. */
md.md_index = 1;
md.md_vaddr = trunc_page((uintptr_t)_etext);
md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
return (&md);
}
switch (prev->md_index) {
case 1:
/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
md.md_index = 2;
md.md_vaddr = (vm_offset_t)msgbufp->msg_ptr;
md.md_size = round_page(msgbufp->msg_size);
break;
case 2:
/* 3rd: kernel VM. */
va = prev->md_vaddr + prev->md_size;
/* Find start of next chunk (from va). */
while (va < virtual_end) {
/* Don't dump the buffer cache. */
if (va >= kmi.buffer_sva &&
va < kmi.buffer_eva) {
va = kmi.buffer_eva;
continue;
}
pvo = moea64_pvo_find_va(kernel_pmap,
va & ~ADDR_POFF);
if (pvo != NULL &&
(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
break;
va += PAGE_SIZE;
}
if (va < virtual_end) {
md.md_vaddr = va;
va += PAGE_SIZE;
/* Find last page in chunk. */
while (va < virtual_end) {
/* Don't run into the buffer cache. */
if (va == kmi.buffer_sva)
break;
pvo = moea64_pvo_find_va(kernel_pmap,
va & ~ADDR_POFF);
if (pvo == NULL ||
!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
break;
va += PAGE_SIZE;
}
md.md_size = va - md.md_vaddr;
break;
}
md.md_index = 3;
/* FALLTHROUGH */
default:
return (NULL);
}
} else { /* minidumps */
if (prev == NULL) {
/* first physical chunk. */
md.md_paddr = pregions[0].mr_start;
md.md_size = pregions[0].mr_size;
md.md_vaddr = ~0UL;
md.md_index = 1;
} else if (md.md_index < pregions_sz) {
md.md_paddr = pregions[md.md_index].mr_start;
md.md_size = pregions[md.md_index].mr_size;
md.md_vaddr = ~0UL;
md.md_index++;
} else {
/* There's no next physical chunk. */
return (NULL);
int i;
if (!do_minidump) {
/* Initialize phys. segments for dumpsys(). */
memset(&dump_map, 0, sizeof(dump_map));
mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
for (i = 0; i < pregions_sz; i++) {
dump_map[i].pa_start = pregions[i].mr_start;
dump_map[i].pa_size = pregions[i].mr_size;
}
return;
}
return (&md);
/* Virtual segments for minidumps: */
memset(&dump_map, 0, sizeof(dump_map));
/* 1st: kernel .data and .bss. */
dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
dump_map[0].pa_size = round_page((uintptr_t)_end) - dump_map[0].pa_start;
/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
dump_map[1].pa_size = round_page(msgbufp->msg_size);
/* 3rd: kernel VM. */
va = dump_map[1].pa_start + dump_map[1].pa_size;
/* Find start of next chunk (from va). */
while (va < virtual_end) {
/* Don't dump the buffer cache. */
if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
va = kmi.buffer_eva;
continue;
}
pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
break;
va += PAGE_SIZE;
}
if (va < virtual_end) {
dump_map[2].pa_start = va;
va += PAGE_SIZE;
/* Find last page in chunk. */
while (va < virtual_end) {
/* Don't run into the buffer cache. */
if (va == kmi.buffer_sva)
break;
pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
if (pvo == NULL ||
!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
break;
va += PAGE_SIZE;
}
dump_map[2].pa_size = va - dump_map[2].pa_start;
}
}

sys/powerpc/booke/pmap.c

@ -52,6 +52,7 @@
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
@ -59,6 +60,7 @@ __FBSDID("$FreeBSD$");
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
@ -100,8 +102,6 @@ __FBSDID("$FreeBSD$");
#define TODO panic("%s: not implemented", __func__);
extern int dumpsys_minidump;
extern unsigned char _etext[];
extern unsigned char _end[];
@ -322,11 +322,11 @@ static void mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
vm_size_t);
static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
vm_size_t, vm_size_t *);
static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
vm_size_t, vm_offset_t);
static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *);
static void mmu_booke_dumpsys_map(mmu_t, vm_paddr_t pa, size_t,
void **);
static void mmu_booke_dumpsys_unmap(mmu_t, vm_paddr_t pa, size_t,
void *);
static void mmu_booke_scan_init(mmu_t);
static mmu_method_t mmu_booke_methods[] = {
/* pmap dispatcher interface */
@ -381,7 +381,7 @@ static mmu_method_t mmu_booke_methods[] = {
/* dumpsys() support */
MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap),
MMUMETHOD(mmu_scan_md, mmu_booke_scan_md),
MMUMETHOD(mmu_scan_init, mmu_booke_scan_init),
{ 0, 0 }
};
@ -2534,139 +2534,125 @@ mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
return (EFAULT);
}
vm_offset_t
mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
vm_size_t *sz)
void
mmu_booke_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
{
vm_paddr_t pa, ppa;
vm_offset_t va;
vm_paddr_t ppa;
vm_offset_t ofs;
vm_size_t gran;
/* Raw physical memory dumps don't have a virtual address. */
if (md->md_vaddr == ~0UL) {
/* We always map a 256MB page at 256M. */
gran = 256 * 1024 * 1024;
pa = md->md_paddr + ofs;
ppa = pa & ~(gran - 1);
ofs = pa - ppa;
va = gran;
tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO);
if (*sz > (gran - ofs))
*sz = gran - ofs;
return (va + ofs);
/* Minidumps are based on virtual memory addresses. */
if (do_minidump) {
*va = (void *)pa;
return;
}
/* Minidumps are based on virtual memory addresses. */
va = md->md_vaddr + ofs;
if (va >= kernstart + kernsize) {
gran = PAGE_SIZE - (va & PAGE_MASK);
if (*sz > gran)
*sz = gran;
}
return (va);
/* Raw physical memory dumps don't have a virtual address. */
/* We always map a 256MB page at 256M. */
gran = 256 * 1024 * 1024;
ppa = pa & ~(gran - 1);
ofs = pa - ppa;
*va = (void *)gran;
tlb1_set_entry((vm_offset_t)va, ppa, gran, _TLB_ENTRY_IO);
if (sz > (gran - ofs))
tlb1_set_entry((vm_offset_t)(va + gran), ppa + gran, gran,
_TLB_ENTRY_IO);
}
void
mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
vm_offset_t va)
mmu_booke_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
{
vm_paddr_t ppa;
vm_offset_t ofs;
vm_size_t gran;
/* Minidumps are based on virtual memory addresses. */
/* Nothing to do... */
if (do_minidump)
return;
/* Raw physical memory dumps don't have a virtual address. */
if (md->md_vaddr == ~0UL) {
tlb1_idx--;
tlb1[tlb1_idx].mas1 = 0;
tlb1[tlb1_idx].mas2 = 0;
tlb1[tlb1_idx].mas3 = 0;
tlb1_write_entry(tlb1_idx);
gran = 256 * 1024 * 1024;
ppa = pa & ~(gran - 1);
ofs = pa - ppa;
if (sz > (gran - ofs)) {
tlb1_idx--;
tlb1[tlb1_idx].mas1 = 0;
tlb1[tlb1_idx].mas2 = 0;
tlb1[tlb1_idx].mas3 = 0;
tlb1_write_entry(tlb1_idx);
return;
}
/* Minidumps are based on virtual memory addresses. */
/* Nothing to do... */
}
struct pmap_md *
mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
{
static struct pmap_md md;
pte_t *pte;
vm_offset_t va;
if (dumpsys_minidump) {
md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */
if (prev == NULL) {
/* 1st: kernel .data and .bss. */
md.md_index = 1;
md.md_vaddr = trunc_page((uintptr_t)_etext);
md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
return (&md);
}
switch (prev->md_index) {
case 1:
/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
md.md_index = 2;
md.md_vaddr = data_start;
md.md_size = data_end - data_start;
break;
case 2:
/* 3rd: kernel VM. */
va = prev->md_vaddr + prev->md_size;
/* Find start of next chunk (from va). */
while (va < virtual_end) {
/* Don't dump the buffer cache. */
if (va >= kmi.buffer_sva &&
va < kmi.buffer_eva) {
va = kmi.buffer_eva;
continue;
}
pte = pte_find(mmu, kernel_pmap, va);
if (pte != NULL && PTE_ISVALID(pte))
break;
va += PAGE_SIZE;
}
if (va < virtual_end) {
md.md_vaddr = va;
va += PAGE_SIZE;
/* Find last page in chunk. */
while (va < virtual_end) {
/* Don't run into the buffer cache. */
if (va == kmi.buffer_sva)
break;
pte = pte_find(mmu, kernel_pmap, va);
if (pte == NULL || !PTE_ISVALID(pte))
break;
va += PAGE_SIZE;
}
md.md_size = va - md.md_vaddr;
break;
}
md.md_index = 3;
/* FALLTHROUGH */
default:
return (NULL);
}
} else { /* minidumps */
mem_regions(&physmem_regions, &physmem_regions_sz,
&availmem_regions, &availmem_regions_sz);
extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];
if (prev == NULL) {
/* first physical chunk. */
md.md_paddr = physmem_regions[0].mr_start;
md.md_size = physmem_regions[0].mr_size;
md.md_vaddr = ~0UL;
md.md_index = 1;
} else if (md.md_index < physmem_regions_sz) {
md.md_paddr = physmem_regions[md.md_index].mr_start;
md.md_size = physmem_regions[md.md_index].mr_size;
md.md_vaddr = ~0UL;
md.md_index++;
} else {
/* There's no next physical chunk. */
return (NULL);
void
mmu_booke_scan_init(mmu_t mmu)
{
vm_offset_t va;
pte_t *pte;
int i;
if (!do_minidump) {
/* Initialize phys. segments for dumpsys(). */
memset(&dump_map, 0, sizeof(dump_map));
mem_regions(&physmem_regions, &physmem_regions_sz, &availmem_regions,
&availmem_regions_sz);
for (i = 0; i < physmem_regions_sz; i++) {
dump_map[i].pa_start = physmem_regions[i].mr_start;
dump_map[i].pa_size = physmem_regions[i].mr_size;
}
return;
}
return (&md);
/* Virtual segments for minidumps: */
memset(&dump_map, 0, sizeof(dump_map));
/* 1st: kernel .data and .bss. */
dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
dump_map[0].pa_size =
round_page((uintptr_t)_end) - dump_map[0].pa_start;
/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
dump_map[1].pa_start = data_start;
dump_map[1].pa_size = data_end - data_start;
/* 3rd: kernel VM. */
va = dump_map[1].pa_start + dump_map[1].pa_size;
/* Find start of next chunk (from va). */
while (va < virtual_end) {
/* Don't dump the buffer cache. */
if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
va = kmi.buffer_eva;
continue;
}
pte = pte_find(mmu, kernel_pmap, va);
if (pte != NULL && PTE_ISVALID(pte))
break;
va += PAGE_SIZE;
}
if (va < virtual_end) {
dump_map[2].pa_start = va;
va += PAGE_SIZE;
/* Find last page in chunk. */
while (va < virtual_end) {
/* Don't run into the buffer cache. */
if (va == kmi.buffer_sva)
break;
pte = pte_find(mmu, kernel_pmap, va);
if (pte == NULL || !PTE_ISVALID(pte))
break;
va += PAGE_SIZE;
}
dump_map[2].pa_size = va - dump_map[2].pa_start;
}
}
/*

sys/powerpc/include/dump.h

@ -0,0 +1,69 @@
/*-
* Copyright (c) 2014 EMC Corp.
* Author: Conrad Meyer <conrad.meyer@isilon.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_DUMP_H_
#define _MACHINE_DUMP_H_
#define KERNELDUMP_ARCH_VERSION KERNELDUMP_POWERPC_VERSION
#define EM_VALUE ELF_ARCH /* Defined in powerpc/include/elf.h */
#define DUMPSYS_MD_PA_NPAIRS (PHYS_AVAIL_SZ + 1)
#define DUMPSYS_NUM_AUX_HDRS 0
void dumpsys_pa_init(void);
void dumpsys_unmap_chunk(vm_paddr_t, size_t, void *);
static inline struct dump_pa *
dumpsys_pa_next(struct dump_pa *p)
{
return (dumpsys_gen_pa_next(p));
}
static inline void
dumpsys_wbinv_all(void)
{
dumpsys_gen_wbinv_all();
}
static inline int
dumpsys_write_aux_headers(struct dumperinfo *di)
{
return (dumpsys_gen_write_aux_headers(di));
}
static inline int
dumpsys(struct dumperinfo *di)
{
return (dumpsys_generic(di));
}
#endif /* !_MACHINE_DUMP_H_ */

sys/powerpc/include/pmap.h

@ -74,13 +74,6 @@
#include <machine/slb.h>
#include <machine/tlb.h>
struct pmap_md {
u_int md_index;
vm_paddr_t md_paddr;
vm_offset_t md_vaddr;
vm_size_t md_size;
};
#if defined(AIM)
#if !defined(NPMAPS)
@ -252,11 +245,6 @@ extern vm_offset_t msgbuf_phys;
extern int pmap_bootstrapped;
extern vm_offset_t pmap_dumpsys_map(struct pmap_md *, vm_size_t, vm_size_t *);
extern void pmap_dumpsys_unmap(struct pmap_md *, vm_size_t, vm_offset_t);
extern struct pmap_md *pmap_scan_md(struct pmap_md *);
vm_offset_t pmap_early_io_map(vm_paddr_t pa, vm_size_t size);
#endif

sys/powerpc/powerpc/dump_machdep.c

@ -27,289 +27,12 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_watchdog.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/sysctl.h>
#ifdef SW_WATCHDOG
#include <sys/watchdog.h>
#endif
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/elf.h>
#include <machine/md_var.h>
CTASSERT(sizeof(struct kerneldumpheader) == 512);
/*
* Don't touch the first SIZEOF_METADATA bytes on the dump device. This
* is to protect us from metadata and to protect metadata from us.
*/
#define SIZEOF_METADATA (64*1024)
#define MD_ALIGN(x) (((off_t)(x) + PAGE_MASK) & ~PAGE_MASK)
#define DEV_ALIGN(x) (((off_t)(x) + (DEV_BSIZE-1)) & ~(DEV_BSIZE-1))
typedef int callback_t(struct pmap_md *, int, void *);
static struct kerneldumpheader kdh;
static off_t dumplo, fileofs;
/* Handle buffered writes. */
static char buffer[DEV_BSIZE];
static size_t fragsz;
int dumpsys_minidump = 1;
SYSCTL_INT(_debug, OID_AUTO, minidump, CTLFLAG_RD, &dumpsys_minidump, 0,
"Kernel makes compressed crash dumps");
static int
buf_write(struct dumperinfo *di, char *ptr, size_t sz)
{
size_t len;
int error;
while (sz) {
len = DEV_BSIZE - fragsz;
if (len > sz)
len = sz;
bcopy(ptr, buffer + fragsz, len);
fragsz += len;
ptr += len;
sz -= len;
if (fragsz == DEV_BSIZE) {
error = di->dumper(di->priv, buffer, 0, dumplo,
DEV_BSIZE);
if (error)
return error;
dumplo += DEV_BSIZE;
fragsz = 0;
}
}
return (0);
}
static int
buf_flush(struct dumperinfo *di)
{
int error;
if (fragsz == 0)
return (0);
error = di->dumper(di->priv, buffer, 0, dumplo, DEV_BSIZE);
dumplo += DEV_BSIZE;
fragsz = 0;
return (error);
}
static int
cb_dumpdata(struct pmap_md *md, int seqnr, void *arg)
{
struct dumperinfo *di = (struct dumperinfo*)arg;
vm_offset_t va;
size_t counter, ofs, resid, sz, maxsz;
int c, error, twiddle;
error = 0;
counter = 0; /* Update twiddle every 16MB */
twiddle = 0;
ofs = 0; /* Logical offset within the chunk */
resid = md->md_size;
maxsz = min(DFLTPHYS, di->maxiosize);
printf(" chunk %d: %lu bytes ", seqnr, (u_long)resid);
while (resid) {
sz = min(resid, maxsz);
va = pmap_dumpsys_map(md, ofs, &sz);
counter += sz;
if (counter >> 24) {
printf("%c\b", "|/-\\"[twiddle++ & 3]);
counter &= (1<<24) - 1;
}
#ifdef SW_WATCHDOG
wdog_kern_pat(WD_LASTVAL);
#endif
error = di->dumper(di->priv, (void*)va, 0, dumplo, sz);
pmap_dumpsys_unmap(md, ofs, va);
if (error)
break;
dumplo += sz;
resid -= sz;
ofs += sz;
/* Check for user abort. */
c = cncheckc();
if (c == 0x03)
return (ECANCELED);
if (c != -1)
printf("(CTRL-C to abort) ");
}
printf("... %s\n", (error) ? "fail" : "ok");
return (error);
}
static int
cb_dumphdr(struct pmap_md *md, int seqnr, void *arg)
{
struct dumperinfo *di = (struct dumperinfo*)arg;
Elf_Phdr phdr;
int error;
bzero(&phdr, sizeof(phdr));
phdr.p_type = PT_LOAD;
phdr.p_flags = PF_R; /* XXX */
phdr.p_offset = fileofs;
phdr.p_vaddr = md->md_vaddr;
phdr.p_paddr = md->md_paddr;
phdr.p_filesz = md->md_size;
phdr.p_memsz = md->md_size;
phdr.p_align = PAGE_SIZE;
error = buf_write(di, (char*)&phdr, sizeof(phdr));
fileofs += phdr.p_filesz;
return (error);
}
static int
cb_size(struct pmap_md *md, int seqnr, void *arg)
{
uint32_t *sz = (uint32_t*)arg;
*sz += md->md_size;
return (0);
}
static int
foreach_chunk(callback_t cb, void *arg)
{
struct pmap_md *md;
int error, seqnr;
seqnr = 0;
md = pmap_scan_md(NULL);
while (md != NULL) {
error = (*cb)(md, seqnr++, arg);
if (error)
return (-error);
md = pmap_scan_md(md);
}
return (seqnr);
}
int
dumpsys(struct dumperinfo *di)
{
Elf_Ehdr ehdr;
uint32_t dumpsize;
off_t hdrgap;
size_t hdrsz;
int error;
bzero(&ehdr, sizeof(ehdr));
ehdr.e_ident[EI_MAG0] = ELFMAG0;
ehdr.e_ident[EI_MAG1] = ELFMAG1;
ehdr.e_ident[EI_MAG2] = ELFMAG2;
ehdr.e_ident[EI_MAG3] = ELFMAG3;
ehdr.e_ident[EI_CLASS] = ELF_TARG_CLASS;
#if BYTE_ORDER == LITTLE_ENDIAN
ehdr.e_ident[EI_DATA] = ELFDATA2LSB;
#else
ehdr.e_ident[EI_DATA] = ELFDATA2MSB;
#endif
ehdr.e_ident[EI_VERSION] = EV_CURRENT;
ehdr.e_ident[EI_OSABI] = ELFOSABI_STANDALONE; /* XXX big picture? */
ehdr.e_type = ET_CORE;
ehdr.e_machine = ELF_ARCH; /* Defined in powerpc/include/elf.h */
ehdr.e_phoff = sizeof(ehdr);
ehdr.e_ehsize = sizeof(ehdr);
ehdr.e_phentsize = sizeof(Elf_Phdr);
ehdr.e_shentsize = sizeof(Elf_Shdr);
/* Calculate dump size. */
dumpsize = 0L;
ehdr.e_phnum = foreach_chunk(cb_size, &dumpsize);
hdrsz = ehdr.e_phoff + ehdr.e_phnum * ehdr.e_phentsize;
fileofs = MD_ALIGN(hdrsz);
dumpsize += fileofs;
hdrgap = fileofs - DEV_ALIGN(hdrsz);
/* For block devices, determine the dump offset on the device. */
if (di->mediasize > 0) {
if (di->mediasize <
SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
error = ENOSPC;
goto fail;
}
dumplo = di->mediaoffset + di->mediasize - dumpsize;
dumplo -= sizeof(kdh) * 2;
} else
dumplo = 0;
mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_POWERPC_VERSION, dumpsize,
di->blocksize);
printf("Dumping %u MB (%d chunks)\n", dumpsize >> 20,
ehdr.e_phnum);
/* Dump leader */
error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
if (error)
goto fail;
dumplo += sizeof(kdh);
/* Dump ELF header */
error = buf_write(di, (char*)&ehdr, sizeof(ehdr));
if (error)
goto fail;
/* Dump program headers */
error = foreach_chunk(cb_dumphdr, di);
if (error < 0)
goto fail;
buf_flush(di);
/*
* All headers are written using blocked I/O, so we know the
* current offset is (still) block aligned. Skip the alignement
* in the file to have the segment contents aligned at page
* boundary. We cannot use MD_ALIGN on dumplo, because we don't
* care and may very well be unaligned within the dump device.
*/
dumplo += hdrgap;
/* Dump memory chunks (updates dumplo) */
error = foreach_chunk(cb_dumpdata, di);
if (error < 0)
goto fail;
/* Dump trailer */
error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
if (error)
goto fail;
/* Signal completion, signoff and exit stage left. */
dump_write(di, NULL, 0, 0, 0);
printf("\nDump complete\n");
return (0);
fail:
if (error < 0)
error = -error;
if (error == ECANCELED)
printf("\nDump aborted\n");
else if (error == ENOSPC)
printf("\nDump failed. Partition too small.\n");
else
printf("\n** DUMP FAILED (ERROR %d) **\n", error);
return (error);
}
int do_minidump = 1;
SYSCTL_INT(_debug, OID_AUTO, minidump, CTLFLAG_RWTUN, &do_minidump, 0,
"Enable mini crash dumps");

View File

@ -107,11 +107,6 @@ CODE {
return;
}
static struct pmap_md *mmu_null_scan_md(mmu_t mmu, struct pmap_md *p)
{
return (NULL);
}
static void *mmu_null_mapdev_attr(mmu_t mmu, vm_offset_t pa,
vm_size_t size, vm_memattr_t ma)
{
@ -905,46 +900,36 @@ METHOD void sync_icache {
/**
* @brief Create temporary memory mapping for use by dumpsys().
*
* @param _md The memory chunk in which the mapping lies.
* @param _ofs The offset within the chunk of the mapping.
* @param _pa The physical page to map.
* @param _sz The requested size of the mapping.
*
* @retval vm_offset_t The virtual address of the mapping.
*
* The sz argument is modified to reflect the actual size of the
* mapping.
* @param _va The virtual address of the mapping.
*/
METHOD vm_offset_t dumpsys_map {
METHOD void dumpsys_map {
mmu_t _mmu;
struct pmap_md *_md;
vm_size_t _ofs;
vm_size_t *_sz;
vm_paddr_t _pa;
size_t _sz;
void **_va;
};
/**
* @brief Remove temporary dumpsys() mapping.
*
* @param _md The memory chunk in which the mapping lies.
* @param _ofs The offset within the chunk of the mapping.
* @param _pa The physical page to map.
* @param _sz The requested size of the mapping.
* @param _va The virtual address of the mapping.
*/
METHOD void dumpsys_unmap {
mmu_t _mmu;
struct pmap_md *_md;
vm_size_t _ofs;
vm_offset_t _va;
vm_paddr_t _pa;
size_t _sz;
void *_va;
};
/**
* @brief Scan/iterate memory chunks.
*
* @param _prev The previously returned chunk or NULL.
*
* @retval The next (or first when _prev is NULL) chunk.
* @brief Initialize memory chunks for dumpsys.
*/
METHOD struct pmap_md * scan_md {
METHOD void scan_init {
mmu_t _mmu;
struct pmap_md *_prev;
} DEFAULT mmu_null_scan_md;
};
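For orientation only, a hedged sketch of how a pmap module might provide the reworked methods; every identifier containing "example" is invented for illustration, and dump_map is the shared chunk array declared in <sys/kerneldump.h>:
extern struct dump_pa dump_map[DUMPSYS_MD_PA_NPAIRS];
/* Hypothetical description of physical memory; stands in for the real one. */
struct example_region { vm_paddr_t mr_start; vm_size_t mr_size; };
extern struct example_region example_regions[];
extern int example_nregions;
static void
mmu_example_scan_init(mmu_t mmu)
{
	int i;
	/* Give the generic dump code one dump_pa entry per memory region. */
	for (i = 0; i < example_nregions && i < DUMPSYS_MD_PA_NPAIRS; i++) {
		dump_map[i].pa_start = example_regions[i].mr_start;
		dump_map[i].pa_size = example_regions[i].mr_size;
	}
}
static void
mmu_example_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
{
	/*
	 * EXAMPLE_PHYS_TO_DIRECT is a placeholder for the platform's
	 * direct-map translation; a direct-mapped kernel can simply hand
	 * back the physical address.
	 */
	*va = (void *)EXAMPLE_PHYS_TO_DIRECT(pa);
}
static void
mmu_example_dumpsys_unmap(mmu_t mmu, vm_paddr_t pa, size_t sz, void *va)
{
	/* Nothing to tear down for a direct mapping. */
}
A real module would presumably register these through its mmu_method_t table (MMUMETHOD entries for mmu_scan_init, mmu_dumpsys_map and mmu_dumpsys_unmap) and substitute its platform's actual region list and mapping primitive.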

View File

@ -43,7 +43,9 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/lock.h>
#include <sys/kerneldump.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/systm.h>
@ -51,6 +53,8 @@ __FBSDID("$FreeBSD$");
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <machine/dump.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/smp.h>
@ -522,28 +526,28 @@ pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
return (MMU_SYNC_ICACHE(mmu_obj, pm, va, sz));
}
vm_offset_t
pmap_dumpsys_map(struct pmap_md *md, vm_size_t ofs, vm_size_t *sz)
void
dumpsys_map_chunk(vm_paddr_t pa, size_t sz, void **va)
{
CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, md, ofs, *sz);
return (MMU_DUMPSYS_MAP(mmu_obj, md, ofs, sz));
CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
return (MMU_DUMPSYS_MAP(mmu_obj, pa, sz, va));
}
void
pmap_dumpsys_unmap(struct pmap_md *md, vm_size_t ofs, vm_offset_t va)
dumpsys_unmap_chunk(vm_paddr_t pa, size_t sz, void *va)
{
CTR4(KTR_PMAP, "%s(%p, %#x, %#x)", __func__, md, ofs, va);
return (MMU_DUMPSYS_UNMAP(mmu_obj, md, ofs, va));
CTR4(KTR_PMAP, "%s(%#jx, %#zx, %p)", __func__, (uintmax_t)pa, sz, va);
return (MMU_DUMPSYS_UNMAP(mmu_obj, pa, sz, va));
}
struct pmap_md *
pmap_scan_md(struct pmap_md *prev)
void
dumpsys_pa_init(void)
{
CTR2(KTR_PMAP, "%s(%p)", __func__, prev);
return (MMU_SCAN_MD(mmu_obj, prev));
CTR1(KTR_PMAP, "%s()", __func__);
return (MMU_SCAN_INIT(mmu_obj));
}
/*

View File

@ -0,0 +1,76 @@
/*-
* Copyright (c) 2014 EMC Corp.
* Author: Conrad Meyer <conrad.meyer@isilon.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_DUMP_H_
#define _MACHINE_DUMP_H_
#define DUMPSYS_MD_PA_NPAIRS 128
#define DUMPSYS_NUM_AUX_HDRS 0
#define KERNELDUMP_ARCH_VERSION KERNELDUMP_SPARC64_VERSION
#define EM_VALUE EM_SPARCV9
void dumpsys_pa_init(void);
int dumpsys(struct dumperinfo *);
static inline struct dump_pa *
dumpsys_pa_next(struct dump_pa *p)
{
return (dumpsys_gen_pa_next(p));
}
static inline void
dumpsys_wbinv_all(void)
{
dumpsys_gen_wbinv_all();
}
static inline void
dumpsys_unmap_chunk(vm_paddr_t pa, size_t s, void *va)
{
dumpsys_gen_unmap_chunk(pa, s, va);
}
static inline int
dumpsys_write_aux_headers(struct dumperinfo *di)
{
return (dumpsys_gen_write_aux_headers(di));
}
static inline int
minidumpsys(struct dumperinfo *di)
{
return (-ENOSYS);
}
#endif /* !_MACHINE_DUMP_H_ */

View File

@ -39,62 +39,38 @@ __FBSDID("$FreeBSD$");
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <machine/dump.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/kerneldump.h>
#include <machine/ofw_mem.h>
#include <machine/tsb.h>
#include <machine/tlb.h>
CTASSERT(sizeof(struct kerneldumpheader) == DEV_BSIZE);
static off_t fileofs;
static struct kerneldumpheader kdh;
static off_t dumplo, dumppos;
extern off_t dumplo;
extern struct dump_pa dump_map[DUMPSYS_MD_PA_NPAIRS];
/* Handle buffered writes. */
static char buffer[DEV_BSIZE];
static vm_size_t fragsz;
int do_minidump = 0;
#define MAXDUMPSZ (MAXDUMPPGS << PAGE_SHIFT)
static int
buf_write(struct dumperinfo *di, char *ptr, size_t sz)
void
dumpsys_pa_init(void)
{
size_t len;
int error;
int i;
while (sz) {
len = DEV_BSIZE - fragsz;
if (len > sz)
len = sz;
bcopy(ptr, buffer + fragsz, len);
fragsz += len;
ptr += len;
sz -= len;
if (fragsz == DEV_BSIZE) {
error = dump_write(di, buffer, 0, dumplo,
DEV_BSIZE);
if (error)
return error;
dumplo += DEV_BSIZE;
fragsz = 0;
}
memset(dump_map, 0, sizeof(dump_map));
for (i = 0; i < sparc64_nmemreg; i++) {
dump_map[i].pa_start = sparc64_memreg[i].mr_start;
dump_map[i].pa_size = sparc64_memreg[i].mr_size;
}
return (0);
}
static int
buf_flush(struct dumperinfo *di)
void
dumpsys_map_chunk(vm_paddr_t pa, size_t chunk __unused, void **va)
{
int error;
if (fragsz == 0)
return (0);
error = dump_write(di, buffer, 0, dumplo, DEV_BSIZE);
dumplo += DEV_BSIZE;
fragsz = 0;
return (error);
*va = (void *)TLB_PHYS_TO_DIRECT(pa);
}
static int
@ -104,47 +80,16 @@ reg_write(struct dumperinfo *di, vm_paddr_t pa, vm_size_t size)
r.dr_pa = pa;
r.dr_size = size;
r.dr_offs = dumppos;
dumppos += size;
return (buf_write(di, (char *)&r, sizeof(r)));
}
static int
blk_dump(struct dumperinfo *di, vm_paddr_t pa, vm_size_t size)
{
vm_size_t pos, rsz;
vm_offset_t va;
int c, counter, error, twiddle;
printf(" chunk at %#lx: %ld bytes ", (u_long)pa, (long)size);
va = 0L;
error = counter = twiddle = 0;
for (pos = 0; pos < size; pos += MAXDUMPSZ, counter++) {
if (counter % 128 == 0)
printf("%c\b", "|/-\\"[twiddle++ & 3]);
rsz = size - pos;
rsz = (rsz > MAXDUMPSZ) ? MAXDUMPSZ : rsz;
va = TLB_PHYS_TO_DIRECT(pa + pos);
error = dump_write(di, (void *)va, 0, dumplo, rsz);
if (error)
break;
dumplo += rsz;
/* Check for user abort. */
c = cncheckc();
if (c == 0x03)
return (ECANCELED);
if (c != -1)
printf("(CTRL-C to abort) ");
}
printf("... %s\n", (error) ? "fail" : "ok");
return (error);
r.dr_offs = fileofs;
fileofs += size;
return (dumpsys_buf_write(di, (char *)&r, sizeof(r)));
}
int
dumpsys(struct dumperinfo *di)
{
static struct kerneldumpheader kdh;
struct sparc64_dump_hdr hdr;
vm_size_t size, totsize, hdrsize;
int error, i, nreg;
@ -189,10 +134,10 @@ dumpsys(struct dumperinfo *di)
hdr.dh_tsb_mask = tsb_kernel_mask;
hdr.dh_nregions = nreg;
if (buf_write(di, (char *)&hdr, sizeof(hdr)) != 0)
if (dumpsys_buf_write(di, (char *)&hdr, sizeof(hdr)) != 0)
goto fail;
dumppos = hdrsize;
fileofs = hdrsize;
/* Now, write out the region descriptors. */
for (i = 0; i < sparc64_nmemreg; i++) {
error = reg_write(di, sparc64_memreg[i].mr_start,
@ -200,15 +145,12 @@ dumpsys(struct dumperinfo *di)
if (error != 0)
goto fail;
}
buf_flush(di);
dumpsys_buf_flush(di);
/* Dump memory chunks. */
for (i = 0; i < sparc64_nmemreg; i++) {
error = blk_dump(di, sparc64_memreg[i].mr_start,
sparc64_memreg[i].mr_size);
if (error != 0)
goto fail;
}
error = dumpsys_foreach_chunk(dumpsys_cb_dumpdata, di);
if (error < 0)
goto fail;
/* Dump trailer */
error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
@ -221,6 +163,9 @@ dumpsys(struct dumperinfo *di)
return (0);
fail:
if (error < 0)
error = -error;
/* XXX It should look more like VMS :-) */
printf("** DUMP FAILED (ERROR %d) **\n", error);
return (error);

View File

@ -338,7 +338,6 @@ struct dumperinfo {
int set_dumper(struct dumperinfo *, const char *_devname, struct thread *td);
int dump_write(struct dumperinfo *, void *, vm_offset_t, off_t, size_t);
int dumpsys(struct dumperinfo *);
int doadump(boolean_t);
extern int dumping; /* system is dumping */

View File

@ -100,8 +100,31 @@ kerneldump_parity(struct kerneldumpheader *kdhp)
}
#ifdef _KERNEL
struct dump_pa {
vm_paddr_t pa_start;
vm_paddr_t pa_size;
};
void mkdumpheader(struct kerneldumpheader *kdh, char *magic, uint32_t archver,
uint64_t dumplen, uint32_t blksz);
int dumpsys_generic(struct dumperinfo *);
void dumpsys_map_chunk(vm_paddr_t, size_t, void **);
typedef int dumpsys_callback_t(struct dump_pa *, int, void *);
int dumpsys_foreach_chunk(dumpsys_callback_t, void *);
int dumpsys_cb_dumpdata(struct dump_pa *, int, void *);
int dumpsys_buf_write(struct dumperinfo *, char *, size_t);
int dumpsys_buf_flush(struct dumperinfo *);
void dumpsys_gen_pa_init(void);
struct dump_pa *dumpsys_gen_pa_next(struct dump_pa *);
void dumpsys_gen_wbinv_all(void);
void dumpsys_gen_unmap_chunk(vm_paddr_t, size_t, void *);
int dumpsys_gen_write_aux_headers(struct dumperinfo *);
extern int do_minidump;
#endif
#endif /* _SYS_KERNELDUMP_H */
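As a usage sketch (an illustration, not code from this change): a dumpsys_callback_t is invoked once per dump_pa chunk with a running sequence number and returns 0 to continue, while dumpsys_foreach_chunk() returns the number of chunks visited, or the callback's error negated. A hypothetical caller that only wants a page count might look like:
static int
example_count_pages(struct dump_pa *p, int seqnr, void *arg)
{
	uint64_t *pages;
	pages = arg;
	*pages += p->pa_size / PAGE_SIZE;
	return (0);
}
/*
 * Usage:
 *	npages = 0;
 *	nchunks = dumpsys_foreach_chunk(example_count_pages, &npages);
 *	if (nchunks < 0)
 *		error = -nchunks;
 */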

87
sys/x86/include/dump.h Normal file
View File

@ -0,0 +1,87 @@
/*-
* Copyright (c) 2014 EMC Corp.
* Author: Conrad Meyer <conrad.meyer@isilon.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _MACHINE_DUMP_H_
#define _MACHINE_DUMP_H_
#ifdef __amd64__
#define KERNELDUMP_ARCH_VERSION KERNELDUMP_AMD64_VERSION
#define EM_VALUE EM_X86_64
#else
#define KERNELDUMP_ARCH_VERSION KERNELDUMP_I386_VERSION
#define EM_VALUE EM_386
#endif
/* 20 phys_avail entry pairs correspond to 10 pa's */
#define DUMPSYS_MD_PA_NPAIRS 10
#define DUMPSYS_NUM_AUX_HDRS 0
static inline void
dumpsys_pa_init(void)
{
dumpsys_gen_pa_init();
}
static inline struct dump_pa *
dumpsys_pa_next(struct dump_pa *p)
{
return (dumpsys_gen_pa_next(p));
}
static inline void
dumpsys_wbinv_all(void)
{
dumpsys_gen_wbinv_all();
}
static inline void
dumpsys_unmap_chunk(vm_paddr_t pa, size_t s, void *va)
{
dumpsys_gen_unmap_chunk(pa, s, va);
}
static inline int
dumpsys_write_aux_headers(struct dumperinfo *di)
{
return (dumpsys_gen_write_aux_headers(di));
}
static inline int
dumpsys(struct dumperinfo *di)
{
return (dumpsys_generic(di));
}
#endif /* !_MACHINE_DUMP_H_ */

View File

@ -30,349 +30,25 @@ __FBSDID("$FreeBSD$");
#include "opt_watchdog.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/cons.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/kerneldump.h>
#include <sys/watchdog.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/elf.h>
#include <machine/md_var.h>
#ifdef __amd64__
#define KERNELDUMP_VERSION KERNELDUMP_AMD64_VERSION
#define EM_VALUE EM_X86_64
#else
#define KERNELDUMP_VERSION KERNELDUMP_I386_VERSION
#define EM_VALUE EM_386
#endif
CTASSERT(sizeof(struct kerneldumpheader) == 512);
int do_minidump = 1;
SYSCTL_INT(_debug, OID_AUTO, minidump, CTLFLAG_RWTUN, &do_minidump, 0,
"Enable mini crash dumps");
/*
* Don't touch the first SIZEOF_METADATA bytes on the dump device. This
* is to protect us from metadata and to protect metadata from us.
*/
#define SIZEOF_METADATA (64*1024)
#define MD_ALIGN(x) (((off_t)(x) + PAGE_MASK) & ~PAGE_MASK)
#define DEV_ALIGN(x) (((off_t)(x) + (DEV_BSIZE-1)) & ~(DEV_BSIZE-1))
struct md_pa {
vm_paddr_t md_start;
vm_paddr_t md_size;
};
typedef int callback_t(struct md_pa *, int, void *);
static struct kerneldumpheader kdh;
static off_t dumplo, fileofs;
/* Handle buffered writes. */
static char buffer[DEV_BSIZE];
static size_t fragsz;
/* 20 phys_avail entry pairs correspond to 10 md_pa's */
static struct md_pa dump_map[10];
static void
md_pa_init(void)
void
dumpsys_map_chunk(vm_paddr_t pa, size_t chunk, void **va)
{
int n, idx;
int i;
vm_paddr_t a;
bzero(dump_map, sizeof(dump_map));
for (n = 0; n < sizeof(dump_map) / sizeof(dump_map[0]); n++) {
idx = n * 2;
if (dump_avail[idx] == 0 && dump_avail[idx + 1] == 0)
break;
dump_map[n].md_start = dump_avail[idx];
dump_map[n].md_size = dump_avail[idx + 1] - dump_avail[idx];
for (i = 0; i < chunk; i++) {
a = pa + i * PAGE_SIZE;
*va = pmap_kenter_temporary(trunc_page(a), i);
}
}
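/*
 * If, as in the amd64/i386 pmap, pmap_kenter_temporary() maps the page at
 * slot i of the kernel's crashdump window and returns the window's base
 * address, then after the loop above *va refers to a virtually contiguous
 * mapping of all 'chunk' pages starting at that base.
 */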
static struct md_pa *
md_pa_first(void)
{
return (&dump_map[0]);
}
static struct md_pa *
md_pa_next(struct md_pa *mdp)
{
mdp++;
if (mdp->md_size == 0)
mdp = NULL;
return (mdp);
}
static int
buf_write(struct dumperinfo *di, char *ptr, size_t sz)
{
size_t len;
int error;
while (sz) {
len = DEV_BSIZE - fragsz;
if (len > sz)
len = sz;
bcopy(ptr, buffer + fragsz, len);
fragsz += len;
ptr += len;
sz -= len;
if (fragsz == DEV_BSIZE) {
error = dump_write(di, buffer, 0, dumplo,
DEV_BSIZE);
if (error)
return error;
dumplo += DEV_BSIZE;
fragsz = 0;
}
}
return (0);
}
static int
buf_flush(struct dumperinfo *di)
{
int error;
if (fragsz == 0)
return (0);
error = dump_write(di, buffer, 0, dumplo, DEV_BSIZE);
dumplo += DEV_BSIZE;
fragsz = 0;
return (error);
}
#define PG2MB(pgs) ((pgs + (1 << 8) - 1) >> 8)
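/* E.g., with 4 KiB pages (256 pages per MB), PG2MB(300) == (300 + 255) >> 8 == 2. */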
static int
cb_dumpdata(struct md_pa *mdp, int seqnr, void *arg)
{
struct dumperinfo *di = (struct dumperinfo*)arg;
vm_paddr_t a, pa;
void *va;
uint64_t pgs;
size_t counter, sz, chunk;
int i, c, error, twiddle;
u_int maxdumppgs;
error = 0; /* catch case in which chunk size is 0 */
counter = 0; /* Update twiddle every 16MB */
twiddle = 0;
va = 0;
pgs = mdp->md_size / PAGE_SIZE;
pa = mdp->md_start;
maxdumppgs = min(di->maxiosize / PAGE_SIZE, MAXDUMPPGS);
if (maxdumppgs == 0) /* seatbelt */
maxdumppgs = 1;
printf(" chunk %d: %juMB (%ju pages)", seqnr, (uintmax_t)PG2MB(pgs),
(uintmax_t)pgs);
while (pgs) {
chunk = pgs;
if (chunk > maxdumppgs)
chunk = maxdumppgs;
sz = chunk << PAGE_SHIFT;
counter += sz;
if (counter >> 24) {
printf(" %ju", (uintmax_t)PG2MB(pgs));
counter &= (1<<24) - 1;
}
for (i = 0; i < chunk; i++) {
a = pa + i * PAGE_SIZE;
va = pmap_kenter_temporary(trunc_page(a), i);
}
wdog_kern_pat(WD_LASTVAL);
error = dump_write(di, va, 0, dumplo, sz);
if (error)
break;
dumplo += sz;
pgs -= chunk;
pa += sz;
/* Check for user abort. */
c = cncheckc();
if (c == 0x03)
return (ECANCELED);
if (c != -1)
printf(" (CTRL-C to abort) ");
}
printf(" ... %s\n", (error) ? "fail" : "ok");
return (error);
}
static int
cb_dumphdr(struct md_pa *mdp, int seqnr, void *arg)
{
struct dumperinfo *di = (struct dumperinfo*)arg;
Elf_Phdr phdr;
uint64_t size;
int error;
size = mdp->md_size;
bzero(&phdr, sizeof(phdr));
phdr.p_type = PT_LOAD;
phdr.p_flags = PF_R; /* XXX */
phdr.p_offset = fileofs;
phdr.p_vaddr = mdp->md_start;
phdr.p_paddr = mdp->md_start;
phdr.p_filesz = size;
phdr.p_memsz = size;
phdr.p_align = PAGE_SIZE;
error = buf_write(di, (char*)&phdr, sizeof(phdr));
fileofs += phdr.p_filesz;
return (error);
}
static int
cb_size(struct md_pa *mdp, int seqnr, void *arg)
{
uint64_t *sz = (uint64_t*)arg;
*sz += (uint64_t)mdp->md_size;
return (0);
}
static int
foreach_chunk(callback_t cb, void *arg)
{
struct md_pa *mdp;
int error, seqnr;
seqnr = 0;
mdp = md_pa_first();
while (mdp != NULL) {
error = (*cb)(mdp, seqnr++, arg);
if (error)
return (-error);
mdp = md_pa_next(mdp);
}
return (seqnr);
}
int
dumpsys(struct dumperinfo *di)
{
Elf_Ehdr ehdr;
uint64_t dumpsize;
off_t hdrgap;
size_t hdrsz;
int error;
if (do_minidump)
return (minidumpsys(di));
bzero(&ehdr, sizeof(ehdr));
ehdr.e_ident[EI_MAG0] = ELFMAG0;
ehdr.e_ident[EI_MAG1] = ELFMAG1;
ehdr.e_ident[EI_MAG2] = ELFMAG2;
ehdr.e_ident[EI_MAG3] = ELFMAG3;
ehdr.e_ident[EI_CLASS] = ELF_CLASS;
#if BYTE_ORDER == LITTLE_ENDIAN
ehdr.e_ident[EI_DATA] = ELFDATA2LSB;
#else
ehdr.e_ident[EI_DATA] = ELFDATA2MSB;
#endif
ehdr.e_ident[EI_VERSION] = EV_CURRENT;
ehdr.e_ident[EI_OSABI] = ELFOSABI_STANDALONE; /* XXX big picture? */
ehdr.e_type = ET_CORE;
ehdr.e_machine = EM_VALUE;
ehdr.e_phoff = sizeof(ehdr);
ehdr.e_flags = 0;
ehdr.e_ehsize = sizeof(ehdr);
ehdr.e_phentsize = sizeof(Elf_Phdr);
ehdr.e_shentsize = sizeof(Elf_Shdr);
md_pa_init();
/* Calculate dump size. */
dumpsize = 0L;
ehdr.e_phnum = foreach_chunk(cb_size, &dumpsize);
hdrsz = ehdr.e_phoff + ehdr.e_phnum * ehdr.e_phentsize;
fileofs = MD_ALIGN(hdrsz);
dumpsize += fileofs;
hdrgap = fileofs - DEV_ALIGN(hdrsz);
/* Determine dump offset on device. */
if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) {
error = ENOSPC;
goto fail;
}
dumplo = di->mediaoffset + di->mediasize - dumpsize;
dumplo -= sizeof(kdh) * 2;
mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_VERSION, dumpsize,
di->blocksize);
printf("Dumping %llu MB (%d chunks)\n", (long long)dumpsize >> 20,
ehdr.e_phnum);
/* Dump leader */
error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
if (error)
goto fail;
dumplo += sizeof(kdh);
/* Dump ELF header */
error = buf_write(di, (char*)&ehdr, sizeof(ehdr));
if (error)
goto fail;
/* Dump program headers */
error = foreach_chunk(cb_dumphdr, di);
if (error < 0)
goto fail;
buf_flush(di);
/*
* All headers are written using blocked I/O, so we know the
* current offset is (still) block aligned. Skip the alignment
* gap in the file so that the segment contents start on a page
* boundary. We cannot use MD_ALIGN on dumplo, because we don't
* care whether dumplo is aligned within the dump device, and it
* may very well not be.
*/
dumplo += hdrgap;
/* Dump memory chunks (updates dumplo) */
error = foreach_chunk(cb_dumpdata, di);
if (error < 0)
goto fail;
/* Dump trailer */
error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh));
if (error)
goto fail;
/* Signal completion, signoff and exit stage left. */
dump_write(di, NULL, 0, 0, 0);
printf("\nDump complete\n");
return (0);
fail:
if (error < 0)
error = -error;
if (error == ECANCELED)
printf("\nDump aborted\n");
else if (error == ENOSPC)
printf("\nDump failed. Partition too small.\n");
else
printf("\n** DUMP FAILED (ERROR %d) **\n", error);
return (error);
}