[PPC64] Initial kernel minidump implementation

Based on the POWER9BSD implementation, with all POWER9-specific code removed
and new methods added to the PPC64 MMU interface to isolate platform-specific
code. Currently, the new methods are implemented on pseries and PowerNV
(D21643).

Reviewed by:	jhibbits
Differential Revision:	https://reviews.freebsd.org/D21551
This commit is contained in:
Leandro Lupori 2019-10-14 13:04:04 +00:00
parent fcfd8ad537
commit 0ecc478b74
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=353489
14 changed files with 723 additions and 5 deletions

View File

@ -224,6 +224,7 @@ powerpc/powerpc/intr_machdep.c standard
powerpc/powerpc/iommu_if.m standard
powerpc/powerpc/machdep.c standard
powerpc/powerpc/mem.c optional mem
powerpc/powerpc/minidump_machdep.c optional powerpc64
powerpc/powerpc/mmu_if.m standard
powerpc/powerpc/mp_machdep.c optional smp
powerpc/powerpc/nexus.c standard

View File

@ -292,7 +292,7 @@ dumpsys_generic(struct dumperinfo *di)
size_t hdrsz;
int error;
#ifndef __powerpc__
#if !defined(__powerpc__) || defined(__powerpc64__)
if (do_minidump)
return (minidumpsys(di));
#endif

View File

@ -307,6 +307,8 @@ static int moea64_map_user_ptr(mmu_t mmu, pmap_t pm,
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
static int moea64_decode_kernel_ptr(mmu_t mmu, vm_offset_t addr,
int *is_user, vm_offset_t *decoded_addr);
static size_t moea64_scan_pmap(mmu_t mmu);
static void *moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs);
static mmu_method_t moea64_methods[] = {
@ -356,6 +358,8 @@ static mmu_method_t moea64_methods[] = {
MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr),
MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
MMUMETHOD(mmu_scan_init, moea64_scan_init),
MMUMETHOD(mmu_scan_pmap, moea64_scan_pmap),
MMUMETHOD(mmu_dump_pmap_init, moea64_dump_pmap_init),
MMUMETHOD(mmu_dumpsys_map, moea64_dumpsys_map),
MMUMETHOD(mmu_map_user_ptr, moea64_map_user_ptr),
MMUMETHOD(mmu_decode_kernel_ptr, moea64_decode_kernel_ptr),
@ -798,6 +802,8 @@ moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelen
hwphyssz - physsz;
physsz = hwphyssz;
phys_avail_count++;
dump_avail[j] = phys_avail[j];
dump_avail[j + 1] = phys_avail[j + 1];
}
break;
}
@ -805,6 +811,8 @@ moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelen
phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
phys_avail_count++;
physsz += regions[i].mr_size;
dump_avail[j] = phys_avail[j];
dump_avail[j + 1] = phys_avail[j + 1];
}
/* Check for overlap with the kernel and exception vectors */
@ -982,7 +990,7 @@ moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend
* Set the start and end of kva.
*/
virtual_avail = VM_MIN_KERNEL_ADDRESS;
virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;
/*
* Map the entire KVA range into the SLB. We must not fault there.
@ -1056,6 +1064,9 @@ moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend
}
dpcpu_init(dpcpu, curcpu);
crashdumpmap = (caddr_t)virtual_avail;
virtual_avail += MAXDUMPPGS * PAGE_SIZE;
/*
* Allocate some things for page zeroing. We put this directly
* in the page table and use MOEA64_PTE_REPLACE to avoid any
@ -2932,3 +2943,69 @@ moea64_scan_init(mmu_t mmu)
}
}
/*
 * Walk the kernel pmap's PVO tree and mark every physical page mapped by
 * the kernel as dumpable, so the minidump includes it.  DMAP mappings are
 * skipped, except where they cover the kernel image itself (the region
 * between _etext and _end).
 *
 * Returns the size, in bytes, of the full page table (sizeof(struct lpte)
 * times 8 PTEs per PTEG), which the caller uses as the pmap dump size.
 */
static size_t
moea64_scan_pmap(mmu_t mmu)
{
	struct pvo_entry *pvo;
	vm_paddr_t pa, pa_end;
	vm_offset_t va, pgva, kstart, kend, kstart_lp, kend_lp;
	uint64_t lpsize;

	lpsize = moea64_large_page_size;

	/* Kernel region bounds, in both page and large-page granularity. */
	kstart = trunc_page((vm_offset_t)_etext);
	kend = round_page((vm_offset_t)_end);
	kstart_lp = kstart & ~moea64_large_page_mask;
	kend_lp = (kend + moea64_large_page_mask) & ~moea64_large_page_mask;

	CTR4(KTR_PMAP, "moea64_scan_pmap: kstart=0x%016lx, kend=0x%016lx, "
	    "kstart_lp=0x%016lx, kend_lp=0x%016lx",
	    kstart, kend, kstart_lp, kend_lp);

	PMAP_LOCK(kernel_pmap);
	RB_FOREACH(pvo, pvo_tree, &kernel_pmap->pmap_pvo) {
		va = pvo->pvo_vaddr;
		if (va & PVO_DEAD)
			continue;

		/* Skip DMAP (except kernel area) */
		if (va >= DMAP_BASE_ADDRESS && va <= DMAP_MAX_ADDRESS) {
			if (va & PVO_LARGE) {
				pgva = va & ~moea64_large_page_mask;
				if (pgva < kstart_lp || pgva >= kend_lp)
					continue;
			} else {
				pgva = trunc_page(va);
				if (pgva < kstart || pgva >= kend)
					continue;
			}
		}

		/* Mark every physical page covered by this mapping. */
		pa = pvo->pvo_pte.pa & LPTE_RPGN;

		if (va & PVO_LARGE) {
			pa_end = pa + lpsize;
			for (; pa < pa_end; pa += PAGE_SIZE) {
				if (is_dumpable(pa))
					dump_add_page(pa);
			}
		} else {
			if (is_dumpable(pa))
				dump_add_page(pa);
		}
	}
	PMAP_UNLOCK(kernel_pmap);

	/* Full hashed page table size: 8 PTEs per PTEG. */
	return (sizeof(struct lpte) * moea64_pteg_count * 8);
}
/* Cursor state shared with the mmu_dump_pmap method across dump blocks. */
static struct dump_context dump_ctx;

/*
 * Prepare a page-table dump: rewind the PTE cursor to the start of the
 * table and record the block size, in bytes, that dump_pmap will use.
 * Returns the dump context handed back to subsequent dump_pmap calls.
 */
static void *
moea64_dump_pmap_init(mmu_t mmu, unsigned blkpgs)
{
	struct dump_context *dctx;

	dctx = &dump_ctx;
	dctx->ptex = 0;
	dctx->ptex_end = moea64_pteg_count * 8;	/* 8 PTEs per PTEG */
	dctx->blksz = blkpgs * PAGE_SIZE;
	return (dctx);
}

View File

@ -34,6 +34,12 @@
#include <machine/mmuvar.h>
/* Cursor state for dumping the page table in fixed-size blocks. */
struct dump_context {
	u_long ptex;		/* Index of the next PTE to dump. */
	u_long ptex_end;	/* Total number of PTEs in the table. */
	size_t blksz;		/* Size of one dump block, in bytes. */
};
extern mmu_def_t oea64_mmu;
/*

View File

@ -37,6 +37,9 @@
void dumpsys_pa_init(void);
void dumpsys_unmap_chunk(vm_paddr_t, size_t, void *);
size_t dumpsys_scan_pmap(void);
void *dumpsys_dump_pmap_init(unsigned blkpgs);
void *dumpsys_dump_pmap(void *ctx, void *buf, u_long *nbytes);
static inline struct dump_pa *
dumpsys_pa_next(struct dump_pa *p)

View File

@ -41,6 +41,15 @@ extern int szsigcode32;
#ifdef __powerpc64__
extern char sigcode64[], sigcode64_elfv2[];
extern int szsigcode64, szsigcode64_elfv2;
extern uint64_t *vm_page_dump;
extern int vm_page_dump_size;
struct dumperinfo;
int minidumpsys(struct dumperinfo *);
int is_dumpable(vm_paddr_t);
void dump_add_page(vm_paddr_t);
void dump_drop_page(vm_paddr_t);
#endif
extern long Maxmem;

View File

@ -0,0 +1,52 @@
/*-
* Copyright (c) 2006 Peter Wemm
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* From i386: FreeBSD: 157909 2006-04-21 04:28:43Z peter
* $FreeBSD$
*/
#ifndef _MACHINE_MINIDUMP_H_
#define _MACHINE_MINIDUMP_H_ 1
#define MINIDUMP_MAGIC "minidump FreeBSD/powerpc64"
#define MINIDUMP_VERSION 1
/*
 * On-disk header of a powerpc64 minidump; written as the first page of
 * the dump and filled in by minidumpsys().
 */
struct minidumphdr {
	char magic[32];		/* MINIDUMP_MAGIC identification string. */
	char mmu_name[32];	/* Name of the installed MMU implementation. */
	uint32_t version;	/* MINIDUMP_VERSION format version. */
	uint32_t msgbufsize;	/* Size of the kernel message buffer. */
	uint32_t bitmapsize;	/* Size of the dumpable-page bitmap. */
	uint32_t pmapsize;	/* Size of the dumped page table. */
	uint64_t kernbase;	/* VM_MIN_KERNEL_ADDRESS at dump time. */
	uint64_t kernend;	/* VM_MAX_SAFE_KERNEL_ADDRESS at dump time. */
	uint64_t dmapbase;	/* Start of the direct map. */
	uint64_t dmapend;	/* End of the direct map. */
	int hw_direct_map;	/* Copy of hw_direct_map flag. */
	uint64_t startkernel;	/* Kernel image start (__startkernel). */
	uint64_t endkernel;	/* Kernel image end (__endkernel). */
};
#endif /* _MACHINE_MINIDUMP_H_ */

View File

@ -266,11 +266,13 @@ void pmap_deactivate(struct thread *);
vm_paddr_t pmap_kextract(vm_offset_t);
int pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
boolean_t pmap_mmu_install(char *name, int prio);
const char *pmap_mmu_name(void);
#define vtophys(va) pmap_kextract((vm_offset_t)(va))
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
extern caddr_t crashdumpmap;
extern vm_offset_t msgbuf_phys;

View File

@ -0,0 +1,442 @@
/*-
* Copyright (c) 2019 Leandro Lupori
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* $FreeBSD$
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/cons.h>
#include <sys/kerneldump.h>
#include <sys/msgbuf.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/pmap.h>
#include <machine/atomic.h>
#include <machine/dump.h>
#include <machine/md_var.h>
#include <machine/minidump.h>
/*
* bit to physical address
*
* bm - bitmap
* i - bitmap entry index
* bit - bit number
*/
#define BTOP(bm, i, bit) \
(((uint64_t)(i) * sizeof(*(bm)) * NBBY + (bit)) * PAGE_SIZE)
/* Debugging stuff */
#define MINIDUMP_DEBUG 0
#if MINIDUMP_DEBUG
#define dprintf(fmt, ...) printf(fmt, ## __VA_ARGS__)
#define DBG(...) __VA_ARGS__
static size_t total, dumptotal;
static void dump_total(const char *id, size_t sz);
#else
#define dprintf(fmt, ...)
#define DBG(...)
#define dump_total(...)
#endif
extern vm_offset_t __startkernel, __endkernel;
int vm_page_dump_size;
uint64_t *vm_page_dump;
static int dump_retry_count = 5;
SYSCTL_INT(_machdep, OID_AUTO, dump_retry_count, CTLFLAG_RWTUN,
&dump_retry_count, 0,
"Number of times dump has to retry before bailing out");
static struct kerneldumpheader kdh;
static char pgbuf[PAGE_SIZE];
static struct {
int min_per;
int max_per;
int visited;
} progress_track[10] = {
{ 0, 10, 0},
{ 10, 20, 0},
{ 20, 30, 0},
{ 30, 40, 0},
{ 40, 50, 0},
{ 50, 60, 0},
{ 60, 70, 0},
{ 70, 80, 0},
{ 80, 90, 0},
{ 90, 100, 0}
};
static size_t counter, dumpsize, progress;
/* Handle chunked writes. */
static size_t fragsz;
/*
 * Mark the physical page containing 'pa' for inclusion in the minidump
 * by setting its bit in the vm_page_dump bitmap.
 */
void
dump_add_page(vm_paddr_t pa)
{
	vm_paddr_t pfn;
	int word, bitpos;

	pfn = pa >> PAGE_SHIFT;
	word = pfn >> 6;	/* 64 bits per bitmap word */
	bitpos = pfn & 63;
	atomic_set_long(&vm_page_dump[word], 1ul << bitpos);
}
/*
 * Remove the physical page containing 'pa' from the minidump by
 * clearing its bit in the vm_page_dump bitmap.
 */
void
dump_drop_page(vm_paddr_t pa)
{
	vm_paddr_t pfn;
	int word, bitpos;

	pfn = pa >> PAGE_SHIFT;
	word = pfn >> 6;	/* 64 bits per bitmap word */
	bitpos = pfn & 63;
	atomic_clear_long(&vm_page_dump[word], 1ul << bitpos);
}
/*
 * Decide whether physical address 'pa' may be included in the dump.
 * Pages with a vm_page use its PG_NODUMP flag; pages without one are
 * dumpable only if they fall inside a dump_avail range.
 */
int
is_dumpable(vm_paddr_t pa)
{
	vm_page_t pg;
	int idx;

	pg = vm_phys_paddr_to_vm_page(pa);
	if (pg != NULL)
		return ((pg->flags & PG_NODUMP) == 0);

	/* No vm_page: consult the dump_avail ranges instead. */
	for (idx = 0; dump_avail[idx] != 0 || dump_avail[idx + 1] != 0;
	    idx += 2) {
		if (pa >= dump_avail[idx] && pa < dump_avail[idx + 1])
			return (1);
	}
	return (0);
}
/*
 * Re-point a crashdumpmap KVA slot at a new physical page: drop any
 * previous mapping at 'kva' first, then enter the new one.
 */
static void
pmap_kenter_temporary(vm_offset_t kva, vm_paddr_t ppa)
{
	/* Remove the stale mapping before installing the new one. */
	pmap_kremove(kva);
	pmap_kenter(kva, ppa);
}
/*
 * Print dump progress as "..N%" on the console, at most once per
 * 10%-wide bucket, based on the global progress/dumpsize counters.
 */
static void
report_progress(void)
{
	int done, i;

	done = 100 - ((progress * 100) / dumpsize);
	for (i = 0; i < nitems(progress_track); i++) {
		if (done >= progress_track[i].min_per &&
		    done <= progress_track[i].max_per) {
			/* Announce each bucket only the first time. */
			if (!progress_track[i].visited) {
				progress_track[i].visited = 1;
				printf("..%d%%", done);
			}
			return;
		}
	}
}
/*
 * Flush the pages currently staged in crashdumpmap to the dump device
 * and reset the staging counter.  Returns 0 if nothing was pending,
 * otherwise the result of dump_append().
 */
static int
blk_flush(struct dumperinfo *di)
{
	int rc;

	rc = 0;
	if (fragsz != 0) {
		rc = dump_append(di, crashdumpmap, 0, fragsz);
		DBG(dumptotal += fragsz;)
		fragsz = 0;
	}
	return (rc);
}
/*
 * Write 'sz' bytes to the dump device, sourced either from a kernel
 * virtual buffer ('ptr') or from physical memory ('pa'); exactly one of
 * the two may be supplied.  Physical pages are staged through the
 * crashdumpmap window in chunks of up to MAXDUMPPGS pages and flushed
 * via blk_flush().  Returns 0 on success, an errno on failure, or
 * ECANCELED if the user aborts with CTRL-C on the console.
 */
static int
blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz)
{
	size_t len, maxdumpsz;
	int error, i, c;

	/* Largest chunk the dump device accepts in one append. */
	maxdumpsz = MIN(di->maxiosize, MAXDUMPPGS * PAGE_SIZE);
	if (maxdumpsz == 0)	/* seatbelt */
		maxdumpsz = PAGE_SIZE;
	error = 0;
	if ((sz % PAGE_SIZE) != 0) {
		printf("Size not page aligned\n");
		return (EINVAL);
	}
	if (ptr != NULL && pa != 0) {
		printf("Can't have both va and pa!\n");
		return (EINVAL);
	}
	if ((pa % PAGE_SIZE) != 0) {
		printf("Address not page aligned 0x%lx\n", pa);
		return (EINVAL);
	}
	if (ptr != NULL) {
		/*
		 * If we're doing a virtual dump, flush any pre-existing
		 * pa pages
		 */
		error = blk_flush(di);
		if (error)
			return (error);
	}
	while (sz) {
		len = maxdumpsz - fragsz;
		if (len > sz)
			len = sz;
		counter += len;
		progress -= len;
		/* Report progress roughly once per MiB written. */
		if (counter >> 20) {
			report_progress();
			counter &= (1<<20) - 1;
		}
		if (ptr) {
			/* Virtual source: append directly, no staging. */
			error = dump_append(di, ptr, 0, len);
			if (error)
				return (error);
			DBG(dumptotal += len;)
			ptr += len;
		} else {
			/* Physical source: stage pages via crashdumpmap. */
			for (i = 0; i < len; i += PAGE_SIZE)
				pmap_kenter_temporary(
				    (vm_offset_t)crashdumpmap + fragsz + i,
				    pa + i);
			fragsz += len;
			pa += len;
			if (fragsz == maxdumpsz) {
				error = blk_flush(di);
				if (error)
					return (error);
			}
		}
		sz -= len;
		/* Check for user abort. */
		c = cncheckc();
		if (c == 0x03)
			return (ECANCELED);
		if (c != -1)
			printf(" (CTRL-C to abort) ");
	}
	return (0);
}
static int
dump_pmap(struct dumperinfo *di)
{
void *ctx;
char *buf;
u_long nbytes;
int error;
ctx = dumpsys_dump_pmap_init(sizeof(pgbuf) / PAGE_SIZE);
for (;;) {
buf = dumpsys_dump_pmap(ctx, pgbuf, &nbytes);
if (buf == NULL)
break;
error = blk_write(di, buf, 0, nbytes);
if (error)
return (error);
}
return (0);
}
/*
 * Write a minidump: a dump containing only the pages mapped by the
 * kernel pmap, plus the message buffer, the dumpable-page bitmap and
 * the page table, preceded by a minidumphdr describing the layout.
 *
 * Returns 0 on success or an errno on failure.  On ENOSPC (the dump
 * grew while being taken) the whole dump is retried, up to
 * machdep.dump_retry_count times.
 *
 * Fix: the msgbuf blk_write() result was previously not checked, so a
 * failed message-buffer write was silently ignored and the dump
 * continued misaligned; it now goes to 'fail' like every other write.
 */
int
minidumpsys(struct dumperinfo *di)
{
	vm_paddr_t pa;
	int bit, error, i, retry_count;
	uint32_t pmapsize;
	uint64_t bits;
	struct minidumphdr mdhdr;

	retry_count = 0;
retry:
	retry_count++;
	fragsz = 0;
	DBG(total = dumptotal = 0;)

	/* Reset progress */
	counter = 0;
	for (i = 0; i < nitems(progress_track); i++)
		progress_track[i].visited = 0;

	/* Build set of dumpable pages from kernel pmap */
	pmapsize = dumpsys_scan_pmap();
	if (pmapsize % PAGE_SIZE != 0) {
		printf("pmapsize not page aligned: 0x%x\n", pmapsize);
		return (EINVAL);
	}

	/* Calculate dump size */
	dumpsize = PAGE_SIZE;	/* header */
	dumpsize += round_page(msgbufp->msg_size);
	dumpsize += round_page(vm_page_dump_size);
	dumpsize += pmapsize;
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		/* TODO optimize with bit manipulation instructions */
		if (bits == 0)
			continue;
		for (bit = 0; bit < 64; bit++) {
			if ((bits & (1ul<<bit)) == 0)
				continue;

			pa = BTOP(vm_page_dump, i, bit);
			/* Clear out undumpable pages now if needed */
			if (is_dumpable(pa))
				dumpsize += PAGE_SIZE;
			else
				dump_drop_page(pa);
		}
	}
	progress = dumpsize;

	/* Initialize mdhdr */
	bzero(&mdhdr, sizeof(mdhdr));
	strcpy(mdhdr.magic, MINIDUMP_MAGIC);
	strncpy(mdhdr.mmu_name, pmap_mmu_name(), sizeof(mdhdr.mmu_name) - 1);
	mdhdr.version = MINIDUMP_VERSION;
	mdhdr.msgbufsize = msgbufp->msg_size;
	mdhdr.bitmapsize = vm_page_dump_size;
	mdhdr.pmapsize = pmapsize;
	mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS;
	mdhdr.kernend = VM_MAX_SAFE_KERNEL_ADDRESS;
	mdhdr.dmapbase = DMAP_BASE_ADDRESS;
	mdhdr.dmapend = DMAP_MAX_ADDRESS;
	mdhdr.hw_direct_map = hw_direct_map;
	mdhdr.startkernel = __startkernel;
	mdhdr.endkernel = __endkernel;

	dump_init_header(di, &kdh, KERNELDUMPMAGIC, KERNELDUMP_POWERPC_VERSION,
	    dumpsize);

	error = dump_start(di, &kdh);
	if (error)
		goto fail;

	printf("Dumping %lu out of %ju MB:", dumpsize >> 20,
	    ptoa((uintmax_t)physmem) / 1048576);

	/* Dump minidump header */
	bzero(pgbuf, sizeof(pgbuf));
	memcpy(pgbuf, &mdhdr, sizeof(mdhdr));
	error = blk_write(di, pgbuf, 0, PAGE_SIZE);
	if (error)
		goto fail;
	dump_total("header", PAGE_SIZE);

	/* Dump msgbuf up front */
	error = blk_write(di, (char *)msgbufp->msg_ptr, 0,
	    round_page(msgbufp->msg_size));
	if (error)
		goto fail;
	dump_total("msgbuf", round_page(msgbufp->msg_size));

	/* Dump bitmap */
	error = blk_write(di, (char *)vm_page_dump, 0,
	    round_page(vm_page_dump_size));
	if (error)
		goto fail;
	dump_total("bitmap", round_page(vm_page_dump_size));

	/* Dump kernel page directory pages */
	error = dump_pmap(di);
	if (error)
		goto fail;
	dump_total("pmap", pmapsize);

	/* Dump memory chunks */
	/* XXX cluster it up and use blk_dump() */
	for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) {
		bits = vm_page_dump[i];
		/* TODO optimize with bit manipulation instructions */
		if (bits == 0)
			continue;
		for (bit = 0; bit < 64; bit++) {
			if ((bits & (1ul<<bit)) == 0)
				continue;

			pa = BTOP(vm_page_dump, i, bit);
			error = blk_write(di, 0, pa, PAGE_SIZE);
			if (error)
				goto fail;
		}
	}

	/* Push out any pages still staged in crashdumpmap. */
	error = blk_flush(di);
	if (error)
		goto fail;
	dump_total("mem_chunks", dumpsize - total);

	error = dump_finish(di, &kdh);
	if (error)
		goto fail;

	printf("\nDump complete\n");
	return (0);

fail:
	if (error < 0)
		error = -error;

	printf("\n");
	if (error == ENOSPC) {
		printf("Dump map grown while dumping. ");
		if (retry_count < dump_retry_count) {
			printf("Retrying...\n");
			goto retry;
		}
		printf("Dump failed.\n");
	} else if (error == ECANCELED)
		printf("Dump aborted\n");
	else if (error == E2BIG)
		printf("Dump failed. Partition too small.\n");
	else
		printf("** DUMP FAILED (ERROR %d) **\n", error);
	return (error);
}
#if MINIDUMP_DEBUG
/*
 * Debug helper: accumulate 'sz' into the expected running total and
 * print section id, section size, expected total, and the byte count
 * actually appended so far (dumptotal) for cross-checking.
 */
static void
dump_total(const char *id, size_t sz)
{
	total += sz;
	dprintf("\n%s=%08lx/%08lx/%08lx\n",
	    id, sz, total, dumptotal);
}
#endif

View File

@ -130,6 +130,22 @@ CODE {
{
return (0);
}
	/* Default: nothing scanned; reports a zero-sized page table. */
	static size_t mmu_null_scan_pmap(mmu_t mmu)
	{
		return (0);
	}

	/* Default: no dump context; pmap dumping is unsupported. */
	static void *mmu_null_dump_pmap_init(mmu_t mmu, unsigned blkpgs)
	{
		return (NULL);
	}

	/* Default: nothing to dump; always reports end-of-data. */
	static void * mmu_null_dump_pmap(mmu_t mmu, void *ctx, void *buf,
	    u_long *nbytes)
	{
		return (NULL);
	}
};
@ -974,6 +990,51 @@ METHOD void scan_init {
mmu_t _mmu;
};
/**
* @brief Scan kernel PMAP, adding mapped physical pages to dump.
*
* @retval pmap_size Number of bytes used by all PTE entries.
*/
METHOD size_t scan_pmap {
mmu_t _mmu;
} DEFAULT mmu_null_scan_pmap;
/**
* @brief Initialize a PMAP dump.
*
* @param _blkpgs Size of a dump block, in pages.
*
* @retval ctx Dump context, used by dump_pmap.
*/
METHOD void * dump_pmap_init {
mmu_t _mmu;
unsigned _blkpgs;
} DEFAULT mmu_null_dump_pmap_init;
/**
* @brief Dump a block of PTEs.
* The size of the dump block is specified in dump_pmap_init and
* the 'buf' argument must be big enough to hold a full block.
* If the page table resides in regular memory, then the 'buf'
* argument is ignored and a pointer to the specified dump block
* is returned instead, avoiding memory copy. Else, the buffer is
* filled with PTEs and the own buffer pointer is returned.
* In the end, the cursor in 'ctx' is adjusted to point to the next block.
*
* @param _ctx Dump context, retrieved from dump_pmap_init.
* @param _buf Buffer to hold the dump block contents.
* @param _nbytes Number of bytes dumped.
*
* @retval NULL No more blocks to dump.
* @retval buf Pointer to dumped data (may be different than _buf).
*/
METHOD void * dump_pmap {
mmu_t _mmu;
void *_ctx;
void *_buf;
u_long *_nbytes;
} DEFAULT mmu_null_dump_pmap;
/**
* @brief Create a temporary thread-local KVA mapping of a single page.
*

View File

@ -77,6 +77,7 @@ vm_offset_t msgbuf_phys;
vm_offset_t kernel_vm_end;
vm_offset_t virtual_avail;
vm_offset_t virtual_end;
caddr_t crashdumpmap;
int pmap_bootstrapped;
@ -567,6 +568,27 @@ dumpsys_pa_init(void)
return (MMU_SCAN_INIT(mmu_obj));
}
/*
 * Scan the kernel pmap, marking mapped pages dumpable; dispatches to the
 * installed MMU's scan_pmap method.  Returns the page table size in bytes.
 */
size_t
dumpsys_scan_pmap(void)
{
	CTR1(KTR_PMAP, "%s()", __func__);
	return (MMU_SCAN_PMAP(mmu_obj));
}
/*
 * Initialize a page-table dump with blocks of 'blkpgs' pages; dispatches
 * to the installed MMU's dump_pmap_init method.  Returns the dump context.
 */
void *
dumpsys_dump_pmap_init(unsigned blkpgs)
{
	CTR1(KTR_PMAP, "%s()", __func__);
	return (MMU_DUMP_PMAP_INIT(mmu_obj, blkpgs));
}
/*
 * Fetch the next page-table dump block; dispatches to the installed MMU's
 * dump_pmap method.  Returns a pointer to the block data (possibly 'buf'),
 * or NULL when no blocks remain; '*nbytes' receives the block size.
 */
void *
dumpsys_dump_pmap(void *ctx, void *buf, u_long *nbytes)
{
	CTR1(KTR_PMAP, "%s()", __func__);
	return (MMU_DUMP_PMAP(mmu_obj, ctx, buf, nbytes));
}
vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
@ -618,6 +640,12 @@ pmap_mmu_install(char *name, int prio)
return (FALSE);
}
/*
 * Return the kobj class name of the currently installed MMU
 * implementation (recorded in the minidump header).
 */
const char *
pmap_mmu_name(void)
{
	return (mmu_obj->ops->cls->name);
}
int unmapped_buf_allowed;
boolean_t

View File

@ -71,6 +71,9 @@ uma_small_alloc(uma_zone_t zone, vm_size_t bytes, int domain, u_int8_t *flags,
if ((vm_offset_t)pa != pa)
return (NULL);
if ((wait & M_NODUMP) == 0)
dump_add_page(pa);
if (!hw_direct_map) {
pmap_kenter(pa, pa);
va = (void *)(vm_offset_t)pa;
@ -100,6 +103,7 @@ uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
m = PHYS_TO_VM_PAGE((vm_offset_t)mem);
KASSERT(m != NULL,
("Freeing UMA block at %p with no associated page", mem));
dump_add_page(VM_PAGE_TO_PHYS(m));
vm_page_unwire_noq(m);
vm_page_free(m);
atomic_subtract_int(&hw_uma_mdpages, 1);

View File

@ -78,6 +78,8 @@ static struct rmlock mphyp_eviction_lock;
static void mphyp_bootstrap(mmu_t mmup, vm_offset_t kernelstart,
vm_offset_t kernelend);
static void mphyp_cpu_bootstrap(mmu_t mmup, int ap);
static void *mphyp_dump_pmap(mmu_t mmu, void *ctx, void *buf,
u_long *nbytes);
static int64_t mphyp_pte_synch(mmu_t, struct pvo_entry *pvo);
static int64_t mphyp_pte_clear(mmu_t, struct pvo_entry *pvo, uint64_t ptebit);
static int64_t mphyp_pte_unset(mmu_t, struct pvo_entry *pvo);
@ -86,6 +88,7 @@ static int mphyp_pte_insert(mmu_t, struct pvo_entry *pvo);
static mmu_method_t mphyp_methods[] = {
MMUMETHOD(mmu_bootstrap, mphyp_bootstrap),
MMUMETHOD(mmu_cpu_bootstrap, mphyp_cpu_bootstrap),
MMUMETHOD(mmu_dump_pmap, mphyp_dump_pmap),
MMUMETHOD(moea64_pte_synch, mphyp_pte_synch),
MMUMETHOD(moea64_pte_clear, mphyp_pte_clear),
@ -505,3 +508,32 @@ mphyp_pte_insert(mmu_t mmu, struct pvo_entry *pvo)
return (result);
}
/*
 * Dump one block of PTEs from the hypervisor-managed page table.  The
 * table is not directly readable here, so each PTE is fetched with an
 * H_READ hypervisor call and copied into 'buf'.  Advances the cursor in
 * 'ctx' and returns 'buf', or NULL once all PTEs have been dumped;
 * '*nbytes' receives the number of bytes produced.
 */
static void *
mphyp_dump_pmap(mmu_t mmu, void *ctx, void *buf, u_long *nbytes)
{
	struct dump_context *dctx;
	struct lpte p, *pbuf;
	int bufidx;
	uint64_t junk;
	u_long ptex, ptex_end;

	dctx = (struct dump_context *)ctx;
	pbuf = (struct lpte *)buf;
	bufidx = 0;
	/* Clamp this block to the end of the page table. */
	ptex = dctx->ptex;
	ptex_end = ptex + dctx->blksz / sizeof(struct lpte);
	ptex_end = MIN(ptex_end, dctx->ptex_end);
	*nbytes = (ptex_end - ptex) * sizeof(struct lpte);
	if (*nbytes == 0)
		return (NULL);
	for (; ptex < ptex_end; ptex++) {
		/* H_READ yields both halves of the PTE; third slot unused. */
		phyp_pft_hcall(H_READ, 0, ptex, 0, 0,
		    &p.pte_hi, &p.pte_lo, &junk);
		pbuf[bufidx++] = p;
	}
	dctx->ptex = ptex;
	return (buf);
}

View File

@ -658,7 +658,8 @@ vm_page_startup(vm_offset_t vaddr)
#endif
#if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \
defined(__i386__) || defined(__mips__) || defined(__riscv)
defined(__i386__) || defined(__mips__) || defined(__riscv) || \
defined(__powerpc64__)
/*
* Allocate a bitmap to indicate that a random physical page
* needs to be included in a minidump.
@ -684,7 +685,7 @@ vm_page_startup(vm_offset_t vaddr)
(void)last_pa;
#endif
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
defined(__riscv)
defined(__riscv) || defined(__powerpc64__)
/*
* Include the UMA bootstrap pages, witness pages and vm_page_dump
* in a crash dump. When pmap_map() uses the direct map, they are
@ -789,7 +790,7 @@ vm_page_startup(vm_offset_t vaddr)
new_end = vm_reserv_startup(&vaddr, new_end);
#endif
#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__) || \
defined(__riscv)
defined(__riscv) || defined(__powerpc64__)
/*
* Include vm_page_array and vm_reserv_array in a crash dump.
*/