minidump: Use the provided dump bitset

When constructing the set of dumpable pages, use the bitset provided by
the state argument, rather than invariably assuming vm_page_dump. For
normal kernel minidumps this will be a pointer to vm_page_dump, but when
dumping the live system it will not.

To do this, the functions in vm_dumpset.h are extended to accept the
desired bitset as an argument. Note that this provided bitset is assumed
to be derived from vm_page_dump, and therefore has the same size.

Reviewed by:	kib, markj, jhb
MFC after:	2 weeks
Sponsored by:	Juniper Networks, Inc.
Sponsored by:	Klara, Inc.
Differential Revision:	https://reviews.freebsd.org/D31992
This commit is contained in:
Mitchell Horne 2021-11-17 11:35:18 -04:00
parent 1d2d1418b4
commit 10fe6f80a6
12 changed files with 71 additions and 57 deletions

View File

@ -211,7 +211,8 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
pa = pdpe & PG_PS_FRAME;
for (n = 0; n < NPDEPG * NPTEPG; n++) {
if (vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(state->dump_bitset,
pa);
pa += PAGE_SIZE;
}
continue;
@ -229,7 +230,8 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
pa = pde & PG_PS_FRAME;
for (k = 0; k < NPTEPG; k++) {
if (vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(
state->dump_bitset, pa);
pa += PAGE_SIZE;
}
continue;
@ -238,7 +240,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
pa = pde & PG_FRAME;
/* set bit for this PTE page */
if (vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(state->dump_bitset, pa);
/* and for each valid page in this 2MB block */
pt = (uint64_t *)PHYS_TO_DMAP(pde & PG_FRAME);
for (k = 0; k < NPTEPG; k++) {
@ -247,7 +249,8 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
continue;
pa = pte & PG_FRAME;
if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(state->dump_bitset,
pa);
}
}
}
@ -258,12 +261,12 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
dumpsize += round_page(mbp->msg_size);
dumpsize += round_page(sizeof(dump_avail));
dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
VM_PAGE_DUMP_FOREACH(pa) {
VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
/* Clear out undumpable pages now if needed */
if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) {
dumpsize += PAGE_SIZE;
} else {
dump_drop_page(pa);
vm_page_dump_drop(state->dump_bitset, pa);
}
}
dumpsize += PAGE_SIZE;
@ -315,7 +318,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
goto fail;
/* Dump bitmap */
error = blk_write(di, (char *)vm_page_dump, 0,
error = blk_write(di, (char *)state->dump_bitset, 0,
round_page(BITSET_SIZE(vm_page_dump_pages)));
if (error)
goto fail;
@ -373,7 +376,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
}
/* Dump memory chunks */
VM_PAGE_DUMP_FOREACH(pa) {
VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
error = blk_write(di, 0, pa, PAGE_SIZE);
if (error)
goto fail;

View File

@ -186,7 +186,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
for (va = KERNBASE; va < kva_end; va += PAGE_SIZE) {
pa = pmap_dump_kextract(va, NULL);
if (pa != 0 && vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(state->dump_bitset, pa);
ptesize += sizeof(pt2_entry_t);
}
@ -196,12 +196,12 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
dumpsize += round_page(mbp->msg_size);
dumpsize += round_page(nitems(dump_avail) * sizeof(uint64_t));
dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
VM_PAGE_DUMP_FOREACH(pa) {
VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
/* Clear out undumpable pages now if needed */
if (vm_phys_is_dumpable(pa))
dumpsize += PAGE_SIZE;
else
dump_drop_page(pa);
vm_page_dump_drop(state->dump_bitset, pa);
}
dumpsize += PAGE_SIZE;
@ -255,7 +255,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
goto fail;
/* Dump bitmap */
error = blk_write(di, (char *)vm_page_dump, 0,
error = blk_write(di, (char *)state->dump_bitset, 0,
round_page(BITSET_SIZE(vm_page_dump_pages)));
if (error)
goto fail;
@ -279,7 +279,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
}
/* Dump memory chunks */
VM_PAGE_DUMP_FOREACH(pa) {
VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
if (!count) {
prev_pa = pa;
count++;

View File

@ -187,14 +187,16 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
for (i = 0; i < Ln_ENTRIES * Ln_ENTRIES;
i++, pa += PAGE_SIZE)
if (vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(state->dump_bitset,
pa);
pmapsize += (Ln_ENTRIES - 1) * PAGE_SIZE;
va += L1_SIZE - L2_SIZE;
} else if ((l2e & ATTR_DESCR_MASK) == L2_BLOCK) {
pa = l2e & ~ATTR_MASK;
for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) {
if (vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(state->dump_bitset,
pa);
}
} else if ((l2e & ATTR_DESCR_MASK) == L2_TABLE) {
for (i = 0; i < Ln_ENTRIES; i++) {
@ -202,9 +204,9 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
if ((l3e & ATTR_DESCR_MASK) != L3_PAGE)
continue;
pa = l3e & ~ATTR_MASK;
pa = l3e & ~ATTR_MASK;
if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(state->dump_bitset,
pa);
}
}
}
@ -215,11 +217,11 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
dumpsize += round_page(mbp->msg_size);
dumpsize += round_page(sizeof(dump_avail));
dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
VM_PAGE_DUMP_FOREACH(pa) {
VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa))
dumpsize += PAGE_SIZE;
else
dump_drop_page(pa);
vm_page_dump_drop(state->dump_bitset, pa);
}
dumpsize += PAGE_SIZE;
@ -270,7 +272,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
goto fail;
/* Dump bitmap */
error = blk_write(di, (char *)vm_page_dump, 0,
error = blk_write(di, (char *)state->dump_bitset, 0,
round_page(BITSET_SIZE(vm_page_dump_pages)));
if (error)
goto fail;
@ -351,7 +353,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
}
/* Dump memory chunks */
VM_PAGE_DUMP_FOREACH(pa) {
VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
error = blk_write(di, 0, pa, PAGE_SIZE);
if (error)
goto fail;

View File

@ -191,7 +191,8 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
pa = pde & PG_PS_FRAME;
for (k = 0; k < NPTEPG; k++) {
if (vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(state->dump_bitset,
pa);
pa += PAGE_SIZE;
}
continue;
@ -204,7 +205,8 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
if ((pte & PG_V) == PG_V) {
pa = pte & PG_FRAME;
if (vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(
state->dump_bitset, pa);
}
}
} else {
@ -218,12 +220,12 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
dumpsize += round_page(mbp->msg_size);
dumpsize += round_page(sizeof(dump_avail));
dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
VM_PAGE_DUMP_FOREACH(pa) {
VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
/* Clear out undumpable pages now if needed */
if (vm_phys_is_dumpable(pa)) {
dumpsize += PAGE_SIZE;
} else {
dump_drop_page(pa);
vm_page_dump_drop(state->dump_bitset, pa);
}
}
dumpsize += PAGE_SIZE;
@ -317,7 +319,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
}
/* Dump memory chunks */
VM_PAGE_DUMP_FOREACH(pa) {
VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
error = blk_write(di, 0, pa, PAGE_SIZE);
if (error)
goto fail;

View File

@ -134,7 +134,8 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
if (pte_test(&pte[i], PTE_V)) {
pa = TLBLO_PTE_TO_PA(pte[i]);
if (vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(state->dump_bitset,
pa);
}
}
}
@ -145,7 +146,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
*/
for (pa = 0; pa < phys_avail[0]; pa += PAGE_SIZE) {
if (vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(state->dump_bitset, pa);
}
/* Calculate dump size. */
@ -154,12 +155,12 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
dumpsize += round_page(mbp->msg_size);
dumpsize += round_page(nitems(dump_avail) * sizeof(uint64_t));
dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
VM_PAGE_DUMP_FOREACH(pa) {
VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
/* Clear out undumpable pages now if needed */
if (vm_phys_is_dumpable(pa))
dumpsize += PAGE_SIZE;
else
dump_drop_page(pa);
vm_page_dump_drop(state->dump_bitset, pa);
}
dumpsize += PAGE_SIZE;
@ -249,8 +250,8 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
prev_pte = 0;
}
/* Dump memory chunks page by page*/
VM_PAGE_DUMP_FOREACH(pa) {
/* Dump memory chunks page by page */
VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
dump_va = pmap_kenter_temporary(pa, 0);
error = write_buffer(di, dump_va, PAGE_SIZE);
if (error)

View File

@ -443,7 +443,7 @@ static int moea64_map_user_ptr(pmap_t pm,
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
static int moea64_decode_kernel_ptr(vm_offset_t addr,
int *is_user, vm_offset_t *decoded_addr);
static size_t moea64_scan_pmap(void);
static size_t moea64_scan_pmap(struct bitset *dump_bitset);
static void *moea64_dump_pmap_init(unsigned blkpgs);
#ifdef __powerpc64__
static void moea64_page_array_startup(long);
@ -3317,7 +3317,7 @@ moea64_scan_init()
#ifdef __powerpc64__
static size_t
moea64_scan_pmap()
moea64_scan_pmap(struct bitset *dump_bitset)
{
struct pvo_entry *pvo;
vm_paddr_t pa, pa_end;
@ -3360,11 +3360,11 @@ moea64_scan_pmap()
pa_end = pa + lpsize;
for (; pa < pa_end; pa += PAGE_SIZE) {
if (vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(dump_bitset, pa);
}
} else {
if (vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(dump_bitset, pa);
}
}
PMAP_UNLOCK(kernel_pmap);
@ -3386,7 +3386,7 @@ moea64_dump_pmap_init(unsigned blkpgs)
#else
static size_t
moea64_scan_pmap()
moea64_scan_pmap(struct bitset *dump_bitset __unused)
{
return (0);
}

View File

@ -40,7 +40,7 @@
void dumpsys_pa_init(void);
void dumpsys_unmap_chunk(vm_paddr_t, size_t, void *);
size_t dumpsys_scan_pmap(void);
size_t dumpsys_scan_pmap(struct bitset *);
void *dumpsys_dump_pmap_init(unsigned blkpgs);
void *dumpsys_dump_pmap(void *ctx, void *buf, u_long *nbytes);

View File

@ -96,7 +96,7 @@ typedef void (*pmap_sync_icache_t)(pmap_t, vm_offset_t, vm_size_t);
typedef void (*pmap_dumpsys_map_chunk_t)(vm_paddr_t, size_t, void **);
typedef void (*pmap_dumpsys_unmap_chunk_t)(vm_paddr_t, size_t, void *);
typedef void (*pmap_dumpsys_pa_init_t)(void);
typedef size_t (*pmap_dumpsys_scan_pmap_t)(void);
typedef size_t (*pmap_dumpsys_scan_pmap_t)(struct bitset *dump_bitset);
typedef void *(*pmap_dumpsys_dump_pmap_init_t)(unsigned);
typedef void *(*pmap_dumpsys_dump_pmap_t)(void *, void *, u_long *);
typedef vm_offset_t (*pmap_quick_enter_page_t)(vm_page_t);

View File

@ -204,7 +204,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
DBG(total = dumptotal = 0;)
/* Build set of dumpable pages from kernel pmap */
pmapsize = dumpsys_scan_pmap();
pmapsize = dumpsys_scan_pmap(state->dump_bitset);
if (pmapsize % PAGE_SIZE != 0) {
printf("pmapsize not page aligned: 0x%x\n", pmapsize);
return (EINVAL);
@ -217,12 +217,12 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
dumpsize += round_page(sizeof(dump_avail));
dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
dumpsize += pmapsize;
VM_PAGE_DUMP_FOREACH(pa) {
VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
/* Clear out undumpable pages now if needed */
if (vm_phys_is_dumpable(pa))
dumpsize += PAGE_SIZE;
else
dump_drop_page(pa);
vm_page_dump_drop(state->dump_bitset, pa);
}
dumpsys_pb_init(dumpsize);
@ -289,7 +289,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
dump_total("pmap", pmapsize);
/* Dump memory chunks */
VM_PAGE_DUMP_FOREACH(pa) {
VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
error = blk_write(di, 0, pa, PAGE_SIZE);
if (error)
goto fail;

View File

@ -188,7 +188,7 @@ DEFINE_PMAP_IFUNC(void, tlbie_all, (void));
DEFINE_DUMPSYS_IFUNC(void, map_chunk, (vm_paddr_t, size_t, void **));
DEFINE_DUMPSYS_IFUNC(void, unmap_chunk, (vm_paddr_t, size_t, void *));
DEFINE_DUMPSYS_IFUNC(void, pa_init, (void));
DEFINE_DUMPSYS_IFUNC(size_t, scan_pmap, (void));
DEFINE_DUMPSYS_IFUNC(size_t, scan_pmap, (struct bitset *));
DEFINE_DUMPSYS_IFUNC(void *, dump_pmap_init, (unsigned));
DEFINE_DUMPSYS_IFUNC(void *, dump_pmap, (void *, void *, u_long *));

View File

@ -197,7 +197,8 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
pa = (l2e >> PTE_PPN1_S) << L2_SHIFT;
for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) {
if (vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(state->dump_bitset,
pa);
}
} else {
for (i = 0; i < Ln_ENTRIES; i++) {
@ -206,7 +207,8 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
continue;
pa = (l3e >> PTE_PPN0_S) * PAGE_SIZE;
if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa))
dump_add_page(pa);
vm_page_dump_add(state->dump_bitset,
pa);
}
}
}
@ -217,12 +219,12 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
dumpsize += round_page(mbp->msg_size);
dumpsize += round_page(sizeof(dump_avail));
dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages));
VM_PAGE_DUMP_FOREACH(pa) {
VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
/* Clear out undumpable pages now if needed */
if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa))
dumpsize += PAGE_SIZE;
else
dump_drop_page(pa);
vm_page_dump_drop(state->dump_bitset, pa);
}
dumpsize += PAGE_SIZE;
@ -327,7 +329,7 @@ cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state)
/* Dump memory chunks */
/* XXX cluster it up and use blk_dump() */
VM_PAGE_DUMP_FOREACH(pa) {
VM_PAGE_DUMP_FOREACH(state->dump_bitset, pa) {
error = blk_write(di, 0, pa, PAGE_SIZE);
if (error)
goto fail;

View File

@ -37,8 +37,12 @@ extern struct bitset *vm_page_dump;
extern long vm_page_dump_pages;
extern vm_paddr_t dump_avail[PHYS_AVAIL_COUNT];
/* For the common case: add/remove a page from the minidump bitset. */
#define dump_add_page(pa) vm_page_dump_add(vm_page_dump, pa)
#define dump_drop_page(pa) vm_page_dump_drop(vm_page_dump, pa)
static inline void
dump_add_page(vm_paddr_t pa)
vm_page_dump_add(struct bitset *bitset, vm_paddr_t pa)
{
vm_pindex_t adj;
int i;
@ -48,7 +52,7 @@ dump_add_page(vm_paddr_t pa)
if (pa >= dump_avail[i] && pa < dump_avail[i + 1]) {
BIT_SET_ATOMIC(vm_page_dump_pages,
(pa >> PAGE_SHIFT) - (dump_avail[i] >> PAGE_SHIFT) +
adj, vm_page_dump);
adj, bitset);
return;
}
adj += howmany(dump_avail[i + 1], PAGE_SIZE) -
@ -57,7 +61,7 @@ dump_add_page(vm_paddr_t pa)
}
static inline void
dump_drop_page(vm_paddr_t pa)
vm_page_dump_drop(struct bitset *bitset, vm_paddr_t pa)
{
vm_pindex_t adj;
int i;
@ -67,7 +71,7 @@ dump_drop_page(vm_paddr_t pa)
if (pa >= dump_avail[i] && pa < dump_avail[i + 1]) {
BIT_CLR_ATOMIC(vm_page_dump_pages,
(pa >> PAGE_SHIFT) - (dump_avail[i] >> PAGE_SHIFT) +
adj, vm_page_dump);
adj, bitset);
return;
}
adj += howmany(dump_avail[i + 1], PAGE_SIZE) -
@ -91,9 +95,9 @@ vm_page_dump_index_to_pa(int bit)
return ((vm_paddr_t)NULL);
}
#define VM_PAGE_DUMP_FOREACH(pa) \
for (vm_pindex_t __b = BIT_FFS(vm_page_dump_pages, vm_page_dump); \
(pa) = vm_page_dump_index_to_pa(__b - 1), __b != 0; \
__b = BIT_FFS_AT(vm_page_dump_pages, vm_page_dump, __b))
#define VM_PAGE_DUMP_FOREACH(bitset, pa) \
for (vm_pindex_t __b = BIT_FFS(vm_page_dump_pages, bitset); \
(pa) = vm_page_dump_index_to_pa(__b - 1), __b != 0; \
__b = BIT_FFS_AT(vm_page_dump_pages, bitset, __b))
#endif /* _SYS_DUMPSET_H_ */