exclude kmem_alloc'ed ARC data buffers from kernel minidumps on amd64

Excluding other allocations, including UMA-backed ones, now requires only
the addition of a single flag to kmem_alloc or to uma zone creation.

Reviewed by:	alc, avg
MFC after:	2 weeks
This commit is contained in:
Kip Macy 2012-01-27 20:18:31 +00:00
parent 7fa955cd7a
commit 263811f724
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=230623
11 changed files with 29 additions and 3 deletions

View File

@ -41,6 +41,7 @@ __FBSDID("$FreeBSD$");
#include <sys/watchdog.h>
#endif
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>
#include <machine/atomic.h>
#include <machine/elf.h>
@ -75,8 +76,11 @@ CTASSERT(sizeof(*vm_page_dump) == 8);
static int
is_dumpable(vm_paddr_t pa)
{
vm_page_t m;
int i;
if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL)
return ((m->flags & PG_NODUMP) == 0);
for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) {
if (pa >= dump_avail[i] && pa < dump_avail[i + 1])
return (1);

View File

@ -65,7 +65,8 @@ uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
break;
}
pa = m->phys_addr;
dump_add_page(pa);
if ((wait & M_NODUMP) == 0)
dump_add_page(pa);
va = (void *)PHYS_TO_DMAP(pa);
if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
pagezero(va);

View File

@ -45,7 +45,9 @@ MALLOC_DECLARE(M_SOLARIS);
#define KM_SLEEP M_WAITOK
#define KM_PUSHPAGE M_WAITOK
#define KM_NOSLEEP M_NOWAIT
#define KMC_NODEBUG 0
#define KM_ZERO M_ZERO
#define KM_NODEBUG M_NODUMP
#define KMC_NODEBUG UMA_ZONE_NODUMP
#define KMC_NOTOUCH 0
typedef struct kmem_cache {

View File

@ -242,7 +242,7 @@ zio_data_buf_alloc(size_t size)
if (zio_use_uma)
return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
else
return (kmem_alloc(size, KM_SLEEP));
return (kmem_alloc(size, KM_SLEEP | KM_NODEBUG));
}
void

View File

@ -50,6 +50,7 @@
#define M_ZERO 0x0100 /* bzero the allocation */
#define M_NOVM 0x0200 /* don't ask VM for pages */
#define M_USE_RESERVE 0x0400 /* can alloc out of reserve memory */
#define M_NODUMP 0x0800 /* don't dump pages in this allocation */
#define M_MAGIC 877983977 /* time when first defined :-) */

View File

@ -248,6 +248,10 @@ int uma_zsecond_add(uma_zone_t zone, uma_zone_t master);
* backend pages and can fail early.
*/
#define UMA_ZONE_VTOSLAB 0x2000 /* Zone uses vtoslab for lookup. */
#define UMA_ZONE_NODUMP 0x4000 /*
* Zone's pages will not be included in
* mini-dumps.
*/
/*
* These flags are shared between the keg and zone. In zones wishing to add

View File

@ -845,6 +845,9 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int wait)
else
wait &= ~M_ZERO;
if (keg->uk_flags & UMA_ZONE_NODUMP)
wait |= M_NODUMP;
/* zone is passed for legacy reasons. */
mem = allocf(zone, keg->uk_ppera * UMA_SLAB_SIZE, &flags, wait);
if (mem == NULL) {

View File

@ -315,6 +315,8 @@ kmem_alloc_contig(vm_map_t map, vm_size_t size, int flags, vm_paddr_t low,
pflags = VM_ALLOC_SYSTEM | VM_ALLOC_NOBUSY;
if (flags & M_ZERO)
pflags |= VM_ALLOC_ZERO;
if (flags & M_NODUMP)
pflags |= VM_ALLOC_NODUMP;
VM_OBJECT_LOCK(object);
tries = 0;
retry:

View File

@ -382,6 +382,8 @@ kmem_back(vm_map_t map, vm_offset_t addr, vm_size_t size, int flags)
if (flags & M_ZERO)
pflags |= VM_ALLOC_ZERO;
if (flags & M_NODUMP)
pflags |= VM_ALLOC_NODUMP;
VM_OBJECT_LOCK(kmem_object);
for (i = 0; i < size; i += PAGE_SIZE) {

View File

@ -1305,6 +1305,7 @@ vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
* VM_ALLOC_IFNOTCACHED return NULL, do not reactivate if the page
* is cached
* VM_ALLOC_NOBUSY do not set the flag VPO_BUSY on the page
* VM_ALLOC_NODUMP do not include the page in a kernel core dump
* VM_ALLOC_NOOBJ page is not associated with an object and
* should not have the flag VPO_BUSY set
* VM_ALLOC_WIRED wire the allocated page
@ -1429,6 +1430,8 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
* must be cleared before the free page queues lock is released.
*/
flags = 0;
if (req & VM_ALLOC_NODUMP)
flags |= PG_NODUMP;
if (m->flags & PG_ZERO) {
vm_page_zero_count--;
if (req & VM_ALLOC_ZERO)
@ -1599,6 +1602,8 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
flags = 0;
if ((req & VM_ALLOC_ZERO) != 0)
flags = PG_ZERO;
if ((req & VM_ALLOC_NODUMP) != 0)
flags |= PG_NODUMP;
if ((req & VM_ALLOC_WIRED) != 0)
atomic_add_int(&cnt.v_wire_count, npages);
oflags = VPO_UNMANAGED;

View File

@ -263,6 +263,7 @@ extern struct vpglocks pa_lock[];
#define PG_MARKER 0x10 /* special queue marker page */
#define PG_SLAB 0x20 /* object pointer is actually a slab */
#define PG_WINATCFLS 0x40 /* flush dirty page on inactive q */
#define PG_NODUMP 0x80 /* don't include this page in the dump */
/*
* Misc constants.
@ -350,6 +351,7 @@ extern struct vpglocks vm_page_queue_lock;
#define VM_ALLOC_IFCACHED 0x0400 /* Fail if the page is not cached */
#define VM_ALLOC_IFNOTCACHED 0x0800 /* Fail if the page is cached */
#define VM_ALLOC_IGN_SBUSY 0x1000 /* vm_page_grab() only */
#define VM_ALLOC_NODUMP 0x2000 /* don't include in dump */
#define VM_ALLOC_COUNT_SHIFT 16
#define VM_ALLOC_COUNT(count) ((count) << VM_ALLOC_COUNT_SHIFT)