From b11fa1d14dcd0c423c44976ff988b593de218a89 Mon Sep 17 00:00:00 2001
From: dillon
Date: Wed, 31 Oct 2001 03:06:33 +0000
Subject: [PATCH] Don't let pmap_object_init_pt() exhaust all available free
 pages (allocating pv entries w/ zalloci) when called in a loop due to an
 madvise().  It is possible to completely exhaust the free page list and
 cause a system panic when an expected allocation fails.

---
 sys/amd64/amd64/pmap.c | 18 +++++++++++++++++-
 sys/i386/i386/pmap.c   | 18 +++++++++++++++++-
 sys/vm/vm_map.c        |  2 +-
 sys/vm/vm_map.h        |  1 +
 4 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 591207449177..edd075887212 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2546,7 +2546,7 @@ retry:
 	psize = i386_btop(size);
 
 	if ((object->type != OBJT_VNODE) ||
-		(limit && (psize > MAX_INIT_PT) &&
+		((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
 		(object->resident_page_count > MAX_INIT_PT))) {
 		return;
 	}
@@ -2577,6 +2577,14 @@ retry:
 			if (tmpidx >= psize) {
 				continue;
 			}
+			/*
+			 * don't allow an madvise to blow away our really
+			 * free pages allocating pv entries.
+			 */
+			if ((limit & MAP_PREFAULT_MADVISE) &&
+			    cnt.v_free_count < cnt.v_free_reserved) {
+				break;
+			}
 			if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
 				(p->busy == 0) &&
 				(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
@@ -2595,6 +2603,14 @@ retry:
 		 * else lookup the pages one-by-one.
 		 */
 		for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
+			/*
+			 * don't allow an madvise to blow away our really
+			 * free pages allocating pv entries.
+			 */
+			if ((limit & MAP_PREFAULT_MADVISE) &&
+			    cnt.v_free_count < cnt.v_free_reserved) {
+				break;
+			}
 			p = vm_page_lookup(object, tmpidx + pindex);
 			if (p &&
 			    ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index 591207449177..edd075887212 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2546,7 +2546,7 @@ retry:
 	psize = i386_btop(size);
 
 	if ((object->type != OBJT_VNODE) ||
-		(limit && (psize > MAX_INIT_PT) &&
+		((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
 		(object->resident_page_count > MAX_INIT_PT))) {
 		return;
 	}
@@ -2577,6 +2577,14 @@ retry:
 			if (tmpidx >= psize) {
 				continue;
 			}
+			/*
+			 * don't allow an madvise to blow away our really
+			 * free pages allocating pv entries.
+			 */
+			if ((limit & MAP_PREFAULT_MADVISE) &&
+			    cnt.v_free_count < cnt.v_free_reserved) {
+				break;
+			}
 			if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
 				(p->busy == 0) &&
 				(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
@@ -2595,6 +2603,14 @@ retry:
 		 * else lookup the pages one-by-one.
 		 */
 		for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
+			/*
+			 * don't allow an madvise to blow away our really
+			 * free pages allocating pv entries.
+			 */
+			if ((limit & MAP_PREFAULT_MADVISE) &&
+			    cnt.v_free_count < cnt.v_free_reserved) {
+				break;
+			}
 			p = vm_page_lookup(object, tmpidx + pindex);
 			if (p &&
 			    ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index 9bbe14bf280a..8dc355077805 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -1339,7 +1339,7 @@ vm_map_madvise(
 				current->object.vm_object,
 				pindex,
 				(count << PAGE_SHIFT),
-				0
+				MAP_PREFAULT_MADVISE
 			);
 		}
 	}
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 3776a6ee1c59..c9831d59fc6a 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -248,6 +248,7 @@ long vmspace_resident_count(struct vmspace *vmspace);
 #define	MAP_PREFAULT_PARTIAL	0x0010
 #define	MAP_DISABLE_SYNCER	0x0020
 #define	MAP_DISABLE_COREDUMP	0x0100
+#define	MAP_PREFAULT_MADVISE	0x0200	/* from (user) madvise request */
 
 /*
  * vm_fault option flags