Don't let pmap_object_init_pt() exhaust all available free pages

(allocating pv entries with zalloci) when called in a loop due to
an madvise().  It is possible to completely exhaust the free page list and
cause a system panic when an expected allocation fails.
This commit is contained in:
dillon 2001-10-31 03:06:33 +00:00
parent de8bc4ba10
commit b11fa1d14d
4 changed files with 36 additions and 3 deletions

View File

@@ -2546,7 +2546,7 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
psize = i386_btop(size);
if ((object->type != OBJT_VNODE) ||
(limit && (psize > MAX_INIT_PT) &&
((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
(object->resident_page_count > MAX_INIT_PT))) {
return;
}
@@ -2577,6 +2577,14 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
if (tmpidx >= psize) {
continue;
}
/*
* don't allow an madvise to blow away our really
* free pages allocating pv entries.
*/
if ((limit & MAP_PREFAULT_MADVISE) &&
cnt.v_free_count < cnt.v_free_reserved) {
break;
}
if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->busy == 0) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
@@ -2595,6 +2603,14 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
* else lookup the pages one-by-one.
*/
for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
/*
* don't allow an madvise to blow away our really
* free pages allocating pv entries.
*/
if ((limit & MAP_PREFAULT_MADVISE) &&
cnt.v_free_count < cnt.v_free_reserved) {
break;
}
p = vm_page_lookup(object, tmpidx + pindex);
if (p &&
((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&

View File

@@ -2546,7 +2546,7 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
psize = i386_btop(size);
if ((object->type != OBJT_VNODE) ||
(limit && (psize > MAX_INIT_PT) &&
((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
(object->resident_page_count > MAX_INIT_PT))) {
return;
}
@@ -2577,6 +2577,14 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
if (tmpidx >= psize) {
continue;
}
/*
* don't allow an madvise to blow away our really
* free pages allocating pv entries.
*/
if ((limit & MAP_PREFAULT_MADVISE) &&
cnt.v_free_count < cnt.v_free_reserved) {
break;
}
if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->busy == 0) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
@@ -2595,6 +2603,14 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
* else lookup the pages one-by-one.
*/
for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
/*
* don't allow an madvise to blow away our really
* free pages allocating pv entries.
*/
if ((limit & MAP_PREFAULT_MADVISE) &&
cnt.v_free_count < cnt.v_free_reserved) {
break;
}
p = vm_page_lookup(object, tmpidx + pindex);
if (p &&
((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&

View File

@@ -1339,7 +1339,7 @@ vm_map_madvise(
current->object.vm_object,
pindex,
(count << PAGE_SHIFT),
0
MAP_PREFAULT_MADVISE
);
}
}

View File

@@ -248,6 +248,7 @@ long vmspace_resident_count(struct vmspace *vmspace);
#define MAP_PREFAULT_PARTIAL 0x0010
#define MAP_DISABLE_SYNCER 0x0020
#define MAP_DISABLE_COREDUMP 0x0100
#define MAP_PREFAULT_MADVISE 0x0200 /* from (user) madvise request */
/*
* vm_fault option flags