Implement i386/i386/pmap.c 1.292 for alpha, ia64 (avoid free
page exhaustion / kernel panic for certain madvise() scenarios)
This commit is contained in:
Matthew Dillon 2001-11-03 01:08:55 +00:00
parent 2899d60638
commit 5d339e3d47
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=85930
2 changed files with 34 additions and 2 deletions

View File

@@ -2466,7 +2466,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
psize = alpha_btop(size);
if ((object->type != OBJT_VNODE) ||
(limit && (psize > MAX_INIT_PT) &&
((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
(object->resident_page_count > MAX_INIT_PT))) {
return;
}
@@ -2494,6 +2494,14 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
if (tmpidx >= psize) {
continue;
}
/*
* don't allow an madvise to blow away our really
* free pages allocating pv entries.
*/
if ((limit & MAP_PREFAULT_MADVISE) &&
cnt.v_free_count < cnt.v_free_reserved) {
break;
}
if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
if ((p->queue - p->pc) == PQ_CACHE)
@@ -2511,6 +2519,14 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
* else lookup the pages one-by-one.
*/
for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
/*
* don't allow an madvise to blow away our really
* free pages allocating pv entries.
*/
if ((limit & MAP_PREFAULT_MADVISE) &&
cnt.v_free_count < cnt.v_free_reserved) {
break;
}
p = vm_page_lookup(object, tmpidx + pindex);
if (p &&
((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&

View File

@@ -1743,7 +1743,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
psize = ia64_btop(size);
if ((object->type != OBJT_VNODE) ||
(limit && (psize > MAX_INIT_PT) &&
((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
(object->resident_page_count > MAX_INIT_PT))) {
pmap_install(oldpmap);
return;
@@ -1771,6 +1771,14 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
if (tmpidx >= psize) {
continue;
}
/*
* don't allow an madvise to blow away our really
* free pages allocating pv entries.
*/
if ((limit & MAP_PREFAULT_MADVISE) &&
cnt.v_free_count < cnt.v_free_reserved) {
break;
}
if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
(p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
if ((p->queue - p->pc) == PQ_CACHE)
@@ -1788,6 +1796,14 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
* else lookup the pages one-by-one.
*/
for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
/*
* don't allow an madvise to blow away our really
* free pages allocating pv entries.
*/
if ((limit & MAP_PREFAULT_MADVISE) &&
cnt.v_free_count < cnt.v_free_reserved) {
break;
}
p = vm_page_lookup(object, tmpidx + pindex);
if (p &&
((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&