Make sure that pageout deadlocks cannot occur. The problem is that
the data structures needed to support the swap pager can take
enough space to fully deplete system memory and cause a deadlock.
This change keeps large objects from being filled with dirty pages
before the appropriate swap pager data structures exist. Right now,
default objects greater than 1/4 the size of available system memory
are converted to swap objects, thereby eliminating the risk of deadlock.
commit a5b6fd29a3 (parent 256951297d)
Author: John Dyson
Date:   1996-05-29 05:12:23 +00:00

2 changed files with 33 additions and 9 deletions
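In outline, the fix is a single size test applied when a default object grows or is inserted into a map. The following stand-alone sketch restates that policy in user-space C; everything except OBJT_DEFAULT, OBJT_SWAP, and the 1/4-of-unwired-pages threshold (cnt.v_page_count - cnt.v_wire_count in the kernel) is an invented name for illustration, not the kernel code itself:

#include <stddef.h>

/* Illustrative stand-ins for the kernel's vm_object type tags. */
#define OBJT_DEFAULT 0
#define OBJT_SWAP    1

struct object_sketch {
	int    type;	/* OBJT_DEFAULT or OBJT_SWAP */
	size_t size;	/* object size in pages */
};

/*
 * Convert a default object to a swap object once it is large enough
 * that filling it with dirty pages could exhaust unwired memory.
 * page_count and wire_count mirror cnt.v_page_count and cnt.v_wire_count.
 */
static void
maybe_convert_to_swap(struct object_sketch *obj,
    size_t page_count, size_t wire_count)
{
	size_t threshold = (page_count - wire_count) / 4;

	if (obj->type == OBJT_DEFAULT && obj->size >= threshold)
		obj->type = OBJT_SWAP;	/* real code also allocates swap metadata */
}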

default_pager.c:

@@ -28,7 +28,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $Id: default_pager.c,v 1.6 1996/01/19 03:59:36 dyson Exp $
+ * $Id: default_pager.c,v 1.7 1996/05/24 05:14:44 dyson Exp $
  */
 
 #include <sys/param.h>
@@ -125,12 +125,10 @@ default_pager_putpages(object, m, c, sync, rtvals)
 	object->type = OBJT_SWAP;
-	if (swap_pager_swp_alloc(object, M_KERNEL) != 0) {
-		object->type = OBJT_DEFAULT;
-		for (i = 0; i < c; i++)
-			rtvals[i] = VM_PAGER_FAIL;
-		return VM_PAGER_FAIL;
-	}
+	if (swap_pager_swp_alloc(object, M_NOWAIT) != 0) {
+		object->type = OBJT_DEFAULT;
+		for (i = 0; i < c; i++)
+			rtvals[i] = VM_PAGER_FAIL;
+		return VM_PAGER_FAIL;
+	}
 	return swap_pager_putpages(object, m, c, sync, rtvals);
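The M_KERNEL to M_NOWAIT switch above is the fail-fast half of the fix: default_pager_putpages() runs in the pageout path, where sleeping for memory can deadlock against the very daemon that frees memory, so the swap-metadata allocation now fails immediately and the pages are reported back as VM_PAGER_FAIL. (The new default_pager_convert_to_swap() below can keep using M_KERNEL because it is called from vm_map_insert(), where blocking for memory is safe.) A rough user-space sketch of the pattern, reusing the object_sketch type from the earlier sketch; the helper names are hypothetical, not kernel interfaces:

enum { PAGER_OK = 0, PAGER_FAIL = -1 };

/* Simulated M_NOWAIT allocator: returns nonzero instead of sleeping. */
static int
alloc_metadata_nowait(struct object_sketch *obj)
{
	(void)obj;
	return -1;	/* pretend memory is tight right now */
}

static int
putpages_sketch(struct object_sketch *obj, int npages, int *rtvals)
{
	int i;

	obj->type = OBJT_SWAP;
	if (alloc_metadata_nowait(obj) != 0) {
		obj->type = OBJT_DEFAULT;	/* roll the conversion back */
		for (i = 0; i < npages; i++)
			rtvals[i] = PAGER_FAIL;	/* pageout will retry later */
		return PAGER_FAIL;
	}
	return PAGER_OK;	/* real code would hand off to the swap pager */
}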
@@ -145,3 +143,13 @@ default_pager_haspage(object, pindex, before, after)
 {
 	return FALSE;
 }
+
+void
+default_pager_convert_to_swap(object)
+	vm_object_t object;
+{
+	object->type = OBJT_SWAP;
+	if (swap_pager_swp_alloc(object, M_KERNEL) != 0) {
+		object->type = OBJT_DEFAULT;
+	}
+}

vm_map.c:

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_map.c,v 1.46 1996/05/19 07:36:46 dyson Exp $
+ * $Id: vm_map.c,v 1.47 1996/05/23 00:45:54 dyson Exp $
  */
 
 /*
@@ -606,6 +606,7 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
 	register vm_map_entry_t new_entry;
 	register vm_map_entry_t prev_entry;
 	vm_map_entry_t temp_entry;
+	vm_object_t prev_object;
 
 	/*
 	 * Check that the start and end points are not bogus.
@@ -642,17 +643,19 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
 	    (prev_entry->protection == prot) &&
 	    (prev_entry->max_protection == max) &&
 	    (prev_entry->wired_count == 0)) {
 		/*
 		 * See if we can avoid creating a new entry by extending one of our
 		 * neighbors.
 		 */
 		if (object == NULL) {
 			if (vm_object_coalesce(prev_entry->object.vm_object,
 			    OFF_TO_IDX(prev_entry->offset),
 			    (vm_size_t) (prev_entry->end
 				- prev_entry->start),
 			    (vm_size_t) (end - prev_entry->end))) {
 				/*
 				 * Coalesced the two objects - can extend the
 				 * previous map entry to include the new
@@ -660,6 +663,12 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
 				 */
 				map->size += (end - prev_entry->end);
 				prev_entry->end = end;
+				prev_object = prev_entry->object.vm_object;
+				if (prev_object &&
+				    (prev_object->type == OBJT_DEFAULT) &&
+				    (prev_object->size >= ((cnt.v_page_count - cnt.v_wire_count) / 4))) {
+					default_pager_convert_to_swap(prev_object);
+				}
 				return (KERN_SUCCESS);
 			}
 		}
@@ -707,6 +716,13 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
 	    (prev_entry->end >= new_entry->start))
 		map->first_free = new_entry;
 
+	if (object &&
+	    (object != kernel_object) &&
+	    (object != kmem_object) &&
+	    (object->type == OBJT_DEFAULT) &&
+	    (object->size >= ((cnt.v_page_count - cnt.v_wire_count) / 4))) {
+		default_pager_convert_to_swap(object);
+	}
 	return (KERN_SUCCESS);
 }
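Both call sites apply the same test: the coalesce path checks the grown prev_object, and the new-entry path checks the supplied object, skipping kernel_object and kmem_object, which are not candidates for swap backing. To make the 1/4 threshold concrete, here is a worked example with illustrative numbers that are not from the commit:

/* A machine with 32 MB of RAM and 4 KB pages (illustrative only). */
unsigned v_page_count = 8192;	/* 32 MB / 4 KB */
unsigned v_wire_count = 1024;	/* 4 MB wired down, unavailable for paging */
unsigned threshold = (v_page_count - v_wire_count) / 4;	/* 1792 pages = 7 MB */

With these numbers, any default object of 7 MB or more is converted to a swap object at map-insert time, before it can accumulate dirty pages that the pageout path could not flush.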