Various bugfixes/cleanups from me and others:

1) Remove potential race conditions on waking up in vm_page_free_wakeup
   by making sure that it runs at splvm() (see the sketch below).
2) Fix another bug in vm_map_simplify_entry.
3) Convert from the default pager to the swap pager more consistently,
   once an object grows large enough that allocating its pager data
   structures could fail under low-memory conditions.
4) Make some madvise code more efficient.
5) Added some comments.
John Dyson committed 1996-06-16 20:37:31 +00:00
parent 9306343284, commit b5b40fa62b
6 changed files with 66 additions and 90 deletions
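For context on item 1, here is a minimal sketch (illustrative, not literal diff content) of the pattern the vm_page.c hunks below adopt: the free count is updated and the wakeup posted while still at splvm(), so the pageout daemon cannot observe one without the other.

	int s;

	s = splvm();			/* block VM interrupts */
	/* ... detach the page and put it on the free queue ... */
	cnt.v_free_count++;		/* count the page free ... */
	vm_page_free_wakeup();		/* ... and wake sleepers atomically */
	splx(s);			/* restore the previous priority level */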

sys/vm/default_pager.c

@@ -28,7 +28,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: default_pager.c,v 1.7 1996/05/24 05:14:44 dyson Exp $
* $Id: default_pager.c,v 1.8 1996/05/29 05:12:23 dyson Exp $
*/
#include <sys/param.h>
@@ -36,6 +36,7 @@
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -153,3 +154,15 @@ default_pager_convert_to_swap(object)
object->type = OBJT_DEFAULT;
}
}
void
default_pager_convert_to_swapq(object)
vm_object_t object;
{
if (object &&
(object->type == OBJT_DEFAULT) &&
(object != kernel_object && object != kmem_object) &&
(object->size > ((cnt.v_page_count - cnt.v_wire_count) / 4)))
default_pager_convert_to_swap(object);
}
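A worked example of the threshold above, with hypothetical numbers: on a machine with 8192 physical pages (32MB at 4K/page), 1024 of them wired, an object is converted once it exceeds (8192 - 1024) / 4 = 1792 pages, i.e. 7MB. Converting eagerly at that size means the swap pager's data structures are allocated while memory is still available, rather than at pageout time when the allocation could fail.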

sys/vm/default_pager.h

@@ -28,12 +28,13 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $Id: default_pager.h,v 1.3 1995/12/14 09:54:48 phk Exp $
* $Id: default_pager.h,v 1.4 1996/05/31 00:37:55 dyson Exp $
*/
#ifndef _DEFAULT_PAGER_H_
#define _DEFAULT_PAGER_H_ 1
void default_pager_convert_to_swap __P((vm_object_t object));
void default_pager_convert_to_swapq __P((vm_object_t object));
#endif /* _DEFAULT_PAGER_H_ */

sys/vm/vm_fault.c

@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_fault.c,v 1.50 1996/06/10 00:25:40 dyson Exp $
* $Id: vm_fault.c,v 1.51 1996/06/14 23:26:40 davidg Exp $
*/
/*
@@ -522,10 +522,13 @@ RetryFault:;
if (fault_type & VM_PROT_WRITE) {
/*
* We already have an empty page in first_object - use
* it.
* This allows pages to be virtually copied from a backing_object
* into the first_object, where the backing object has no other
* refs to it, and cannot gain any more refs. Instead of a
* bcopy, we just move the page from the backing object to the
* first object. Note that we must mark the page dirty in the
* first object so that it will go out to swap when needed.
*/
if (lookup_still_valid &&
/*
* Only one shadow object
@@ -570,6 +573,17 @@ RetryFault:;
vm_page_copy(m, first_m);
}
/*
* This code handles the case where there are two references to the
* backing object, and one reference is getting a copy of the
* page. If the other reference is the only other object that
* points to the backing object, then perform a virtual copy
* from the backing object to the other object after the
* page is copied to the current first_object. If the other
* object already has the page, we destroy it in the backing object
* performing an optimized collapse-type operation. We don't
* bother removing the page from the backing object's swap space.
*/
if (lookup_still_valid &&
/*
* make sure that we have two shadow objs
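An editorial aside on the "virtual copy" the new comments describe: rather than bcopy()ing the data into a fresh page in first_object, the page itself is moved between objects, roughly as below. This is a paraphrase of the technique, not the literal diff; vm_page_rename() is the routine that rehashes a page under a new (object, pindex) identity.

	/* move the page from the backing object into first_object ... */
	vm_page_rename(m, first_object, first_pindex);
	first_m = m;
	/* ... and mark it dirty so it reaches swap when paged out */
	first_m->dirty = VM_PAGE_BITS_ALL;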
@@ -826,14 +840,6 @@ vm_fault_wire(map, start, end)
*/
for (va = start; va < end; va += PAGE_SIZE) {
/*
while( curproc != pageproc &&
(cnt.v_free_count <= cnt.v_pageout_free_min)) {
VM_WAIT;
}
*/
rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE, TRUE);
if (rv) {
if (va != start)
@@ -966,10 +972,10 @@ vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry)
* Enter it in the pmap...
*/
dst_m->flags |= PG_WRITEABLE|PG_MAPPED;
dst_m->flags &= ~PG_ZERO;
pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
prot, FALSE);
dst_m->flags |= PG_WRITEABLE|PG_MAPPED;
/*
* Mark it no longer busy, and put it on the active list.

sys/vm/vm_map.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.49 1996/05/31 00:37:58 dyson Exp $
* $Id: vm_map.c,v 1.50 1996/06/12 04:03:21 dyson Exp $
*/
/*
@@ -663,11 +663,7 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
map->size += (end - prev_entry->end);
prev_entry->end = end;
prev_object = prev_entry->object.vm_object;
if (prev_object &&
(prev_object->type == OBJT_DEFAULT) &&
(prev_object->size >= ((cnt.v_page_count - cnt.v_wire_count) / 4))) {
default_pager_convert_to_swap(prev_object);
}
default_pager_convert_to_swapq(prev_object);
return (KERN_SUCCESS);
}
}
@@ -715,13 +711,7 @@ vm_map_insert(map, object, offset, start, end, prot, max, cow)
(prev_entry->end >= new_entry->start))
map->first_free = new_entry;
if (object &&
(object != kernel_object) &&
(object != kmem_object) &&
(object->type == OBJT_DEFAULT) &&
(object->size >= ((cnt.v_page_count - cnt.v_wire_count) / 4))) {
default_pager_convert_to_swap(object);
}
default_pager_convert_to_swapq(object);
return (KERN_SUCCESS);
}
@@ -861,7 +851,7 @@ vm_map_simplify_entry(map, entry)
prevsize = prev->end - prev->start;
if ( (prev->end == entry->start) &&
(prev->object.vm_object == entry->object.vm_object) &&
(prev->object.vm_object || (prev->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
(!prev->object.vm_object || (prev->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
(!prev->object.vm_object ||
(prev->offset + prevsize == entry->offset)) &&
(prev->needs_copy == entry->needs_copy) &&
@@ -891,7 +881,7 @@ vm_map_simplify_entry(map, entry)
esize = entry->end - entry->start;
if ((entry->end == next->start) &&
(next->object.vm_object == entry->object.vm_object) &&
(next->object.vm_object || (next->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
(!next->object.vm_object || (next->object.vm_object->behavior == entry->object.vm_object->behavior)) &&
(!entry->object.vm_object ||
(entry->offset + esize == next->offset)) &&
(next->needs_copy == entry->needs_copy) &&
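The two one-character insertions above are the vm_map_simplify_entry fix from item 2. The old guard,

	(prev->object.vm_object || (prev->object.vm_object->behavior == entry->object.vm_object->behavior))

short-circuits to true whenever the object pointer is non-NULL, so the behavior fields were never actually compared, and dereferences a NULL pointer when it is NULL. Negating the first operand restores the usual NULL-safe idiom: the behaviors are compared only when the entries really have an object.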
@@ -1462,7 +1452,8 @@ vm_map_pageable(map, start, end, new_pageable)
* hold the lock on the sharing map.
*/
if (!entry->is_a_map && !entry->is_sub_map) {
if (entry->needs_copy &&
int copyflag = entry->needs_copy;
if (copyflag &&
((entry->protection & VM_PROT_WRITE) != 0)) {
vm_object_shadow(&entry->object.vm_object,
@@ -1476,6 +1467,7 @@ vm_map_pageable(map, start, end, new_pageable)
OFF_TO_IDX(entry->end - entry->start));
entry->offset = (vm_offset_t) 0;
}
default_pager_convert_to_swapq(entry->object.vm_object);
}
}
vm_map_clip_start(map, entry, start);
@@ -1707,16 +1699,14 @@ vm_map_entry_delete(map, entry)
register vm_map_t map;
register vm_map_entry_t entry;
{
if (entry->wired_count != 0)
vm_map_entry_unwire(map, entry);
vm_map_entry_unlink(map, entry);
map->size -= entry->end - entry->start;
if (entry->is_a_map || entry->is_sub_map)
if (entry->is_a_map || entry->is_sub_map) {
vm_map_deallocate(entry->object.share_map);
else
} else {
vm_object_deallocate(entry->object.vm_object);
}
vm_map_entry_dispose(map, entry);
}
@@ -2266,6 +2256,9 @@ RetryLookup:;
entry->offset = 0;
lock_write_to_read(&share_map->lock);
}
if (entry->object.vm_object != NULL)
default_pager_convert_to_swapq(entry->object.vm_object);
/*
* Return the object/offset from this entry. If the entry was
* copy-on-write or empty, it has been fixed up.

sys/vm/vm_object.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.74 1996/05/24 05:17:21 dyson Exp $
* $Id: vm_object.c,v 1.75 1996/05/31 00:38:02 dyson Exp $
*/
/*
@@ -397,8 +397,7 @@ vm_object_terminate(object)
vm_object_page_clean(object, 0, 0, TRUE, FALSE);
vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
VOP_UNLOCK(vp);
}
}
/*
* Now free the pages. For internal objects, this also removes them
* from paging queues.
@@ -577,9 +576,6 @@ vm_object_page_clean(object, start, end, syncio, lockflag)
}
runlen = maxb + maxf + 1;
splx(s);
/*
printf("maxb: %d, maxf: %d, runlen: %d, offset: %d\n", maxb, maxf, runlen, ma[0]->pindex);
*/
vm_pageout_flush(ma, runlen, 0);
goto rescan;
}
@@ -652,7 +648,9 @@ vm_object_pmap_copy(object, start, end)
if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
return;
for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) {
for (p = TAILQ_FIRST(&object->memq);
p != NULL;
p = TAILQ_NEXT(p, listq)) {
vm_page_protect(p, VM_PROT_READ);
}
@@ -676,7 +674,9 @@ vm_object_pmap_remove(object, start, end)
register vm_page_t p;
if (object == NULL)
return;
for (p = TAILQ_FIRST(&object->memq); p != NULL; p = TAILQ_NEXT(p, listq)) {
for (p = TAILQ_FIRST(&object->memq);
p != NULL;
p = TAILQ_NEXT(p, listq)) {
if (p->pindex >= start && p->pindex < end)
vm_page_protect(p, VM_PROT_NONE);
}
@@ -720,42 +720,17 @@ vm_object_madvise(object, pindex, count, advise)
vm_page_activate(m);
} else if ((advise == MADV_DONTNEED) ||
((advise == MADV_FREE) &&
((object->type != OBJT_DEFAULT) && (object->type != OBJT_SWAP)))) {
/*
* If the upper level VM system doesn't think that
* the page is dirty, check the pmap layer.
*/
if (m->dirty == 0) {
vm_page_test_dirty(m);
}
/*
* If the page is not dirty, then we place it onto
* the cache queue. When on the cache queue, it is
* available for immediate reuse.
*/
if (m->dirty == 0) {
vm_page_cache(m);
} else {
/*
* If the page IS dirty, then we remove it from all
* pmaps and deactivate it.
*/
vm_page_protect(m, VM_PROT_NONE);
vm_page_deactivate(m);
}
((object->type != OBJT_DEFAULT) &&
(object->type != OBJT_SWAP)))) {
vm_page_deactivate(m);
} else if (advise == MADV_FREE) {
/*
* Force a demand-zero on next ref
*/
if (object->type == OBJT_SWAP)
swap_pager_dmzspace(object, m->pindex, 1);
/*
vm_page_protect(m, VM_PROT_NONE);
vm_page_free(m);
*/
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
m->dirty = 0;
vm_page_cache(m);
}
}
}
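That hunk is item 4: MADV_DONTNEED (and MADV_FREE on objects that are neither default- nor swap-backed) now simply deactivates the page instead of test-dirtying it and choosing between the cache and inactive queues, while MADV_FREE on a default or swap object clears the dirty state and caches the page for immediate reuse, additionally dropping the swap copy via swap_pager_dmzspace() so the next reference gets a demand-zero page. From userland this path is reached through madvise(2); a hedged usage sketch (buf and len are illustrative):

	#include <sys/mman.h>
	#include <err.h>

	/* hint that the buffer's contents need not be preserved */
	if (madvise(buf, len, MADV_FREE) == -1)
		warn("madvise");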
@@ -1400,11 +1375,6 @@ vm_object_in_map( object)
for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
if( !p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
continue;
/*
if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
continue;
}
*/
if( _vm_object_in_map(&p->p_vmspace->vm_map, object, 0))
return 1;
}

sys/vm/vm_page.c

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.55 1996/06/08 06:48:34 dyson Exp $
* $Id: vm_page.c,v 1.56 1996/06/12 06:52:06 dyson Exp $
*/
/*
@@ -797,7 +797,6 @@ vm_page_free_wakeup()
wakeup(&vm_pageout_pages_needed);
vm_pageout_pages_needed = 0;
}
cnt.v_free_count++;
/*
* wakeup processes that are waiting on memory if we hit a
* high water mark. And wakeup scheduler process if we have
@@ -846,8 +845,9 @@ vm_page_free(m)
TAILQ_INSERT_HEAD(&vm_page_queue_free, m, pageq);
}
splx(s);
cnt.v_free_count++;
vm_page_free_wakeup();
splx(s);
}
void
@@ -869,9 +869,9 @@ vm_page_free_zero(m)
TAILQ_INSERT_HEAD(&vm_page_queue_zero, m, pageq);
++vm_page_zero_count;
splx(s);
cnt.v_free_count++;
vm_page_free_wakeup();
splx(s);
}
/*
@@ -992,14 +992,7 @@ vm_page_cache(m)
TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
m->queue = PQ_CACHE;
cnt.v_cache_count++;
if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
wakeup(&cnt.v_free_count);
wakeup(&proc0);
}
if (vm_pageout_pages_needed) {
wakeup(&vm_pageout_pages_needed);
vm_pageout_pages_needed = 0;
}
vm_page_free_wakeup();
splx(s);
}
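Taken together, the vm_page.c changes implement item 1: the cnt.v_free_count increment moves out of vm_page_free_wakeup() into its callers, and both vm_page_free() and vm_page_free_zero() now call the wakeup routine before splx(), so the free-count update and the wakeup happen as one unit at splvm() (the pattern sketched under the commit message). vm_page_cache() likewise drops its hand-rolled copy of the wakeup logic and funnels through vm_page_free_wakeup().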