diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index b9a186e2e25b..66159af953c4 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_object.c,v 1.8 1994/10/09 01:52:12 phk Exp $
+ * $Id: vm_object.c,v 1.9 1994/10/15 10:28:46 davidg Exp $
  */
 
 /*
@@ -80,6 +80,7 @@
 #include 
 
 static void _vm_object_allocate(vm_size_t, vm_object_t);
+static void vm_object_rcollapse(vm_object_t, vm_object_t);
 
 /*
  *	Virtual memory objects maintain the actual data
@@ -126,6 +127,9 @@ _vm_object_allocate(size, object)
 {
 	bzero(object, sizeof *object);
 	TAILQ_INIT(&object->memq);
+#ifdef REL2_1a
+	TAILQ_INIT(&object->reverse_shadow_head);
+#endif
 	vm_object_lock_init(object);
 	object->ref_count = 1;
 	object->resident_page_count = 0;
@@ -249,6 +253,10 @@ vm_object_deallocate(object)
 	 */
 	vm_object_lock(object);
 	if (--(object->ref_count) != 0) {
+#ifdef REL2_1a
+		if( object->ref_count == 1)
+			vm_object_rcollapse(object->reverse_shadow_head.tqh_first, object);
+#endif
 		vm_object_unlock(object);
 
 		/*
@@ -291,6 +299,10 @@ vm_object_deallocate(object)
 		vm_object_cache_unlock();
 
 		temp = object->shadow;
+#ifdef REL2_1a
+		if( temp)
+			TAILQ_REMOVE(&temp->reverse_shadow_head, object, reverse_shadow_list);
+#endif
 		vm_object_terminate(object);
 			/* unlocks and deallocates object */
 		object = temp;
@@ -714,6 +726,7 @@ vm_object_pmap_remove(object, start, end)
 
 	if (object == NULL)
 		return;
+	++object->paging_in_progress;
 
 	vm_object_lock(object);
 again:
@@ -733,6 +746,9 @@ again:
 		}
 	}
 	vm_object_unlock(object);
+	--object->paging_in_progress;
+	if( object->paging_in_progress == 0)
+		wakeup((caddr_t) object);
 }
 
 /*
@@ -901,7 +917,14 @@ void vm_object_copy(src_object, src_offset, size,
 		 */
 		src_object->ref_count--;	/* remove ref. from old_copy */
+#ifdef REL2_1a
+		if( old_copy->shadow)
+			TAILQ_REMOVE(&old_copy->shadow->reverse_shadow_head, old_copy, reverse_shadow_list);
+#endif
 		old_copy->shadow = new_copy;
+#ifdef REL2_1a
+		TAILQ_INSERT_TAIL(&old_copy->shadow->reverse_shadow_head, old_copy, reverse_shadow_list);
+#endif
 		new_copy->ref_count++;		/* locking not needed - we
 						   have the only pointer */
 		vm_object_unlock(old_copy);	/* done with old_copy */
@@ -915,6 +938,9 @@ void vm_object_copy(src_object, src_offset, size,
 	 */
 	new_copy->shadow = src_object;
+#ifdef REL2_1a
+	TAILQ_INSERT_TAIL(&new_copy->shadow->reverse_shadow_head, new_copy, reverse_shadow_list);
+#endif
 	new_copy->shadow_offset = new_start;
 	src_object->ref_count++;
 	src_object->copy = new_copy;
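The hunks above add a second piece of bookkeeping to every assignment of object->shadow: the shadowing object is also linked onto its backing object's reverse_shadow_head list, so that vm_object_deallocate() can find the object that shadows a backing object whose reference count has just dropped to 1 and try a reverse collapse on it. Below is a minimal userland model of that invariant, using plain sys/queue.h TAILQs; the obj and obj_set_shadow names are made up for the sketch and this is not the kernel's vm_object code.

    /*
     * Model of the reverse-shadow bookkeeping: whenever o->shadow is
     * repointed, o is removed from the old backing object's list and
     * appended to the new one, so the two views can never disagree.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct obj {
            const char              *name;
            struct obj              *shadow;                /* object I am backed by */
            TAILQ_ENTRY(obj)         reverse_shadow_list;   /* my link in shadow's list */
            TAILQ_HEAD(, obj)        reverse_shadow_head;   /* objects backed by me */
    };

    static struct obj *
    obj_alloc(const char *name)
    {
            struct obj *o = calloc(1, sizeof(*o));
            o->name = name;
            TAILQ_INIT(&o->reverse_shadow_head);
            return (o);
    }

    /* Re-point o->shadow while keeping the reverse list consistent. */
    static void
    obj_set_shadow(struct obj *o, struct obj *backing)
    {
            if (o->shadow != NULL)
                    TAILQ_REMOVE(&o->shadow->reverse_shadow_head, o, reverse_shadow_list);
            o->shadow = backing;
            if (backing != NULL)
                    TAILQ_INSERT_TAIL(&backing->reverse_shadow_head, o, reverse_shadow_list);
    }

    int
    main(void)
    {
            struct obj *backing = obj_alloc("backing");
            struct obj *a = obj_alloc("a"), *b = obj_alloc("b"), *it;

            obj_set_shadow(a, backing);
            obj_set_shadow(b, backing);
            obj_set_shadow(b, NULL);        /* b drops its shadow again */

            TAILQ_FOREACH(it, &backing->reverse_shadow_head, reverse_shadow_list)
                    printf("%s shadows %s\n", it->name, backing->name);    /* only "a" */
            return (0);
    }

The patch enforces the same pairing by hand at every site that touches a shadow pointer, which is why the #ifdef REL2_1a hunks appear wherever object->shadow is assigned.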
@@ -971,6 +997,9 @@ vm_object_shadow(object, offset, length)
 	 *	count.
 	 */
 	result->shadow = source;
+#ifdef REL2_1a
+	TAILQ_INSERT_TAIL(&result->shadow->reverse_shadow_head, result, reverse_shadow_list);
+#endif
 
 	/*
 	 *	Store the offset into the source object,
@@ -1119,6 +1148,136 @@ vm_object_remove(pager)
 	}
 }
 
+#ifdef REL2_1a
+static void
+vm_object_rcollapse(object, sobject)
+	register vm_object_t object, sobject;
+{
+	register vm_object_t backing_object;
+	register vm_offset_t backing_offset, new_offset;
+	register vm_page_t p, pp;
+	register vm_size_t size;
+	int s;
+
+	if( !object)
+		return;
+	backing_object = object->shadow;
+	if( backing_object != sobject) {
+		printf("backing obj != sobject!!!\n");
+		return;
+	}
+	if( !backing_object)
+		return;
+	if( (backing_object->flags & OBJ_INTERNAL) == 0)
+		return;
+	if (backing_object->shadow != NULL &&
+	    backing_object->shadow->copy == backing_object)
+		return;
+	if (backing_object->ref_count != 1)
+		return;
+
+	s = splbio();
+	while( backing_object->paging_in_progress) {
+		tsleep( backing_object, PVM, "rcolow", 0);
+	}
+	splx(s);
+
+	backing_offset = object->shadow_offset;
+	size = object->size;
+	while (p = backing_object->memq.tqh_first) {
+		vm_page_t next;
+
+		new_offset = (p->offset - backing_offset);
+		if (p->offset < backing_offset ||
+		    new_offset >= size) {
+			vm_page_lock_queues();
+			if( backing_object->pager)
+				swap_pager_freespace(backing_object->pager, backing_object->paging_offset + p->offset, PAGE_SIZE);
+			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
+			vm_page_free(p);
+			vm_page_unlock_queues();
+		} else {
+			pp = vm_page_lookup(object, new_offset);
+			if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
+			    object->paging_offset + new_offset))) {
+				vm_page_lock_queues();
+				if( backing_object->pager)
+					swap_pager_freespace(backing_object->pager, backing_object->paging_offset + p->offset, PAGE_SIZE);
+				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
+				vm_page_free(p);
+				vm_page_unlock_queues();
+			} else {
+				vm_page_rename(p, object, new_offset);
+			}
+		}
+	}
+}
+#endif
+
+/*
+ * this version of collapse allows the operation to occur earlier and
+ * when paging_in_progress is true for an object...  This is not a complete
+ * operation, but should plug 99.9% of the rest of the leaks.
+ */
+static void
+vm_object_qcollapse(object)
+	register vm_object_t object;
+{
+	register vm_object_t backing_object;
+	register vm_offset_t backing_offset, new_offset;
+	register vm_page_t p, pp;
+	register vm_size_t size;
+
+	backing_object = object->shadow;
+	if( !backing_object)
+		return;
+	if( (backing_object->flags & OBJ_INTERNAL) == 0)
+		return;
+	if (backing_object->shadow != NULL &&
+	    backing_object->shadow->copy == backing_object)
+		return;
+	if (backing_object->ref_count != 1)
+		return;
+
+	backing_offset = object->shadow_offset;
+	size = object->size;
+	p = backing_object->memq.tqh_first;
+	while (p) {
+		vm_page_t next;
+		next = p->listq.tqe_next;
+		if( (p->flags & (PG_BUSY|PG_FAKE|PG_FICTITIOUS)) ||
+		    p->hold_count || p->wire_count) {
+			p = next;
+			continue;
+		}
+
+		new_offset = (p->offset - backing_offset);
+		if (p->offset < backing_offset ||
+		    new_offset >= size) {
+			vm_page_lock_queues();
+			if( backing_object->pager)
+				swap_pager_freespace(backing_object->pager, backing_object->paging_offset + p->offset, PAGE_SIZE);
+			pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
+			vm_page_free(p);
+			vm_page_unlock_queues();
+		} else {
+			pp = vm_page_lookup(object, new_offset);
+			if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
+			    object->paging_offset + new_offset))) {
+				vm_page_lock_queues();
+				if( backing_object->pager)
+					swap_pager_freespace(backing_object->pager, backing_object->paging_offset + p->offset, PAGE_SIZE);
+				pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
+				vm_page_free(p);
+				vm_page_unlock_queues();
+			} else {
+				vm_page_rename(p, object, new_offset);
+			}
+		}
+		p = next;
+	}
+}
+
 boolean_t vm_object_collapse_allowed = TRUE;
 /*
  *	vm_object_collapse:
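vm_object_rcollapse() and vm_object_qcollapse() above walk the backing object's resident pages and make the same three-way decision for each page: a page lying outside the window the front object maps is freed along with any swap space backing it, a page the front object already has (resident or held by its pager) is freed as redundant, and anything else is renamed into the front object at offset minus backing_offset. The standalone sketch below models only that decision; classify() and front_has_page are hypothetical names and the 4 KB page size is an assumption.

    /*
     * Toy model of the per-page decision: free-out-of-range, free-shadowed,
     * or rename into the front object at (offset - backing_offset).
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    enum action { FREE_OUT_OF_RANGE, FREE_SHADOWED, RENAME_TO_FRONT };

    /* front_has_page stands in for vm_page_lookup()/vm_pager_has_page(). */
    static enum action
    classify(unsigned offset, unsigned backing_offset, unsigned front_size,
        bool front_has_page)
    {
            unsigned new_offset = offset - backing_offset;

            if (offset < backing_offset || new_offset >= front_size)
                    return (FREE_OUT_OF_RANGE);
            if (front_has_page)
                    return (FREE_SHADOWED);
            return (RENAME_TO_FRONT);
    }

    int
    main(void)
    {
            /* front object maps [2, 6) pages of its backing object */
            unsigned backing_offset = 2 * PAGE_SIZE, front_size = 4 * PAGE_SIZE;
            unsigned offsets[] = { 0, 2 * PAGE_SIZE, 3 * PAGE_SIZE, 7 * PAGE_SIZE };
            bool front_has[] = { false, false, true, false };

            for (int i = 0; i < 4; i++)
                    printf("backing page @%u -> %d\n", offsets[i],
                        classify(offsets[i], backing_offset, front_size, front_has[i]));
            return (0);
    }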
@@ -1155,9 +1314,13 @@ vm_object_collapse(object)
 	 *	The object exists and no pages in it are currently
 	 *	being paged out.
 	 */
-	if (object == NULL ||
-	    object->paging_in_progress != 0)
+	if (object == NULL)
 		return;
+	if (object->paging_in_progress != 0) {
+		if( object->shadow)
+			vm_object_qcollapse(object);
+		return;
+	}
 
 	/*
 	 *	There is a backing object, and
@@ -1178,6 +1341,7 @@ vm_object_collapse(object)
 		if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
 		    backing_object->paging_in_progress != 0) {
 			vm_object_unlock(backing_object);
+			vm_object_qcollapse(object);
 			return;
 		}
 
@@ -1250,6 +1414,7 @@ vm_object_collapse(object)
 				if (p->offset < backing_offset ||
 				    new_offset >= size) {
 					vm_page_lock_queues();
+					pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
 					vm_page_free(p);
 					vm_page_unlock_queues();
 				} else {
@@ -1257,6 +1422,7 @@ vm_object_collapse(object)
 				    if (pp != NULL || (object->pager && vm_pager_has_page(object->pager,
 					object->paging_offset + new_offset))) {
 					vm_page_lock_queues();
+					pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
 					vm_page_free(p);
 					vm_page_unlock_queues();
 				} else {
@@ -1317,7 +1483,17 @@ vm_object_collapse(object)
 			 *	moves from within backing_object to within object.
 			 */
 
+#ifdef REL2_1a
+			TAILQ_REMOVE(&object->shadow->reverse_shadow_head, object, reverse_shadow_list);
+			if( backing_object->shadow)
+				TAILQ_REMOVE(&backing_object->shadow->reverse_shadow_head, backing_object, reverse_shadow_list);
+#endif
 			object->shadow = backing_object->shadow;
+#ifdef REL2_1a
+			if( object->shadow)
+				TAILQ_INSERT_TAIL(&object->shadow->reverse_shadow_head, object, reverse_shadow_list);
+#endif
+
 			object->shadow_offset += backing_object->shadow_offset;
 			if (object->shadow != NULL &&
 			    object->shadow->copy != NULL) {
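Several of the hunks above form one interlock: vm_object_pmap_remove() and vm_object_page_remove() now raise paging_in_progress around their work and wakeup() sleepers when it returns to zero, vm_object_rcollapse() sleeps ("rcolow") until the count drains, and vm_object_collapse() falls back to vm_object_qcollapse() instead of giving up when the count is nonzero. The sketch below restates that busy-count pattern with pthreads primitives in place of tsleep()/wakeup(); the busy_obj, paging_begin and paging_end names are invented for the example.

    /*
     * Userland analogue of the paging_in_progress interlock: raise a busy
     * count while touching the object's pages, wake waiters when it drops
     * back to zero, and let a would-be collapser wait for quiescence.
     */
    #include <pthread.h>
    #include <stdio.h>

    struct busy_obj {
            pthread_mutex_t lock;
            pthread_cond_t  idle;
            int             paging_in_progress;
    };

    static void
    paging_begin(struct busy_obj *o)
    {
            pthread_mutex_lock(&o->lock);
            o->paging_in_progress++;
            pthread_mutex_unlock(&o->lock);
    }

    static void
    paging_end(struct busy_obj *o)
    {
            pthread_mutex_lock(&o->lock);
            if (--o->paging_in_progress == 0)
                    pthread_cond_broadcast(&o->idle);       /* wakeup(object) */
            pthread_mutex_unlock(&o->lock);
    }

    /* what vm_object_rcollapse() does with tsleep(..., "rcolow", ...) */
    static void
    wait_until_idle(struct busy_obj *o)
    {
            pthread_mutex_lock(&o->lock);
            while (o->paging_in_progress != 0)
                    pthread_cond_wait(&o->idle, &o->lock);
            pthread_mutex_unlock(&o->lock);
    }

    int
    main(void)
    {
            static struct busy_obj o = { PTHREAD_MUTEX_INITIALIZER,
                PTHREAD_COND_INITIALIZER, 0 };

            paging_begin(&o);
            paging_end(&o);
            wait_until_idle(&o);    /* returns at once: count is back to 0 */
            printf("object idle\n");
            return (0);
    }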
@@ -1398,7 +1574,14 @@ vm_object_collapse(object)
 			 *	count is at least 2.
 			 */
 
+#ifdef REL2_1a
+			TAILQ_REMOVE(&object->shadow->reverse_shadow_head, object, reverse_shadow_list);
+#endif
 			vm_object_reference(object->shadow = backing_object->shadow);
+#ifdef REL2_1a
+			if( object->shadow)
+				TAILQ_INSERT_TAIL(&object->shadow->reverse_shadow_head, object, reverse_shadow_list);
+#endif
 			object->shadow_offset += backing_object->shadow_offset;
 
 			/*
@@ -1450,6 +1633,7 @@ vm_object_page_remove(object, start, end)
 
 	if (object == NULL)
 		return;
+	object->paging_in_progress++;
 
 	start = trunc_page(start);
 	end = round_page(end);
 again:
@@ -1493,6 +1677,9 @@ again:
 			size -= PAGE_SIZE;
 		}
 	}
+	--object->paging_in_progress;
+	if( object->paging_in_progress == 0)
+		wakeup((caddr_t) object);
 }
 
 /*
diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h
index 1ca8ec3a4cf8..cec90ae42544 100644
--- a/sys/vm/vm_object.h
+++ b/sys/vm/vm_object.h
@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id$
+ * $Id: vm_object.h,v 1.2 1994/08/02 07:55:31 davidg Exp $
  */
 
 /*
@@ -98,6 +98,8 @@ struct vm_object {
 	struct vm_object	*shadow;	/* My shadow */
 	vm_offset_t		shadow_offset;	/* Offset in shadow */
 	TAILQ_ENTRY(vm_object)	cached_list;	/* for persistence */
+	TAILQ_ENTRY(vm_object)	reverse_shadow_list; /* chain of objects that are shadowed */
+	TAILQ_HEAD(rslist, vm_object) reverse_shadow_head; /* objects that this is a shadow for */
 };
 /*
  * Flags
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 5dd10c5b0835..2909c716fd29 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -65,7 +65,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_pageout.c,v 1.22 1994/10/23 21:03:09 davidg Exp $
+ * $Id: vm_pageout.c,v 1.23 1994/10/25 05:35:44 davidg Exp $
  */
 
 /*
@@ -95,6 +95,8 @@ int vm_desired_cache_size;
 extern int npendingio;
 extern int hz;
 int vm_pageout_proc_limit;
+int vm_pageout_req_swapout;
+int vm_daemon_needed;
 extern int nswiodone;
 extern int swap_pager_full;
 extern int vm_swap_size;
@@ -172,16 +174,9 @@ vm_pageout_clean(m, sync)
 	    cnt.v_free_count < vm_pageout_free_min)
 		return 0;
 
-	if (!object->pager &&
-	    object->shadow &&
-	    object->shadow->paging_in_progress)
-		return 0;
-
 	if( !sync) {
 		if (object->shadow) {
 			vm_object_collapse(object);
-			if (!vm_page_lookup(object, offset))
-				return 0;
 		}
 
 		if ((m->busy != 0) ||
@@ -198,6 +193,10 @@ vm_pageout_clean(m, sync)
 	for (i = 1; i < vm_pageout_page_count; i++) {
 		ms[i] = vm_page_lookup(object, offset+i*NBPG);
 		if (ms[i]) {
+			if (((ms[i]->flags & PG_CLEAN) != 0) &&
+			    pmap_is_modified(VM_PAGE_TO_PHYS(ms[i]))) {
+				ms[i]->flags &= ~PG_CLEAN;
+			}
 			if (( ((ms[i]->flags & (PG_CLEAN|PG_INACTIVE|PG_BUSY)) == PG_INACTIVE) ||
 			    ( (ms[i]->flags & (PG_CLEAN|PG_BUSY)) == 0 && sync == VM_PAGEOUT_FORCE)) &&
 			    (ms[i]->wire_count == 0)
@@ -344,6 +343,7 @@ vm_pageout_object_deactivate_pages(map, object, count)
 	if (count == 0)
 		count = 1;
 
+#ifndef REL2_1
 	if (object->shadow) {
 		int scount = count;
 		if( object->shadow->ref_count > 1)
@@ -351,6 +351,12 @@
 		if( scount)
 			dcount += vm_pageout_object_deactivate_pages(map, object->shadow, scount);
 	}
+#else
+	if (object->shadow) {
+		if( object->shadow->ref_count == 1)
+			dcount += vm_pageout_object_deactivate_pages(map, object->shadow, count/2);
+	}
+#endif
 
 	if (object->paging_in_progress)
 		return dcount;
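A detail worth calling out in the vm_pageout.c hunks above: PG_CLEAN is only a software hint, so before the daemon trusts it, the page is re-checked against the MMU's modify bit (pmap_is_modified()) and the flag is cleared if the hardware says the page was dirtied; the same patch starts latching the hardware reference bit into PG_REFERENCED. The fragment below is a self-contained model of that resynchronisation with simulated hardware bits; the flag values and the fake_page and resync_flags names are placeholders, not the kernel's.

    /*
     * Model of software page flags being resynchronised with the MMU bits
     * before the pageout code acts on them.  The flag values are arbitrary
     * for this model.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define PG_CLEAN        0x0008
    #define PG_REFERENCED   0x0080

    struct fake_page {
            int     flags;
            bool    hw_modified;    /* what pmap_is_modified() would report */
            bool    hw_referenced;  /* what pmap_is_referenced() would report */
    };

    static void
    resync_flags(struct fake_page *m)
    {
            /* a "clean" page that the MMU has dirtied is not clean any more */
            if ((m->flags & PG_CLEAN) && m->hw_modified)
                    m->flags &= ~PG_CLEAN;

            /* harvest the hardware reference bit into the software latch */
            if (((m->flags & PG_REFERENCED) == 0) && m->hw_referenced) {
                    m->flags |= PG_REFERENCED;
                    m->hw_referenced = false;       /* pmap_clear_reference() */
            }
    }

    int
    main(void)
    {
            struct fake_page m = { PG_CLEAN, true, true };

            resync_flags(&m);
            printf("clean=%d referenced=%d\n",
                (m.flags & PG_CLEAN) != 0, (m.flags & PG_REFERENCED) != 0);
            return (0);
    }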
@@ -473,6 +479,18 @@ vm_pageout_map_deactivate_pages(map, entry, count, freeer)
 	return;
 }
 
+#ifdef REL2_1
+void
+vm_req_vmdaemon() {
+	extern int ticks;
+	static lastrun = 0;
+	if( (ticks > (lastrun + hz/10)) || (ticks < lastrun)) {
+		wakeup((caddr_t) &vm_daemon_needed);
+		lastrun = ticks;
+	}
+}
+#endif
+
 /*
  *	vm_pageout_scan does the dirty work for the pageout daemon.
  */
@@ -490,52 +508,7 @@ vm_pageout_scan()
 	int force_wakeup = 0;
 	int cache_size, orig_cache_size;
 
-#if 0
-	/*
-	 * We manage the cached memory by attempting to keep it
-	 * at about the desired level.
-	 * We deactivate the pages for the oldest cached objects
-	 * first. This keeps pages that are "cached" from hogging
-	 * physical memory.
-	 */
-	orig_cache_size = 0;
-	object = vm_object_cached_list.tqh_first;
-
-	/* calculate the total cached size */
-
-	while( object) {
-		orig_cache_size += object->resident_page_count;
-		object = object->cached_list.tqe_next;
-	}
-
-redeact:
-	cache_size = orig_cache_size;
-	object = vm_object_cached_list.tqh_first;
-	vm_object_cache_lock();
-	while ( object && (cnt.v_inactive_count < cnt.v_inactive_target)) {
-		vm_object_cache_unlock();
-		/*
-		 * if there are no resident pages -- get rid of the object
-		 */
-		if( object->resident_page_count == 0) {
-			if (object != vm_object_lookup(object->pager))
-				panic("vm_pageout_scan: I'm sooo confused.");
-			pager_cache(object, FALSE);
-			goto redeact;
-		} else if( cache_size >= (vm_swap_size?vm_desired_cache_size:0)) {
-			/*
-			 * if there are resident pages -- deactivate them
-			 */
-			vm_object_deactivate_pages(object);
-			cache_size -= object->resident_page_count;
-		}
-		object = object->cached_list.tqe_next;
-
-		vm_object_cache_lock();
-	}
-	vm_object_cache_unlock();
-#endif
-
+#ifndef REL2_1
 morefree:
 	/*
 	 * now swap processes out if we are in low memory conditions
@@ -597,6 +570,23 @@ morefree:
 	    (cnt.v_inactive_target + cnt.v_free_target)) &&
 	    (cnt.v_free_count >= cnt.v_free_target))
 		return force_wakeup;
+#else
+	/* calculate the total cached size */
+
+	if( cnt.v_inactive_count < cnt.v_inactive_target) {
+		vm_req_vmdaemon();
+	}
+
+morefree:
+	/*
+	 * now swap processes out if we are in low memory conditions
+	 */
+	if ((cnt.v_free_count <= cnt.v_free_min) && !swap_pager_full && vm_swap_size&& vm_pageout_req_swapout == 0) {
+		vm_pageout_req_swapout = 1;
+		vm_req_vmdaemon();
+	}
+
+#endif
 
 	pages_freed = 0;
 	desired_free = cnt.v_free_target;
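vm_req_vmdaemon() above is deliberately cheap to call from the pageout path: it wakes the new vm_daemon at most about once every hz/10 clock ticks, and also fires if the tick counter appears to have moved backwards. Here is a standalone model of just that throttling policy; hz is assumed to be 100 and should_wake() is a made-up name standing in for the wakeup() call.

    /* Rate-limited wakeup request: at most ~10 per second, wrap tolerant. */
    #include <stdbool.h>
    #include <stdio.h>

    static int hz = 100;            /* clock interrupts per second (assumed) */

    static bool
    should_wake(int ticks)
    {
            static int lastrun = 0;

            if ((ticks > (lastrun + hz / 10)) || (ticks < lastrun)) {
                    lastrun = ticks;
                    return (true);          /* would wakeup(&vm_daemon_needed) */
            }
            return (false);                 /* too soon, swallow the request */
    }

    int
    main(void)
    {
            int probes[] = { 5, 9, 30, 31, 200, 10 /* wrapped */ };

            for (int i = 0; i < 6; i++)
                    printf("ticks=%3d -> %s\n", probes[i],
                        should_wake(probes[i]) ? "wake daemon" : "skip");
            return (0);
    }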
@@ -641,20 +631,24 @@ rescan1:
 			continue;
 		}
 
-		/*
-		 * NOTE: PG_CLEAN doesn't guarantee that the page is clean.
-		 */
+		if (((m->flags & PG_CLEAN) != 0) && pmap_is_modified(VM_PAGE_TO_PHYS(m)))
+			m->flags &= ~PG_CLEAN;
+
+		if (((m->flags & PG_REFERENCED) == 0) && pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
+			m->flags |= PG_REFERENCED;
+			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
+		}
+
 		if (m->flags & PG_CLEAN) {
 			/*
 			 * If we're not low on memory and the page has been reference,
-			 * or if the page has been modified, then reactivate the page.
+			 * then reactivate the page.
 			 */
-			if (((cnt.v_free_count > vm_pageout_free_min) &&
-			    (pmap_is_referenced(VM_PAGE_TO_PHYS(m)) || ((m->flags & PG_REFERENCED) != 0))) ||
-			    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
+			if ((cnt.v_free_count > vm_pageout_free_min) &&
+			    ((m->flags & PG_REFERENCED) != 0)) {
 				m->flags &= ~PG_REFERENCED;
 				vm_page_activate(m);
-			} else if (!m->act_count) {
+			} else if (m->act_count == 0) {
 				pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
 				vm_page_free(m);
@@ -667,14 +661,13 @@ rescan1:
 			}
 		} else if ((m->flags & PG_LAUNDRY) && maxlaunder > 0) {
 			int written;
-			if (pmap_is_referenced(VM_PAGE_TO_PHYS(m)) ||
-			    ((m->flags & PG_REFERENCED) != 0)) {
-				pmap_clear_reference(VM_PAGE_TO_PHYS(m));
-				vm_page_activate(m);
+			if ((m->flags & PG_REFERENCED) != 0) {
 				m->flags &= ~PG_REFERENCED;
+				vm_page_activate(m);
 				m = next;
 				continue;
 			}
+
 			/*
 			 * If a page is dirty, then it is either
 			 * being washed (but not yet cleaned)
@@ -682,7 +675,6 @@ rescan1:
 			 * still in the laundry, then we start the
 			 * cleaning operation.
 			 */
-
 			written = vm_pageout_clean(m,0);
 			if (written)
 				maxlaunder -= written;
@@ -692,11 +684,9 @@ rescan1:
 			/*
 			 * if the next page has been re-activated, start scanning again
 			 */
-			if ((next->flags & PG_INACTIVE) == 0)
+			if ((written != 0) || ((next->flags & PG_INACTIVE) == 0))
 				goto rescan1;
-		} else if ((m->flags & PG_REFERENCED) ||
-			pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
-			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
+		} else if ((m->flags & PG_REFERENCED) != 0) {
 			m->flags &= ~PG_REFERENCED;
 			vm_page_activate(m);
 		}
@@ -879,11 +869,6 @@ vm_pageout()
 	 */
 	while (TRUE) {
 		int force_wakeup;
-/*
-	cnt.v_free_min = 12 + averunnable.ldavg[0] / 1024;
-	cnt.v_free_target = 2*cnt.v_free_min + cnt.v_free_reserved;
-	cnt.v_inactive_target = cnt.v_free_target*2;
-*/
 		tsleep((caddr_t) &vm_pages_needed, PVM, "psleep", 0);
 		cnt.v_pdwakeups++;
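The rescan1 loop above now makes its reactivate/free/launder decision from the software PG_REFERENCED latch (refreshed once at the top of the loop) rather than querying pmap at every branch. The sketch below condenses that branch structure into a table-style classifier; classify_inactive() is a hypothetical name and the fall-through cases are simplified relative to the real loop.

    /* Condensed decision table for one inactive page. */
    #include <stdbool.h>
    #include <stdio.h>

    enum disposition { ACTIVATE, FREE_PAGE, LAUNDER, LEAVE };

    static enum disposition
    classify_inactive(bool clean, bool referenced, bool laundry,
        int act_count, bool plenty_free)
    {
            if (clean) {
                    if (plenty_free && referenced)
                            return (ACTIVATE);      /* still in use */
                    if (act_count == 0)
                            return (FREE_PAGE);     /* protect, then free */
                    return (LEAVE);                 /* age it some more */
            }
            if (laundry)
                    return (referenced ? ACTIVATE : LAUNDER);
            return (referenced ? ACTIVATE : LEAVE);
    }

    int
    main(void)
    {
            printf("%d %d %d\n",
                classify_inactive(true, true, false, 3, true),      /* ACTIVATE */
                classify_inactive(true, false, false, 0, true),     /* FREE_PAGE */
                classify_inactive(false, false, true, 0, true));    /* LAUNDER */
            return (0);
    }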
@@ -903,3 +888,107 @@ vm_pageout()
 	}
 }
 
+#ifdef REL2_1
+void
+vm_daemon() {
+	int cache_size;
+	vm_object_t object;
+	struct proc *p;
+	while(TRUE) {
+		tsleep((caddr_t) &vm_daemon_needed, PUSER, "psleep", 0);
+		if( vm_pageout_req_swapout) {
+			/*
+			 * swap out inactive processes
+			 */
+			swapout_threads();
+			vm_pageout_req_swapout = 0;
+		}
+	/*
+	 * scan the processes for exceeding their rlimits or if process
+	 * is swapped out -- deactivate pages
+	 */
+
+	for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
+		int overage;
+		quad_t limit;
+		vm_offset_t size;
+
+		/*
+		 * if this is a system process or if we have already
+		 * looked at this process, skip it.
+		 */
+		if (p->p_flag & (P_SYSTEM|P_WEXIT)) {
+			continue;
+		}
+
+		/*
+		 * if the process is in a non-running type state,
+		 * don't touch it.
+		 */
+		if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
+			continue;
+		}
+
+		/*
+		 * get a limit
+		 */
+		limit = qmin(p->p_rlimit[RLIMIT_RSS].rlim_cur,
+			p->p_rlimit[RLIMIT_RSS].rlim_max);
+
+		/*
+		 * let processes that are swapped out really be swapped out
+		 * set the limit to nothing (will force a swap-out.)
+		 */
+		if ((p->p_flag & P_INMEM) == 0)
+			limit = 0;
+
+		size = p->p_vmspace->vm_pmap.pm_stats.resident_count * NBPG;
+		if (limit >= 0 && size >= limit) {
+			overage = (size - limit) / NBPG;
+			vm_pageout_map_deactivate_pages(&p->p_vmspace->vm_map,
+				(vm_map_entry_t) 0, &overage, vm_pageout_object_deactivate_pages);
+		}
+	}
+
+	/*
+	 * We manage the cached memory by attempting to keep it
+	 * at about the desired level.
+	 * We deactivate the pages for the oldest cached objects
+	 * first. This keeps pages that are "cached" from hogging
+	 * physical memory.
+	 */
+restart:
+	cache_size = 0;
+	object = vm_object_cached_list.tqh_first;
+	/* calculate the total cached size */
+	while( object) {
+		cache_size += object->resident_page_count;
+		object = object->cached_list.tqe_next;
+	}
+
+	vm_object_cache_lock();
+	object = vm_object_cached_list.tqh_first;
+	while ( object) {
+		vm_object_cache_unlock();
+		/*
+		 * if there are no resident pages -- get rid of the object
+		 */
+		if( object->resident_page_count == 0) {
+			if (object != vm_object_lookup(object->pager))
+				panic("vm_object_cache_trim: I'm sooo confused.");
+			pager_cache(object, FALSE);
+			goto restart;
+		} else if( cache_size >= (vm_swap_size?vm_desired_cache_size:0)) {
+			/*
+			 * if there are resident pages -- deactivate them
+			 */
+			vm_object_deactivate_pages(object);
+			cache_size -= object->resident_page_count;
+		}
+		object = object->cached_list.tqe_next;
+		vm_object_cache_lock();
+	}
+	vm_object_cache_unlock();
+	}
+}
+#endif
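The per-process scan in vm_daemon() above boils down to a little arithmetic: the resident set limit is min(rlim_cur, rlim_max) of RLIMIT_RSS, forced to zero for a process that is swapped out, and the overage in pages is what gets handed to vm_pageout_map_deactivate_pages(). The standalone function below reproduces just that calculation; rss_overage_pages() is a hypothetical name and NBPG is assumed to be 4096.

    /* RSS overage in pages, as computed by the daemon's process scan. */
    #include <stdio.h>

    #define NBPG    4096L

    static long
    rss_overage_pages(long resident_pages, long rlim_cur, long rlim_max,
        int in_memory)
    {
            long limit = rlim_cur < rlim_max ? rlim_cur : rlim_max; /* qmin() */
            long size = resident_pages * NBPG;

            if (!in_memory)
                    limit = 0;      /* force a full deactivation / swap-out */
            if (size < limit)
                    return (0);     /* under the limit: nothing to do */
            return ((size - limit) / NBPG);
    }

    int
    main(void)
    {
            /* 10 MB resident against an 8 MB RSS limit -> 512 pages over */
            printf("%ld\n", rss_overage_pages(2560, 8L << 20, 16L << 20, 1));
            /* swapped-out process: everything is overage */
            printf("%ld\n", rss_overage_pages(2560, 8L << 20, 16L << 20, 0));
            return (0);
    }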