Initial support for MADV_FREE, for pages whose contents we no longer
care about.  This gives us a lot of the advantage of freeing individual
pages through munmap, but with almost none of the overhead.
John Dyson 1996-05-23 00:45:58 +00:00
parent b96e2ef41b
commit 0a47b48b9f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=15873
5 changed files with 45 additions and 60 deletions
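For readers coming to this commit cold, here is a minimal userspace sketch of the intended use (illustrative only, not part of the commit; the 16-page region size and the MAP_ANON/MAP_PRIVATE flags are arbitrary example choices). Where munmap would tear down the mapping and force a fresh mmap before reuse, MADV_FREE keeps the mapping intact and merely lets the kernel reclaim the pages and junk their contents:

#include <sys/mman.h>
#include <stdlib.h>

int
main(void)
{
	size_t len = 16 * 4096;		/* example size: 16 pages */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);

	if (p == MAP_FAILED)
		return (1);
	p[0] = 'x';			/* dirty a page */

	/*
	 * Done with the contents: advise the VM system that it may
	 * junk them.  The mapping stays valid, so the region can be
	 * reused later without another mmap; a subsequent touch may
	 * see zero-filled pages if the kernel reclaimed them.
	 */
	madvise(p, len, MADV_FREE);
	return (0);
}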

sys/sys/mman.h

@@ -31,7 +31,7 @@
  * SUCH DAMAGE.
  *
  *	@(#)mman.h	8.2 (Berkeley) 1/9/95
- * $Id: mman.h,v 1.12 1996/03/11 02:09:09 hsu Exp $
+ * $Id: mman.h,v 1.13 1996/05/19 07:36:41 dyson Exp $
  */

 #ifndef _SYS_MMAN_H_
@@ -83,6 +83,7 @@
 #define	MADV_SEQUENTIAL	2	/* expect sequential page references */
 #define	MADV_WILLNEED	3	/* will need these pages */
 #define	MADV_DONTNEED	4	/* dont need these pages */
+#define	MADV_FREE	5	/* dont need these pages, and junk contents */

 /*
  * Return bits from mincore

sys/vm/swap_pager.c

@@ -39,7 +39,7 @@
  * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
  *
  *	@(#)swap_pager.c	8.9 (Berkeley) 3/21/94
- * $Id: swap_pager.c,v 1.65 1996/05/03 21:01:47 phk Exp $
+ * $Id: swap_pager.c,v 1.66 1996/05/18 03:37:32 dyson Exp $
  */

 /*
@@ -438,6 +438,32 @@ swap_pager_freespace(object, start, size)
 	splx(s);
 }

+/*
+ * same as freespace, but don't free, just force a DMZ next time
+ */
+void
+swap_pager_dmzspace(object, start, size)
+	vm_object_t object;
+	vm_pindex_t start;
+	vm_size_t size;
+{
+	vm_pindex_t i;
+	int s;
+
+	s = splbio();
+	for (i = start; i < start + size; i += 1) {
+		int valid;
+		daddr_t *addr = swap_pager_diskaddr(object, i, &valid);
+
+		if (addr && *addr != SWB_EMPTY) {
+			if (valid) {
+				swap_pager_setvalid(object, i, 0);
+			}
+		}
+	}
+	splx(s);
+}
+
 static void
 swap_pager_free_swap(object)
 	vm_object_t object;

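A sketch of the semantics swap_pager_dmzspace enables: marking the swap copy invalid means the next reference to the page demand-zeros ("DMZ") instead of reading stale contents back from swap. From userspace the observable effect is roughly the following (hypothetical test program, not part of the commit; whether you read back 'A' or 0 depends on whether the kernel actually reclaimed the page in between):

#include <sys/mman.h>
#include <stdio.h>

int
main(void)
{
	size_t len = 4096;		/* example: one page */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);

	if (p == MAP_FAILED)
		return (1);
	p[0] = 'A';			/* dirty the page */
	madvise(p, len, MADV_FREE);

	/*
	 * Either outcome is legitimate: 'A' if the page was never
	 * reclaimed, or 0 if it was freed and demand-zeroed on this
	 * reference.  What may not happen is a fault pulling the old
	 * contents back in from swap.
	 */
	printf("%d\n", p[0]);
	return (0);
}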
sys/vm/swap_pager.h

@@ -36,7 +36,7 @@
  * SUCH DAMAGE.
  *
  * from: @(#)swap_pager.h	7.1 (Berkeley) 12/5/90
- * $Id: swap_pager.h,v 1.14 1996/01/30 23:02:29 mpp Exp $
+ * $Id: swap_pager.h,v 1.15 1996/03/03 21:11:06 dyson Exp $
  */

 /*
@@ -73,6 +73,7 @@ int swap_pager_swp_alloc __P((vm_object_t, int));
 void	swap_pager_copy __P((vm_object_t, vm_pindex_t, vm_object_t,
 	    vm_pindex_t, vm_pindex_t));
 void	swap_pager_freespace __P((vm_object_t, vm_pindex_t, vm_size_t));
+void	swap_pager_dmzspace __P((vm_object_t, vm_pindex_t, vm_size_t));
 void	swap_pager_swap_init __P((void));

 #endif

sys/vm/vm_map.c

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_map.c,v 1.45 1996/05/18 03:37:43 dyson Exp $
+ * $Id: vm_map.c,v 1.46 1996/05/19 07:36:46 dyson Exp $
  */

 /*
@@ -1225,6 +1225,7 @@ vm_map_madvise(map, pmap, start, end, advise)
 	 * Right now, we could handle DONTNEED and WILLNEED with common code.
 	 * They are mostly the same, except for the potential async reads (NYI).
 	 */
+	case MADV_FREE:
 	case MADV_DONTNEED:
 		{
 			vm_pindex_t pindex;

sys/vm/vm_object.c

@@ -61,7 +61,7 @@
  * any improvements or extensions that they make and grant Carnegie the
  * rights to redistribute these changes.
  *
- * $Id: vm_object.c,v 1.71 1996/05/21 05:26:27 dyson Exp $
+ * $Id: vm_object.c,v 1.72 1996/05/21 17:13:31 dyson Exp $
  */

 /*
@@ -709,7 +709,9 @@ vm_object_madvise(object, pindex, count, advise)
 		if (advise == MADV_WILLNEED) {
 			if (m->queue != PQ_ACTIVE)
 				vm_page_activate(m);
-		} else if (advise == MADV_DONTNEED) {
+		} else if ((advise == MADV_DONTNEED) ||
+			((advise == MADV_FREE) &&
+				((object->type != OBJT_DEFAULT) && (object->type != OBJT_SWAP)))) {
 			/*
 			 * If the upper level VM system doesn't think that
 			 * the page is dirty, check the pmap layer.
@@ -732,64 +734,18 @@
 				vm_page_protect(m, VM_PROT_NONE);
 				vm_page_deactivate(m);
 			}
+		} else if (advise == MADV_FREE) {
+			/*
+			 * Force a demand-zero on next ref
+			 */
+			if (object->type == OBJT_SWAP)
+				swap_pager_dmzspace(object, m->pindex, 1);
+			vm_page_protect(m, VM_PROT_NONE);
+			vm_page_free(m);
 		}
 	}
 }

-/*
- *	vm_object_copy:
- *
- *	Create a new object which is a copy of an existing
- *	object, and mark all of the pages in the existing
- *	object 'copy-on-write'.  The new object has one reference.
- *	Returns the new object.
- *
- *	May defer the copy until later if the object is not backed
- *	up by a non-default pager.
- *
- */
-void
-vm_object_copy(src_object, src_offset,
-    dst_object, dst_offset, src_needs_copy)
-	register vm_object_t src_object;
-	vm_pindex_t src_offset;
-	vm_object_t *dst_object;	/* OUT */
-	vm_pindex_t *dst_offset;	/* OUT */
-	boolean_t *src_needs_copy;	/* OUT */
-{
-	if (src_object == NULL) {
-		/*
-		 * Nothing to copy
-		 */
-		*dst_object = NULL;
-		*dst_offset = 0;
-		*src_needs_copy = FALSE;
-		return;
-	}
-
-	/*
-	 * Try to collapse the object before copying it.
-	 */
-	if (src_object->handle == NULL &&
-	    (src_object->type == OBJT_DEFAULT ||
-		src_object->type == OBJT_SWAP))
-		vm_object_collapse(src_object);
-
-	/*
-	 * Make another reference to the object
-	 */
-	src_object->ref_count++;
-
-	*dst_object = src_object;
-	*dst_offset = src_offset;
-
-	/*
-	 * Must make a shadow when write is desired
-	 */
-	*src_needs_copy = TRUE;
-	return;
-}
-
 /*
  *	vm_object_shadow:
  *
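An informal restatement of the vm_object_madvise dispatch after this change (a self-contained sketch for orientation; all names here are hypothetical stubs mirroring the kernel's, not code from the commit):

#include <stdio.h>

enum advice { WILLNEED, DONTNEED, FREE };
enum objtype { OBJ_DEFAULT, OBJ_SWAP, OBJ_VNODE };

static const char *
madvise_action(enum advice advise, enum objtype type, int dirty)
{
	if (advise == WILLNEED)
		return ("activate page");
	if (advise == DONTNEED ||
	    (advise == FREE && type != OBJ_DEFAULT && type != OBJ_SWAP))
		/*
		 * MADV_FREE degrades to DONTNEED when the contents cannot
		 * be junked, e.g. vnode-backed file data.
		 */
		return (dirty ? "leave alone" : "deactivate page");
	if (advise == FREE)
		/*
		 * Anonymous/swap-backed: invalidate any swap copy so the
		 * next reference demand-zeros, then free the page outright.
		 */
		return ("invalidate swap copy, free page");
	return ("no-op");
}

int
main(void)
{
	printf("%s\n", madvise_action(FREE, OBJ_VNODE, 0));
	printf("%s\n", madvise_action(FREE, OBJ_SWAP, 0));
	return (0);
}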