swap_pager.c:

Fixed a long-standing bug in freeing swap space during object collapses.
Stopped 'out of space' messages from printing too often (see the sketch
below).
Modified to use new kmem_malloc() calling convention.
Implemented an additional stat in the swap pager struct to count the
amount of space allocated to that pager. This may be removed at some
point in the future.
Minimized unnecessary wakeups.
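
As a minimal sketch, here is the revised swapsizecheck() as it appears in
the swap_pager.c hunks further down; the message now prints only on the
transition into the full state. The globals are existing kernel state, and
the final reset of swap_pager_full is inferred from context rather than
shown in the hunk:

    static inline void
    swapsizecheck()
    {
            if (vm_swap_size < 128 * btodb(PAGE_SIZE)) {
                    /* print once per transition, not on every check */
                    if (swap_pager_full == 0)
                            printf("swap_pager: out of space\n");
                    swap_pager_full = 1;
            } else if (vm_swap_size > 192 * btodb(PAGE_SIZE))
                    swap_pager_full = 0;    /* inferred reset */
    }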

vm_fault.c:
Don't try to collect fault stats on 'swapped' processes - there aren't
any upages to store the stats in.
Changed read-ahead policy (again!).
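
The new policy, taken from the vm_fault.c hunk below, clamps read-ahead
plus read-behind so that twice cnt.v_free_reserved pages are left as
headroom:

    rahead = raheada;
    if ((rahead + rbehind) >
        ((cnt.v_free_count + cnt.v_cache_count) - 2 * cnt.v_free_reserved)) {
            rahead = ((cnt.v_free_count + cnt.v_cache_count) -
                2 * cnt.v_free_reserved) / 2;
            rbehind = rahead;
            if (!rahead)
                    wakeup((caddr_t) &vm_pages_needed);
    }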

vm_glue.c:
Be sure to gain a reference to the process's map before swapping.
Be sure to lose it when done.
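
Condensed from the vm_glue.c hunks below: the reference is gained before
probing the map lock, and dropped on every exit path of the swapout loop:

    vm_map_reference(&p->p_vmspace->vm_map);
    /*
     * Do not swap out a process that is waiting for VM data
     * structures; there is a possible deadlock.
     */
    if (!lock_try_write(&p->p_vmspace->vm_map.lock)) {
            vm_map_deallocate(&p->p_vmspace->vm_map);
            continue;
    }
    vm_map_unlock(&p->p_vmspace->vm_map);
    /* ... */
    if (p->p_slptime > 4) {
            swapout(p);
            vm_map_deallocate(&p->p_vmspace->vm_map);
            didswap++;
            goto retry;
    }
    vm_map_deallocate(&p->p_vmspace->vm_map);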

kern_malloc.c:
Added the ability to specify whether allocations are at interrupt time or
are 'safe'; this affects what types of pages can be allocated.
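
On the VM side (vm_kern.c below), kmem_malloc() now takes this waitflag
instead of a boolean, and interrupt-time (M_NOWAIT) callers draw from the
interrupt-reserved page pool. Condensed from the hunks below:

    /* inside kmem_malloc(map, size, waitflag), on allocation failure: */
    if (waitflag == M_WAITOK)
            panic("kmem_malloc: map too small");
    return (0);             /* interrupt-time callers just get back NULL */

    /* ...and when grabbing the backing pages: */
    m = vm_page_alloc(kmem_object, offset + i,
        (waitflag == M_NOWAIT) ? VM_ALLOC_INTERRUPT : VM_ALLOC_SYSTEM);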

vm_map.c:
Fixed a variety of map lock problems; there's still a lurking bug that
will eventually bite.
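
One of the fixes, from the vm_map_deallocate() hunk below: the count is
read under ref_lock, a double deallocation now panics, and waiters (such
as vmspace_free()) are woken when a reference goes away:

    simple_lock(&map->ref_lock);
    c = map->ref_count;
    simple_unlock(&map->ref_lock);
    if (c == 0)
            panic("vm_map_deallocate: deallocating already freed map");
    if (c != 1) {
            --map->ref_count;
            wakeup((caddr_t) &map->ref_count);
            return;
    }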

vm_object.c:
Explicitly initialize the object fields rather than bzeroing the struct.
Eliminated the 'rcollapse' code and folded its functionality into the
"real" collapse routine.
Moved an object_unlock() so that the backing_object is protected in
the qcollapse routine.
Make sure nobody fools with the backing_object when we're destroying it.
Added some diagnostic code, callable from the debugger, that looks
through all the internal objects and makes certain that they all belong
to someone.
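
The heart of that check (see _vm_object_in_map() in the vm_object.c hunks
below) is a walk of each map entry's shadow chain; an internal object that
appears on no chain in any map has been leaked:

    /* an object "belongs to someone" if some entry's shadow chain hits it */
    for (obj = entry->object.vm_object; obj; obj = obj->shadow)
            if (obj == object)
                    return 1;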

vm_page.c:
Fixed a rather serious logic bug that would result in random system
crashes.
Changed pagedaemon wakeup policy (again!).
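
Part of the wakeup change, from the vm_page.c hunks below:
vm_pageout_pages_needed is cleared as soon as the wakeup is delivered, so
one request no longer produces repeated wakeups:

    if (vm_pageout_pages_needed) {
            wakeup((caddr_t) &vm_pageout_pages_needed);
            vm_pageout_pages_needed = 0;
    }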

vm_pageout.c:
Removed unnecessary page rotations on the inactive queue.
Changed the number of pages to explicitly free to just the free_reserved
level.
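
From the vm_pageout_scan() hunk at the end (the loop body that actually
moves a page from the cache queue to the free list is elided here):

    /*
     * We try to maintain some *really* free pages, this allows interrupt
     * code to be guaranteed space.
     */
    while (cnt.v_free_count < cnt.v_free_reserved) {
            m = vm_page_queue_cache.tqh_first;
            if (!m)
                    break;
            /* ... move m from the cache queue to the free list ... */
    }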

Submitted by:	John Dyson
Committed by:	David Greenman, 1995-02-02 09:09:15 +00:00
commit a1f6d91cc2 (parent 8e95996cd7)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=6129
10 changed files with 375 additions and 289 deletions

File: swap_pager.c

@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
* $Id: swap_pager.c,v 1.23 1995/01/10 07:32:43 davidg Exp $
* $Id: swap_pager.c,v 1.24 1995/01/24 10:12:12 davidg Exp $
*/
/*
@ -115,6 +115,7 @@ struct pagerlst swap_pager_list; /* list of "named" anon regions */
struct pagerlst swap_pager_un_list; /* list of "unnamed" anon pagers */
#define SWAP_FREE_NEEDED 0x1 /* need a swap block */
#define SWAP_FREE_NEEDED_BY_PAGEOUT 0x2
int swap_pager_needflags;
struct rlist *swapfrag;
@ -146,7 +147,7 @@ static inline void
swapsizecheck()
{
if (vm_swap_size < 128 * btodb(PAGE_SIZE)) {
if (swap_pager_full)
if (swap_pager_full == 0)
printf("swap_pager: out of space\n");
swap_pager_full = 1;
} else if (vm_swap_size > 192 * btodb(PAGE_SIZE))
@ -200,14 +201,6 @@ swap_pager_alloc(handle, size, prot, offset)
swp_clean_t spc;
struct buf *bp;
#if 0
int desiredpendingio;
desiredpendingio = cnt.v_page_count / 200 + 2;
if (desiredpendingio < npendingio)
npendingio = desiredpendingio;
#endif
/*
* kva's are allocated here so that we dont need to keep doing
* kmem_alloc pageables at runtime
@ -217,7 +210,7 @@ swap_pager_alloc(handle, size, prot, offset)
if (!spc->spc_kva) {
break;
}
spc->spc_bp = malloc(sizeof(*bp), M_TEMP, M_NOWAIT);
spc->spc_bp = malloc(sizeof(*bp), M_TEMP, M_KERNEL);
if (!spc->spc_bp) {
kmem_free_wakeup(pager_map, spc->spc_kva, PAGE_SIZE);
break;
@ -249,7 +242,7 @@ swap_pager_alloc(handle, size, prot, offset)
* Pager doesn't exist, allocate swap management resources and
* initialize.
*/
waitok = handle ? M_WAITOK : M_NOWAIT;
waitok = handle ? M_WAITOK : M_KERNEL;
pager = (vm_pager_t) malloc(sizeof *pager, M_VMPAGER, waitok);
if (pager == NULL)
return (NULL);
@ -277,6 +270,7 @@ swap_pager_alloc(handle, size, prot, offset)
}
swp->sw_poip = 0;
swp->sw_allocsize = 0;
if (handle) {
vm_object_t object;
@ -308,7 +302,7 @@ swap_pager_alloc(handle, size, prot, offset)
* if the block has been written
*/
static int *
inline static int *
swap_pager_diskaddr(swp, offset, valid)
sw_pager_t swp;
vm_offset_t offset;
@ -320,7 +314,8 @@ swap_pager_diskaddr(swp, offset, valid)
if (valid)
*valid = 0;
ix = offset / (SWB_NPAGES * PAGE_SIZE);
if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks) {
if ((swp->sw_blocks == NULL) || (ix >= swp->sw_nblocks) ||
(offset >= swp->sw_osize)) {
return (FALSE);
}
swb = &swp->sw_blocks[ix];
@ -361,7 +356,7 @@ swap_pager_setvalid(swp, offset, valid)
* minimization policy.
*/
int
swap_pager_getswapspace(unsigned amount, unsigned *rtval)
swap_pager_getswapspace(sw_pager_t swp, unsigned amount, unsigned *rtval)
{
vm_swap_size -= amount;
if (!rlist_alloc(&swaplist, amount, rtval)) {
@ -369,6 +364,7 @@ swap_pager_getswapspace(unsigned amount, unsigned *rtval)
return 0;
} else {
swapsizecheck();
swp->sw_allocsize += amount;
return 1;
}
}
@ -378,10 +374,11 @@ swap_pager_getswapspace(unsigned amount, unsigned *rtval)
* minimization policy.
*/
void
swap_pager_freeswapspace(unsigned from, unsigned to)
swap_pager_freeswapspace(sw_pager_t swp, unsigned from, unsigned to)
{
rlist_free(&swaplist, from, to);
vm_swap_size += (to - from) + 1;
swp->sw_allocsize -= (to - from) + 1;
swapsizecheck();
}
/*
@ -402,7 +399,7 @@ _swap_pager_freespace(swp, start, size)
int *addr = swap_pager_diskaddr(swp, i, &valid);
if (addr && *addr != SWB_EMPTY) {
swap_pager_freeswapspace(*addr, *addr + btodb(PAGE_SIZE) - 1);
swap_pager_freeswapspace(swp, *addr, *addr + btodb(PAGE_SIZE) - 1);
if (valid) {
swap_pager_setvalid(swp, i, 0);
}
@ -421,6 +418,56 @@ swap_pager_freespace(pager, start, size)
_swap_pager_freespace((sw_pager_t) pager->pg_data, start, size);
}
static void
swap_pager_free_swap(swp)
sw_pager_t swp;
{
register int i, j;
register sw_blk_t bp;
int first_block=0, block_count=0;
int s;
/*
* Free left over swap blocks
*/
s = splbio();
for (i = 0, bp = swp->sw_blocks; i < swp->sw_nblocks; i++, bp++) {
for (j = 0; j < SWB_NPAGES; j++) {
if (bp->swb_block[j] != SWB_EMPTY) {
/*
* initially the length of the run is zero
*/
if( block_count == 0) {
first_block = bp->swb_block[j];
block_count = btodb(PAGE_SIZE);
bp->swb_block[j] = SWB_EMPTY;
/*
* if the new block can be included into the current run
*/
} else if( bp->swb_block[j] == first_block + block_count) {
block_count += btodb(PAGE_SIZE);
bp->swb_block[j] = SWB_EMPTY;
/*
* terminate the previous run, and start a new one
*/
} else {
swap_pager_freeswapspace(swp, first_block,
(unsigned) first_block + block_count - 1);
first_block = bp->swb_block[j];
block_count = btodb(PAGE_SIZE);
bp->swb_block[j] = SWB_EMPTY;
}
}
}
}
if( block_count) {
swap_pager_freeswapspace(swp, first_block,
(unsigned) first_block + block_count - 1);
}
splx(s);
}
/*
* swap_pager_reclaim frees up over-allocated space from all pagers
* this eliminates internal fragmentation due to allocation of space
@ -432,7 +479,7 @@ swap_pager_freespace(pager, start, size)
/*
* Maximum number of blocks (pages) to reclaim per pass
*/
#define MAXRECLAIM 256
#define MAXRECLAIM 128
void
swap_pager_reclaim()
@ -442,7 +489,10 @@ swap_pager_reclaim()
int i, j, k;
int s;
int reclaimcount;
static int reclaims[MAXRECLAIM];
static struct {
int address;
sw_pager_t pager;
} reclaims[MAXRECLAIM];
static int in_reclaim;
/*
@ -476,7 +526,8 @@ swap_pager_reclaim()
for (j = 0; j < SWB_NPAGES; j++) {
if (swb->swb_block[j] != SWB_EMPTY &&
(swb->swb_valid & (1 << j)) == 0) {
reclaims[reclaimcount++] = swb->swb_block[j];
reclaims[reclaimcount].address = swb->swb_block[j];
reclaims[reclaimcount++].pager = swp;
swb->swb_block[j] = SWB_EMPTY;
if (reclaimcount >= MAXRECLAIM)
goto rfinished;
@ -493,10 +544,8 @@ swap_pager_reclaim()
* free the blocks that have been added to the reclaim list
*/
for (i = 0; i < reclaimcount; i++) {
swap_pager_freeswapspace(reclaims[i], reclaims[i] + btodb(PAGE_SIZE) - 1);
wakeup((caddr_t) &in_reclaim);
swap_pager_freeswapspace(reclaims[i].pager, reclaims[i].address, reclaims[i].address + btodb(PAGE_SIZE) - 1);
}
splx(s);
in_reclaim = 0;
wakeup((caddr_t) &in_reclaim);
@ -518,6 +567,7 @@ swap_pager_copy(srcpager, srcoffset, dstpager, dstoffset, offset)
{
sw_pager_t srcswp, dstswp;
vm_offset_t i;
int origsize;
int s;
if (vm_swap_size)
@ -527,6 +577,7 @@ swap_pager_copy(srcpager, srcoffset, dstpager, dstoffset, offset)
return;
srcswp = (sw_pager_t) srcpager->pg_data;
origsize = srcswp->sw_allocsize;
dstswp = (sw_pager_t) dstpager->pg_data;
/*
@ -551,26 +602,13 @@ swap_pager_copy(srcpager, srcoffset, dstpager, dstoffset, offset)
(void) swap_pager_clean();
s = splbio();
/*
* clear source block before destination object
* (release allocated space)
*/
for (i = 0; i < offset + srcoffset; i += PAGE_SIZE) {
int valid;
int *addr = swap_pager_diskaddr(srcswp, i, &valid);
if (addr && *addr != SWB_EMPTY) {
swap_pager_freeswapspace(*addr, *addr + btodb(PAGE_SIZE) - 1);
*addr = SWB_EMPTY;
}
}
/*
* transfer source to destination
*/
for (i = 0; i < dstswp->sw_osize; i += PAGE_SIZE) {
int srcvalid, dstvalid;
int *srcaddrp = swap_pager_diskaddr(srcswp, i + offset + srcoffset,
&srcvalid);
&srcvalid);
int *dstaddrp;
/*
@ -583,19 +621,23 @@ swap_pager_copy(srcpager, srcoffset, dstpager, dstoffset, offset)
* dest.
*/
if (srcvalid) {
dstaddrp = swap_pager_diskaddr(dstswp, i + dstoffset, &dstvalid);
dstaddrp = swap_pager_diskaddr(dstswp, i + dstoffset,
&dstvalid);
/*
* if the dest already has a valid block,
* deallocate the source block without
* copying.
*/
if (!dstvalid && dstaddrp && *dstaddrp != SWB_EMPTY) {
swap_pager_freeswapspace(*dstaddrp, *dstaddrp + btodb(PAGE_SIZE) - 1);
swap_pager_freeswapspace(dstswp, *dstaddrp,
*dstaddrp + btodb(PAGE_SIZE) - 1);
*dstaddrp = SWB_EMPTY;
}
if (dstaddrp && *dstaddrp == SWB_EMPTY) {
*dstaddrp = *srcaddrp;
*srcaddrp = SWB_EMPTY;
dstswp->sw_allocsize += btodb(PAGE_SIZE);
srcswp->sw_allocsize -= btodb(PAGE_SIZE);
swap_pager_setvalid(dstswp, i + dstoffset, 1);
}
}
@ -604,27 +646,21 @@ swap_pager_copy(srcpager, srcoffset, dstpager, dstoffset, offset)
* deallocate the space.
*/
if (*srcaddrp != SWB_EMPTY) {
swap_pager_freeswapspace(*srcaddrp, *srcaddrp + btodb(PAGE_SIZE) - 1);
swap_pager_freeswapspace(srcswp, *srcaddrp,
*srcaddrp + btodb(PAGE_SIZE) - 1);
*srcaddrp = SWB_EMPTY;
}
}
}
/*
* deallocate the rest of the source object
*/
for (i = dstswp->sw_osize + offset + srcoffset; i < srcswp->sw_osize; i += PAGE_SIZE) {
int valid;
int *srcaddrp = swap_pager_diskaddr(srcswp, i, &valid);
if (srcaddrp && *srcaddrp != SWB_EMPTY) {
swap_pager_freeswapspace(*srcaddrp, *srcaddrp + btodb(PAGE_SIZE) - 1);
*srcaddrp = SWB_EMPTY;
}
}
splx(s);
/*
* Free left over swap blocks
*/
swap_pager_free_swap(srcswp);
if( srcswp->sw_allocsize)
printf("swap_pager_copy: *warning* pager with %d blocks (orig: %d)\n", srcswp->sw_allocsize, origsize);
free((caddr_t) srcswp->sw_blocks, M_VMPGDATA);
srcswp->sw_blocks = 0;
free((caddr_t) srcswp, M_VMPGDATA);
@ -634,13 +670,10 @@ swap_pager_copy(srcpager, srcoffset, dstpager, dstoffset, offset)
return;
}
void
swap_pager_dealloc(pager)
vm_pager_t pager;
{
register int i, j;
register sw_blk_t bp;
register sw_pager_t swp;
int s;
@ -672,17 +705,10 @@ swap_pager_dealloc(pager)
/*
* Free left over swap blocks
*/
s = splbio();
for (i = 0, bp = swp->sw_blocks; i < swp->sw_nblocks; i++, bp++) {
for (j = 0; j < SWB_NPAGES; j++)
if (bp->swb_block[j] != SWB_EMPTY) {
swap_pager_freeswapspace((unsigned) bp->swb_block[j],
(unsigned) bp->swb_block[j] + btodb(PAGE_SIZE) - 1);
bp->swb_block[j] = SWB_EMPTY;
}
}
splx(s);
swap_pager_free_swap(swp);
if( swp->sw_allocsize)
printf("swap_pager_dealloc: *warning* freeing pager with %d blocks\n", swp->sw_allocsize);
/*
* Free swap management resources
*/
@ -871,10 +897,6 @@ swap_pager_iodone1(bp)
bp->b_flags |= B_DONE;
bp->b_flags &= ~B_ASYNC;
wakeup((caddr_t) bp);
/*
if ((bp->b_flags & B_READ) == 0)
vwakeup(bp);
*/
}
@ -997,7 +1019,7 @@ swap_pager_input(swp, m, count, reqpage)
spc = NULL; /* we might not use an spc data structure */
if (count == 1) {
if ((count == 1) && (swap_pager_free.tqh_first != NULL)) {
/*
* if a kva has not been allocated, we can only do a one page
* transfer, so we free the other pages that might have been
@ -1021,6 +1043,8 @@ swap_pager_input(swp, m, count, reqpage)
wakeup((caddr_t) &vm_pages_needed);
while (swap_pager_free.tqh_first == NULL) {
swap_pager_needflags |= SWAP_FREE_NEEDED;
if (curproc == pageproc)
swap_pager_needflags |= SWAP_FREE_NEEDED_BY_PAGEOUT;
tsleep((caddr_t) &swap_pager_free,
PVM, "swpfre", 0);
if (curproc == pageproc)
@ -1119,9 +1143,11 @@ swap_pager_input(swp, m, count, reqpage)
crfree(bp->b_wcred);
TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
if (swap_pager_needflags & SWAP_FREE_NEEDED) {
swap_pager_needflags &= ~SWAP_FREE_NEEDED;
wakeup((caddr_t) &swap_pager_free);
}
if( swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT)
wakeup((caddr_t) &vm_pages_needed);
swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT);
} else {
/*
* release the physical I/O buffer
@ -1144,12 +1170,7 @@ swap_pager_input(swp, m, count, reqpage)
* results, it is best to deactivate
* the readahead pages.
*/
/*
if (sequential || (i == reqpage - 1) || (i == reqpage + 1))
vm_page_activate(m[i]);
else
*/
vm_page_deactivate(m[i]);
vm_page_deactivate(m[i]);
/*
* just in case someone was asking for
@ -1258,12 +1279,12 @@ swap_pager_output(swp, m, count, flags, rtvals)
* intent of this code is to allocate small chunks for
* small objects)
*/
if ((m[j]->offset == 0) && (ntoget * PAGE_SIZE > object->size)) {
if (ntoget * PAGE_SIZE > object->size) {
ntoget = (object->size + (PAGE_SIZE - 1)) / PAGE_SIZE;
}
retrygetspace:
if (!swap_pager_full && ntoget > 1 &&
swap_pager_getswapspace(ntoget * btodb(PAGE_SIZE), &blk)) {
swap_pager_getswapspace(swp, ntoget * btodb(PAGE_SIZE), &blk)) {
for (i = 0; i < ntoget; i++) {
swb[j]->swb_block[i] = blk + btodb(PAGE_SIZE) * i;
@ -1271,7 +1292,7 @@ swap_pager_output(swp, m, count, flags, rtvals)
}
reqaddr[j] = swb[j]->swb_block[off];
} else if (!swap_pager_getswapspace(btodb(PAGE_SIZE),
} else if (!swap_pager_getswapspace(swp, btodb(PAGE_SIZE),
&swb[j]->swb_block[off])) {
/*
* if the allocation has failed, we try to
@ -1326,9 +1347,6 @@ swap_pager_output(swp, m, count, flags, rtvals)
printf("I/O to empty block????\n");
}
/*
* */
/*
* For synchronous writes, we clean up all completed async pageouts.
*/
@ -1337,46 +1355,29 @@ swap_pager_output(swp, m, count, flags, rtvals)
}
kva = 0;
/*
* we allocate a new kva for transfers > 1 page but for transfers == 1
* page, the swap_pager_free list contains entries that have
* pre-allocated kva's (for efficiency). NOTE -- we do not use the
* physical buffer pool or the preallocated associated kva's because
* of the potential for deadlock. This is very subtile -- but
* deadlocks or resource contention must be avoided on pageouts -- or
* your system will sleep (forever) !!!
*/
/*
if ( count > 1) {
kva = kmem_alloc_pageable(pager_map, count*PAGE_SIZE);
if( !kva) {
for (i = 0; i < count; i++) {
if( swb[i])
--swb[i]->swb_locked;
rtvals[i] = VM_PAGER_AGAIN;
}
return VM_PAGER_AGAIN;
}
}
*/
/*
* get a swap pager clean data structure, block until we get it
*/
if (swap_pager_free.tqh_first == NULL || swap_pager_free.tqh_first->spc_list.tqe_next == NULL || swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
if (swap_pager_free.tqh_first == NULL ||
swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
s = splbio();
if (curproc == pageproc) {
(void) swap_pager_clean();
/*
#if 0
splx(s);
return VM_PAGER_AGAIN;
*/
#endif
} else
wakeup((caddr_t) &vm_pages_needed);
while (swap_pager_free.tqh_first == NULL || swap_pager_free.tqh_first->spc_list.tqe_next == NULL || swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
if (curproc == pageproc &&
(cnt.v_free_count + cnt.v_cache_count) >= cnt.v_free_min)
wakeup((caddr_t) &cnt.v_free_count);
while (swap_pager_free.tqh_first == NULL ||
swap_pager_free.tqh_first->spc_list.tqe_next == NULL ||
swap_pager_free.tqh_first->spc_list.tqe_next->spc_list.tqe_next == NULL) {
if (curproc == pageproc) {
swap_pager_needflags |= SWAP_FREE_NEEDED_BY_PAGEOUT;
if((cnt.v_free_count + cnt.v_cache_count) > cnt.v_free_reserved)
wakeup((caddr_t) &cnt.v_free_count);
}
swap_pager_needflags |= SWAP_FREE_NEEDED;
tsleep((caddr_t) &swap_pager_free,
@ -1538,9 +1539,11 @@ swap_pager_output(swp, m, count, flags, rtvals)
crfree(bp->b_wcred);
TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
if (swap_pager_needflags & SWAP_FREE_NEEDED) {
swap_pager_needflags &= ~SWAP_FREE_NEEDED;
wakeup((caddr_t) &swap_pager_free);
}
if( swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT)
wakeup((caddr_t) &vm_pages_needed);
swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT);
return (rv);
}
@ -1584,9 +1587,11 @@ swap_pager_clean()
spc->spc_flags = 0;
TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
if (swap_pager_needflags & SWAP_FREE_NEEDED) {
swap_pager_needflags &= ~SWAP_FREE_NEEDED;
wakeup((caddr_t) &swap_pager_free);
}
if( swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT)
wakeup((caddr_t) &vm_pages_needed);
swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT);
++cleandone;
splx(s);
}
@ -1656,10 +1661,6 @@ swap_pager_iodone(bp)
(bp->b_flags & B_READ) ? "pagein" : "pageout",
bp->b_error, (u_long) bp->b_blkno, bp->b_bcount);
}
/*
if ((bp->b_flags & B_READ) == 0)
vwakeup(bp);
*/
if (bp->b_vp)
pbrelvp(bp);
@ -1680,10 +1681,16 @@ swap_pager_iodone(bp)
swap_pager_inuse.tqh_first == 0) {
swap_pager_needflags &= ~SWAP_FREE_NEEDED;
wakeup((caddr_t) &swap_pager_free);
}
if( swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT) {
swap_pager_needflags &= ~SWAP_FREE_NEEDED_BY_PAGEOUT;
wakeup((caddr_t) &vm_pages_needed);
}
if (vm_pageout_pages_needed) {
wakeup((caddr_t) &vm_pageout_pages_needed);
vm_pageout_pages_needed = 0;
}
if ((swap_pager_inuse.tqh_first == NULL) ||
((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min &&

File: swap_pager.h

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)swap_pager.h 7.1 (Berkeley) 12/5/90
* $Id: swap_pager.h,v 1.3 1994/10/09 01:52:06 phk Exp $
* $Id: swap_pager.h,v 1.4 1995/01/09 16:05:37 davidg Exp $
*/
/*
@ -69,6 +69,7 @@ typedef struct swblock *sw_blk_t;
struct swpager {
vm_size_t sw_osize; /* size of object we are backing (bytes) */
int sw_nblocks; /* number of blocks in list (sw_blk_t units) */
int sw_allocsize; /* amount of space actually allocated */
sw_blk_t sw_blocks; /* pointer to list of swap blocks */
short sw_flags; /* flags */
short sw_poip; /* pageouts in progress */

File: vm_fault.c

@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_fault.c,v 1.16 1995/01/24 10:12:29 davidg Exp $
* $Id: vm_fault.c,v 1.17 1995/01/26 01:40:04 davidg Exp $
*/
/*
@ -376,10 +376,6 @@ RetryFault:;
* if moved.
*/
m = vm_page_lookup(object, offset);
if (!m) {
printf("vm_fault: error fetching offset: %lx (fc: %d, rq: %d)\n",
offset, faultcount, reqpage);
}
m->valid = VM_PAGE_BITS_ALL;
pmap_clear_modify(VM_PAGE_TO_PHYS(m));
hardfault++;
@ -857,7 +853,7 @@ RetryFault:;
vm_page_activate(m);
}
if (curproc && curproc->p_stats) {
if (curproc && (curproc->p_flag & P_INMEM) && curproc->p_stats) {
if (hardfault) {
curproc->p_stats->p_ru.ru_majflt++;
} else {
@ -908,11 +904,11 @@ vm_fault_wire(map, start, end)
for (va = start; va < end; va += PAGE_SIZE) {
if( curproc != pageproc &&
while( curproc != pageproc &&
(cnt.v_free_count <= cnt.v_pageout_free_min))
VM_WAIT;
rv = vm_fault(map, va, VM_PROT_NONE, TRUE);
rv = vm_fault(map, va, VM_PROT_READ|VM_PROT_WRITE, TRUE);
if (rv) {
if (va != start)
vm_fault_unwire(map, start, va);
@ -1156,8 +1152,8 @@ vm_fault_additional_pages(first_object, first_offset, m, rbehind, raheada, marra
* try to do any readahead that we might have free pages for.
*/
rahead = raheada;
if ((rahead + rbehind) > ((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved)) {
rahead = ((cnt.v_free_count + cnt.v_cache_count) - cnt.v_free_reserved) / 2;
if ((rahead + rbehind) > ((cnt.v_free_count + cnt.v_cache_count) - 2*cnt.v_free_reserved)) {
rahead = ((cnt.v_free_count + cnt.v_cache_count) - 2*cnt.v_free_reserved) / 2;
rbehind = rahead;
if (!rahead)
wakeup((caddr_t) &vm_pages_needed);

File: vm_glue.c

@ -59,7 +59,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_glue.c,v 1.12 1995/01/10 07:32:45 davidg Exp $
* $Id: vm_glue.c,v 1.13 1995/01/24 10:12:39 davidg Exp $
*/
#include <sys/param.h>
@ -450,11 +450,13 @@ swapout_threads()
if ((p->p_priority & 0x7f) < PSOCK)
continue;
vm_map_reference(&p->p_vmspace->vm_map);
/*
* do not swapout a process that is waiting for VM
* datastructures there is a possible deadlock.
*/
if (!lock_try_write(&p->p_vmspace->vm_map.lock)) {
vm_map_deallocate(&p->p_vmspace->vm_map);
continue;
}
vm_map_unlock(&p->p_vmspace->vm_map);
@ -462,11 +464,13 @@ swapout_threads()
* If the process has been asleep for awhile and had
* most of its pages taken away already, swap it out.
*/
if (p->p_slptime > maxslp) {
if (p->p_slptime > 4) {
swapout(p);
vm_map_deallocate(&p->p_vmspace->vm_map);
didswap++;
goto retry;
}
vm_map_deallocate(&p->p_vmspace->vm_map);
}
}
/*

File: vm_kern.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_kern.c,v 1.8 1995/01/09 16:05:43 davidg Exp $
* $Id: vm_kern.c,v 1.9 1995/01/24 10:12:51 davidg Exp $
*/
/*
@ -72,6 +72,7 @@
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
@ -277,10 +278,10 @@ kmem_suballoc(parent, min, max, size, pageable)
* for wired maps are statically allocated.
*/
vm_offset_t
kmem_malloc(map, size, canwait)
kmem_malloc(map, size, waitflag)
register vm_map_t map;
register vm_size_t size;
boolean_t canwait;
boolean_t waitflag;
{
register vm_offset_t offset, i;
vm_map_entry_t entry;
@ -306,7 +307,7 @@ kmem_malloc(map, size, canwait)
panic("kmem_malloc: %s too small",
map == kmem_map ? "kmem_map" : "mb_map");
#endif
if (canwait)
if (waitflag == M_WAITOK)
panic("kmem_malloc: map too small");
return (0);
}
@ -318,7 +319,7 @@ kmem_malloc(map, size, canwait)
* If we can wait, just mark the range as wired (will fault pages as
* necessary).
*/
if (canwait) {
if (waitflag == M_WAITOK) {
vm_map_unlock(map);
(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size,
FALSE);
@ -331,7 +332,8 @@ kmem_malloc(map, size, canwait)
*/
vm_object_lock(kmem_object);
for (i = 0; i < size; i += PAGE_SIZE) {
m = vm_page_alloc(kmem_object, offset + i, VM_ALLOC_INTERRUPT);
m = vm_page_alloc(kmem_object, offset + i,
(waitflag == M_NOWAIT) ? VM_ALLOC_INTERRUPT : VM_ALLOC_SYSTEM);
/*
* Ran out of space, free everything up and return. Don't need

File: vm_map.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.11 1995/01/10 07:32:46 davidg Exp $
* $Id: vm_map.c,v 1.12 1995/01/24 10:13:02 davidg Exp $
*/
/*
@ -216,6 +216,9 @@ vmspace_free(vm)
register struct vmspace *vm;
{
if (vm->vm_refcnt == 0)
panic("vmspace_free: attempt to free already freed vmspace");
if (--vm->vm_refcnt == 0) {
/*
* Lock the map, to wait out all other references to it.
@ -225,6 +228,10 @@ vmspace_free(vm)
vm_map_lock(&vm->vm_map);
(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
vm->vm_map.max_offset);
vm_map_unlock(&vm->vm_map);
while( vm->vm_map.ref_count != 1)
tsleep(&vm->vm_map.ref_count, PVM, "vmsfre", 0);
--vm->vm_map.ref_count;
pmap_release(&vm->vm_pmap);
FREE(vm, M_VMMAP);
}
@ -448,10 +455,15 @@ vm_map_deallocate(map)
return;
simple_lock(&map->ref_lock);
c = --map->ref_count;
c = map->ref_count;
simple_unlock(&map->ref_lock);
if (c > 0) {
if (c == 0)
panic("vm_map_deallocate: deallocating already freed map");
if (c != 1) {
--map->ref_count;
wakeup((caddr_t) &map->ref_count);
return;
}
/*
@ -459,11 +471,14 @@ vm_map_deallocate(map)
*/
vm_map_lock(map);
(void) vm_map_delete(map, map->min_offset, map->max_offset);
--map->ref_count;
if( map->ref_count != 0) {
vm_map_unlock(map);
return;
}
pmap_destroy(map->pmap);
FREE(map, M_VMMAP);
}
@ -1039,8 +1054,10 @@ vm_map_protect(map, start, end, new_prot, set_max)
current = entry;
while ((current != &map->header) && (current->start < end)) {
if (current->is_sub_map)
if (current->is_sub_map) {
vm_map_unlock(map);
return (KERN_INVALID_ARGUMENT);
}
if ((new_prot & current->max_protection) != new_prot) {
vm_map_unlock(map);
return (KERN_PROTECTION_FAILURE);

File: vm_object.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.19 1995/01/24 10:13:14 davidg Exp $
* $Id: vm_object.c,v 1.20 1995/01/25 20:36:29 davidg Exp $
*/
/*
@ -82,9 +82,9 @@
#include <vm/vm_pager.h>
#include <vm/swap_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_kern.h>
static void _vm_object_allocate(vm_size_t, vm_object_t);
static void vm_object_rcollapse(vm_object_t, vm_object_t);
/*
* Virtual memory objects maintain the actual data
@ -130,26 +130,23 @@ _vm_object_allocate(size, object)
vm_size_t size;
register vm_object_t object;
{
bzero(object, sizeof *object);
TAILQ_INIT(&object->memq);
TAILQ_INIT(&object->reverse_shadow_head);
vm_object_lock_init(object);
object->ref_count = 1;
object->resident_page_count = 0;
object->size = size;
object->ref_count = 1;
vm_object_lock_init(object);
object->flags = OBJ_INTERNAL; /* vm_allocate_with_pager will reset */
object->paging_in_progress = 0;
object->copy = NULL;
object->last_read = 0;
/*
* Object starts out read-write, with no pager.
*/
object->resident_page_count = 0;
object->pager = NULL;
object->paging_offset = 0;
object->shadow = NULL;
object->shadow_offset = (vm_offset_t) 0;
object->copy = NULL;
object->last_read = 0;
simple_lock(&vm_object_list_lock);
TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
@ -258,13 +255,40 @@ vm_object_deallocate(object)
vm_object_lock(object);
if (--(object->ref_count) != 0) {
if (object->ref_count == 1) {
if (object->reverse_shadow_head.tqh_first) {
++object->reverse_shadow_head.tqh_first->ref_count;
if (vm_object_lock_try(object->reverse_shadow_head.tqh_first)) {
vm_object_rcollapse(object->reverse_shadow_head.tqh_first, object);
vm_object_unlock(object->reverse_shadow_head.tqh_first);
vm_object_t robject;
robject = object->reverse_shadow_head.tqh_first;
if( robject) {
int s;
robject->ref_count += 2;
object->ref_count += 1;
do {
s = splhigh();
while( robject->paging_in_progress) {
tsleep(robject, PVM, "objde1", 0);
}
while( object->paging_in_progress) {
tsleep(object, PVM, "objde2", 0);
}
splx(s);
} while( object->paging_in_progress || robject->paging_in_progress);
object->ref_count -= 1;
robject->ref_count -= 2;
if( robject->ref_count == 0) {
vm_object_unlock(object);
vm_object_cache_unlock();
robject->ref_count += 1;
vm_object_deallocate(robject);
return;
}
vm_object_deallocate(object->reverse_shadow_head.tqh_first);
vm_object_cache_unlock();
vm_object_unlock(object);
vm_object_lock(robject);
vm_object_collapse(robject);
return;
}
}
vm_object_unlock(object);
@ -801,7 +825,6 @@ vm_object_copy(src_object, src_offset, size,
vm_object_collapse(src_object);
if (src_object->pager == NULL ||
src_object->pager->pg_type == PG_SWAP ||
(src_object->flags & OBJ_INTERNAL)) {
/*
@ -1127,91 +1150,6 @@ vm_object_remove(pager)
}
}
static void
vm_object_rcollapse(object, sobject)
register vm_object_t object, sobject;
{
register vm_object_t backing_object;
register vm_offset_t backing_offset, new_offset;
register vm_page_t p, pp;
register vm_size_t size;
int s;
if (!object)
return;
backing_object = object->shadow;
if (backing_object != sobject) {
printf("backing obj != sobject!!!\n");
return;
}
if (!backing_object)
return;
if ((backing_object->flags & OBJ_INTERNAL) == 0)
return;
if (backing_object->shadow != NULL &&
backing_object->shadow->copy == backing_object)
return;
if (backing_object->ref_count != 1)
return;
object->ref_count += 2;
backing_object->ref_count += 2;
backing_offset = object->shadow_offset;
size = object->size;
again:
s = splbio();
/* XXX what about object->paging_in_progress? */
while (backing_object->paging_in_progress) {
tsleep(backing_object, PVM, "rcolpp", 0);
}
splx(s);
p = backing_object->memq.tqh_first;
while (p) {
vm_page_t next;
next = p->listq.tqe_next;
if ((p->flags & (PG_BUSY | PG_FICTITIOUS | PG_CACHE)) ||
!p->valid || p->hold_count || p->wire_count || p->busy || p->bmapped) {
p = next;
continue;
}
pmap_page_protect(VM_PAGE_TO_PHYS(p), VM_PROT_NONE);
new_offset = (p->offset - backing_offset);
if (p->offset < backing_offset ||
new_offset >= size) {
if (backing_object->pager)
swap_pager_freespace(backing_object->pager,
backing_object->paging_offset + p->offset, PAGE_SIZE);
vm_page_lock_queues();
vm_page_free(p);
vm_page_unlock_queues();
} else {
pp = vm_page_lookup(object, new_offset);
if (pp != NULL ||
(object->pager &&
vm_pager_has_page(object->pager, object->paging_offset + new_offset))) {
if (backing_object->pager)
swap_pager_freespace(backing_object->pager,
backing_object->paging_offset + p->offset, PAGE_SIZE);
vm_page_lock_queues();
vm_page_free(p);
vm_page_unlock_queues();
} else {
if (!backing_object->pager ||
!vm_pager_has_page(backing_object->pager, backing_object->paging_offset + p->offset))
vm_page_rename(p, object, new_offset);
}
}
p = next;
}
backing_object->ref_count -= 2;
object->ref_count -= 2;
}
/*
* this version of collapse allows the operation to occur earlier and
* when paging_in_progress is true for an object... This is not a complete
@ -1272,9 +1210,10 @@ vm_object_qcollapse(object)
vm_page_free(p);
vm_page_unlock_queues();
} else {
if (!backing_object->pager ||
!vm_pager_has_page(backing_object->pager, backing_object->paging_offset + p->offset))
vm_page_rename(p, object, new_offset);
if( backing_object->pager)
swap_pager_freespace(backing_object->pager,
backing_object->paging_offset + p->offset, PAGE_SIZE);
vm_page_rename(p, object, new_offset);
}
}
p = next;
@ -1347,8 +1286,8 @@ vm_object_collapse(object)
if ((backing_object->flags & OBJ_INTERNAL) == 0 ||
backing_object->paging_in_progress != 0) {
vm_object_unlock(backing_object);
vm_object_qcollapse(object);
vm_object_unlock(backing_object);
return;
}
/*
@ -1391,6 +1330,7 @@ vm_object_collapse(object)
if (backing_object->ref_count == 1) {
backing_object->flags |= OBJ_DEAD;
/*
* We can collapse the backing object.
*
@ -1784,6 +1724,128 @@ vm_object_page_lookup(object, offset)
return m;
}
int
_vm_object_in_map(map, object, entry)
vm_map_t map;
vm_object_t object;
vm_map_entry_t entry;
{
vm_map_t tmpm;
vm_map_entry_t tmpe;
vm_object_t obj;
int entcount;
if (map == 0)
return 0;
if (entry == 0) {
tmpe = map->header.next;
entcount = map->nentries;
while (entcount-- && (tmpe != &map->header)) {
if( _vm_object_in_map(map, object, tmpe)) {
return 1;
}
tmpe = tmpe->next;
}
} else if (entry->is_sub_map || entry->is_a_map) {
tmpm = entry->object.share_map;
tmpe = tmpm->header.next;
entcount = tmpm->nentries;
while (entcount-- && tmpe != &tmpm->header) {
if( _vm_object_in_map(tmpm, object, tmpe)) {
return 1;
}
tmpe = tmpe->next;
}
} else if (obj = entry->object.vm_object) {
for(; obj; obj=obj->shadow)
if( obj == object) {
return 1;
}
}
return 0;
}
int
vm_object_in_map( object)
vm_object_t object;
{
struct proc *p;
for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
if( !p->p_vmspace /* || (p->p_flag & (P_SYSTEM|P_WEXIT)) */)
continue;
/*
if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
continue;
}
*/
if( _vm_object_in_map(&p->p_vmspace->vm_map, object, 0))
return 1;
}
if( _vm_object_in_map( kernel_map, object, 0))
return 1;
if( _vm_object_in_map( kmem_map, object, 0))
return 1;
if( _vm_object_in_map( pager_map, object, 0))
return 1;
if( _vm_object_in_map( buffer_map, object, 0))
return 1;
if( _vm_object_in_map( io_map, object, 0))
return 1;
if( _vm_object_in_map( phys_map, object, 0))
return 1;
if( _vm_object_in_map( mb_map, object, 0))
return 1;
if( _vm_object_in_map( u_map, object, 0))
return 1;
return 0;
}
void
vm_object_check() {
int i;
int maxhash = 0;
vm_object_t object;
vm_object_hash_entry_t entry;
/*
* make sure that no internal objs are hashed
*/
for (i=0; i<VM_OBJECT_HASH_COUNT;i++) {
int lsize = 0;
for (entry = vm_object_hashtable[i].tqh_first;
entry != NULL;
entry = entry->hash_links.tqe_next) {
if( entry->object->flags & OBJ_INTERNAL) {
printf("vmochk: internal obj on hash: size: %d\n", entry->object->size);
}
++lsize;
}
if( lsize > maxhash)
maxhash = lsize;
}
printf("maximum object hash queue size: %d\n", maxhash);
/*
* make sure that internal objs are in a map somewhere
* and none have zero ref counts.
*/
for (object = vm_object_list.tqh_first;
object != NULL;
object = object->object_list.tqe_next) {
if( object->flags & OBJ_INTERNAL) {
if( object->ref_count == 0) {
printf("vmochk: internal obj has zero ref count: %d\n",
object->size);
}
if( !vm_object_in_map(object)) {
printf("vmochk: internal obj is not in a map: ref: %d, size: %d, pager: 0x%x, shadow: 0x%x\n", object->ref_count, object->size, object->pager, object->shadow);
}
}
}
}
#define DEBUG
#if defined(DEBUG) || defined(DDB)
/*

File: vm_object.h

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.h,v 1.4 1995/01/09 16:05:50 davidg Exp $
* $Id: vm_object.h,v 1.5 1995/01/24 10:13:24 davidg Exp $
*/
/*
@ -82,26 +82,25 @@
struct vm_object {
struct pglist memq; /* Resident memory */
TAILQ_ENTRY(vm_object) object_list; /* list of all objects */
u_short flags; /* see below */
u_short paging_in_progress; /* Paging (in or out) so don't collapse or destroy */
TAILQ_HEAD(rslist, vm_object) reverse_shadow_head; /* objects that this is a shadow for */
TAILQ_ENTRY(vm_object) object_list; /* list of all objects */
TAILQ_ENTRY(vm_object) reverse_shadow_list; /* chain of objects that are shadowed */
TAILQ_ENTRY(vm_object) cached_list; /* for persistence */
vm_size_t size; /* Object size */
int ref_count; /* How many refs?? */
struct {
int recursion; /* object locking */
struct proc *proc; /* process owned */
} lock;
vm_size_t size; /* Object size */
int resident_page_count;
/* number of resident pages */
struct vm_object *copy; /* Object that holds copies of my changed pages */
u_short flags; /* see below */
u_short paging_in_progress; /* Paging (in or out) so don't collapse or destroy */
int resident_page_count; /* number of resident pages */
vm_pager_t pager; /* Where to get data */
vm_offset_t paging_offset; /* Offset into paging space */
struct vm_object *shadow; /* My shadow */
vm_offset_t shadow_offset; /* Offset in shadow */
struct vm_object *copy; /* Object that holds copies of my changed pages */
vm_offset_t last_read; /* last read in object -- detect seq behavior */
TAILQ_ENTRY(vm_object) cached_list; /* for persistence */
TAILQ_ENTRY(vm_object) reverse_shadow_list; /* chain of objects that are shadowed */
TAILQ_HEAD(rslist, vm_object) reverse_shadow_head; /* objects that this is a shadow for */
};
/*

File: vm_page.c

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.16 1995/01/15 07:31:34 davidg Exp $
* $Id: vm_page.c,v 1.17 1995/01/24 10:13:35 davidg Exp $
*/
/*
@ -622,11 +622,11 @@ vm_page_alloc(object, offset, page_req)
s = splhigh();
if (object != kernel_object &&
if (((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_reserved) &&
object != kernel_object &&
object != kmem_object &&
curproc != pageproc &&
curproc != &proc0 &&
(cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_reserved) {
curproc != &proc0) {
simple_unlock(&vm_page_queue_free_lock);
splx(s);
return (NULL);
@ -641,10 +641,8 @@ vm_page_alloc(object, offset, page_req)
wakeup((caddr_t) &vm_pages_needed);
return NULL;
}
if( cnt.v_free_count < cnt.v_pageout_free_min)
wakeup((caddr_t) &vm_pages_needed);
} else {
if ((cnt.v_free_count < cnt.v_pageout_free_min) ||
if ((cnt.v_free_count < cnt.v_free_reserved) ||
(mem = vm_page_queue_free.tqh_first) == 0) {
mem = vm_page_queue_cache.tqh_first;
if (mem) {
@ -653,14 +651,16 @@ vm_page_alloc(object, offset, page_req)
cnt.v_cache_count--;
goto gotpage;
}
if( page_req == VM_ALLOC_SYSTEM) {
mem = vm_page_queue_free.tqh_first;
if( !mem) {
simple_unlock(&vm_page_queue_free_lock);
splx(s);
wakeup((caddr_t) &vm_pages_needed);
return (NULL);
}
}
if( !mem) {
simple_unlock(&vm_page_queue_free_lock);
splx(s);
wakeup((caddr_t) &vm_pages_needed);
return (NULL);
}
}
}
@ -814,8 +814,10 @@ vm_page_free(mem)
* if pageout daemon needs pages, then tell it that there are
* some free.
*/
if (vm_pageout_pages_needed)
if (vm_pageout_pages_needed) {
wakeup((caddr_t) &vm_pageout_pages_needed);
vm_pageout_pages_needed = 0;
}
/*
* wakeup processes that are waiting on memory if we hit a
@ -954,8 +956,10 @@ vm_page_cache(m)
wakeup((caddr_t) &cnt.v_free_count);
wakeup((caddr_t) &proc0);
}
if (vm_pageout_pages_needed)
if (vm_pageout_pages_needed) {
wakeup((caddr_t) &vm_pageout_pages_needed);
vm_pageout_pages_needed = 0;
}
splx(s);
}

File: vm_pageout.c

@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.31 1995/01/24 10:13:58 davidg Exp $
* $Id: vm_pageout.c,v 1.32 1995/01/28 02:02:25 davidg Exp $
*/
/*
@ -642,11 +642,8 @@ vm_pageout_scan()
} else if (maxlaunder > 0) {
int written;
TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
object = m->object;
if (!vm_object_lock_try(object)) {
if ((object->flags & OBJ_DEAD) || !vm_object_lock_try(object)) {
m = next;
continue;
}
@ -670,9 +667,6 @@ vm_pageout_scan()
if ((next->flags & PG_INACTIVE) == 0) {
goto rescan1;
}
} else {
TAILQ_REMOVE(&vm_page_queue_inactive, m, pageq);
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
}
m = next;
}
@ -770,7 +764,7 @@ vm_pageout_scan()
* We try to maintain some *really* free pages, this allows interrupt
* code to be guaranteed space.
*/
while (cnt.v_free_count < cnt.v_free_min) {
while (cnt.v_free_count < cnt.v_free_reserved) {
m = vm_page_queue_cache.tqh_first;
if (!m)
break;