Significantly improve the efficiency of the swap pager, which appears to
have declined due to code rot over time.  The swap pager rundown code
has been cleaned up, and unneeded wakeups removed.  Lots of splbio's
are changed to splvm's.  Also, set the dynamic tunables for the
pageout daemon to be more sane for larger systems (thereby decreasing
the daemon overhead).
John Dyson 1998-02-23 08:22:48 +00:00
parent d9bed5bee1
commit e47ed70b0f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=33758
8 changed files with 307 additions and 315 deletions
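
As background for the many splbio-to-splvm conversions in this diff, the
pattern being changed looks like the sketch below; the counter and wait
channel are illustrative names, not the actual kernel symbols.

	/*
	 * Minimal sketch of the spl interlock pattern touched throughout
	 * this commit.  splvm() blocks only the interrupt levels that can
	 * mutate VM and pager queues, a cheaper guard than splbio() for
	 * code that never touches buffer-cache state.
	 */
	static int pending_pageouts;	/* illustrative, not a kernel symbol */

	static void
	wait_for_pageouts()
	{
		int s;

		s = splvm();		/* previously splbio() */
		while (pending_pageouts > 0)
			tsleep(&pending_pageouts, PVM, "pwait", 0);
		splx(s);		/* restore the prior priority level */
	}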

sys/vm/swap_pager.c

@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
* $Id: swap_pager.c,v 1.87 1998/02/06 12:14:21 eivind Exp $
* $Id: swap_pager.c,v 1.88 1998/02/09 06:11:20 eivind Exp $
*/
/*
@ -59,6 +59,16 @@
#include <sys/vmmeter.h>
#include <sys/rlist.h>
#ifndef MAX_PAGEOUT_CLUSTER
#define MAX_PAGEOUT_CLUSTER 8
#endif
#ifndef NPENDINGIO
#define NPENDINGIO 16
#endif
#define SWB_NPAGES MAX_PAGEOUT_CLUSTER
#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_object.h>
@ -68,19 +78,14 @@
#include <vm/swap_pager.h>
#include <vm/vm_extern.h>
#ifndef NPENDINGIO
#define NPENDINGIO 10
#endif
static int nswiodone;
int swap_pager_full;
extern int vm_swap_size;
static int suggest_more_swap = 0;
static int no_swap_space = 1;
static int max_pageout_cluster;
struct rlisthdr swaplist;
#define MAX_PAGEOUT_CLUSTER 8
TAILQ_HEAD(swpclean, swpagerclean);
typedef struct swpagerclean *swp_clean_t;
@ -111,6 +116,7 @@ static struct swpclean swap_pager_inuse;
/* list of free pager clean structs */
static struct swpclean swap_pager_free;
static int swap_pager_free_count;
static int swap_pager_free_pending;
/* list of "named" anon region objects */
static struct pagerlst swap_pager_object_list;
@ -139,6 +145,7 @@ static boolean_t
static int swap_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
static void swap_pager_init __P((void));
static void swap_pager_sync __P((void));
static void spc_free __P((swp_clean_t));
struct pagerops swappagerops = {
swap_pager_init,
@ -150,7 +157,7 @@ struct pagerops swappagerops = {
swap_pager_sync
};
static int npendingio = NPENDINGIO;
static int npendingio;
static int dmmin;
int dmmax;
@ -192,6 +199,7 @@ swapsizecheck()
static void
swap_pager_init()
{
int maxsafepending;
TAILQ_INIT(&swap_pager_object_list);
TAILQ_INIT(&swap_pager_un_object_list);
@ -208,6 +216,17 @@ swap_pager_init()
*/
dmmin = PAGE_SIZE / DEV_BSIZE;
dmmax = btodb(SWB_NPAGES * PAGE_SIZE) * 2;
maxsafepending = cnt.v_free_min - cnt.v_free_reserved;
npendingio = NPENDINGIO;
max_pageout_cluster = MAX_PAGEOUT_CLUSTER;
if ((2 * NPENDINGIO * MAX_PAGEOUT_CLUSTER) > maxsafepending) {
max_pageout_cluster = MAX_PAGEOUT_CLUSTER / 2;
npendingio = maxsafepending / (2 * max_pageout_cluster);
if (npendingio < 2)
npendingio = 2;
}
}
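
A worked instance of the sizing above, assuming illustrative values for the
free-page counters (hypothetical numbers, not defaults):

	/*
	 * Hypothetical counters: cnt.v_free_min = 64, cnt.v_free_reserved = 16.
	 *
	 *   maxsafepending = 64 - 16 = 48
	 *   2 * NPENDINGIO * MAX_PAGEOUT_CLUSTER = 2 * 16 * 8 = 256 > 48
	 *     => max_pageout_cluster = 8 / 2 = 4
	 *     => npendingio = 48 / (2 * 4) = 6	(floor of 2 enforced)
	 *
	 * A small machine thus gets 6 pending-I/O slots of 4 pages each
	 * rather than 16 slots of 8 pages, bounding the pages that can be
	 * tied up in in-flight pageouts to no more than maxsafepending.
	 */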
void
@ -222,7 +241,7 @@ swap_pager_swap_init()
* kmem_alloc pageables at runtime
*/
for (i = 0, spc = swcleanlist; i < npendingio; i++, spc++) {
spc->spc_kva = kmem_alloc_pageable(pager_map, PAGE_SIZE * MAX_PAGEOUT_CLUSTER);
spc->spc_kva = kmem_alloc_pageable(pager_map, PAGE_SIZE * max_pageout_cluster);
if (!spc->spc_kva) {
break;
}
@ -425,7 +444,7 @@ swap_pager_freespace(object, start, size)
vm_pindex_t i;
int s;
s = splbio();
s = splvm();
for (i = start; i < start + size; i += 1) {
int valid;
daddr_t *addr = swap_pager_diskaddr(object, i, &valid);
@ -453,7 +472,7 @@ swap_pager_dmzspace(object, start, size)
vm_pindex_t i;
int s;
s = splbio();
s = splvm();
for (i = start; i < start + size; i += 1) {
int valid;
daddr_t *addr = swap_pager_diskaddr(object, i, &valid);
@ -551,7 +570,7 @@ swap_pager_reclaim()
/*
* allow only one process to be in the swap_pager_reclaim subroutine
*/
s = splbio();
s = splvm();
if (in_reclaim) {
tsleep(&in_reclaim, PSWP, "swrclm", 0);
splx(s);
@ -639,18 +658,17 @@ swap_pager_copy(srcobject, srcoffset, dstobject, dstoffset, offset)
TAILQ_REMOVE(&swap_pager_object_list, srcobject, pager_object_list);
}
s = splbio();
s = splvm();
while (srcobject->un_pager.swp.swp_poip) {
tsleep(srcobject, PVM, "spgout", 0);
}
splx(s);
/*
* clean all of the pages that are currently active and finished
*/
swap_pager_sync();
if (swap_pager_free_pending)
swap_pager_sync();
s = splbio();
/*
* transfer source to destination
*/
@ -741,14 +759,14 @@ swap_pager_dealloc(object)
* cleaning list.
*/
s = splbio();
s = splvm();
while (object->un_pager.swp.swp_poip) {
tsleep(object, PVM, "swpout", 0);
}
splx(s);
swap_pager_sync();
if (swap_pager_free_pending)
swap_pager_sync();
/*
* Free left over swap blocks
@ -853,6 +871,37 @@ swap_pager_freepage(m)
vm_page_free(m);
}
/*
* Wakeup based upon spc state
*/
static void
spc_wakeup(void)
{
if( swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT) {
swap_pager_needflags &= ~SWAP_FREE_NEEDED_BY_PAGEOUT;
wakeup(&swap_pager_needflags);
} else if ((swap_pager_needflags & SWAP_FREE_NEEDED) &&
swap_pager_free_count >= ((2 * npendingio) / 3)) {
swap_pager_needflags &= ~SWAP_FREE_NEEDED;
wakeup(&swap_pager_free);
}
}
/*
* Free an spc structure
*/
static void
spc_free(spc)
swp_clean_t spc;
{
spc->spc_flags = 0;
TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
swap_pager_free_count++;
if (swap_pager_needflags) {
spc_wakeup();
}
}
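
For contrast, the consumer side these helpers pair with appears later in the
putpages path; condensed, it looks like this sketch:

	/*
	 * Condensed from swap_pager_putpages(): a thread other than the
	 * pagedaemon that finds the spc free list nearly empty sleeps on
	 * &swap_pager_free.  spc_free()/spc_wakeup() above only issue the
	 * wakeup once the free list refills to 2/3 of npendingio, so one
	 * wakeup services a batch of sleepers rather than one per freed spc.
	 */
	int s;

	s = splvm();
	pagedaemon_wakeup();
	while (swap_pager_free_count <= 3) {
		swap_pager_needflags |= SWAP_FREE_NEEDED;
		tsleep(&swap_pager_free, PVM, "swpfre", 0);
		pagedaemon_wakeup();
	}
	splx(s);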
/*
* swap_pager_ridpages is a convenience routine that deallocates all
* but the required page. this is usually used in error returns that
@ -895,7 +944,6 @@ swap_pager_getpages(object, m, count, reqpage)
int i;
boolean_t rv;
vm_offset_t kva, off[count];
swp_clean_t spc;
vm_pindex_t paging_offset;
int reqaddr[count];
int sequential;
@ -991,22 +1039,11 @@ swap_pager_getpages(object, m, count, reqpage)
* into "m" for the page actually faulted
*/
spc = NULL;
if ((count == 1) && ((spc = TAILQ_FIRST(&swap_pager_free)) != NULL)) {
TAILQ_REMOVE(&swap_pager_free, spc, spc_list);
swap_pager_free_count--;
kva = spc->spc_kva;
bp = spc->spc_bp;
bzero(bp, sizeof *bp);
bp->b_spc = spc;
bp->b_vnbufs.le_next = NOLIST;
} else {
/*
* Get a swap buffer header to perform the IO
*/
bp = getpbuf();
kva = (vm_offset_t) bp->b_data;
}
/*
* Get a swap buffer header to perform the IO
*/
bp = getpbuf();
kva = (vm_offset_t) bp->b_data;
/*
* map our page(s) into kva for input
@ -1036,7 +1073,7 @@ swap_pager_getpages(object, m, count, reqpage)
/*
* wait for the sync I/O to complete
*/
s = splbio();
s = splvm();
while ((bp->b_flags & B_DONE) == 0) {
if (tsleep(bp, PVM, "swread", hz*20)) {
printf("swap_pager: indefinite wait buffer: device: %#x, blkno: %d, size: %d\n",
@ -1052,12 +1089,6 @@ swap_pager_getpages(object, m, count, reqpage)
rv = VM_PAGER_OK;
}
/*
* relpbuf does this, but we maintain our own buffer list also...
*/
if (bp->b_vp)
pbrelvp(bp);
splx(s);
swb[reqpage]->swb_locked--;
@ -1066,88 +1097,58 @@ swap_pager_getpages(object, m, count, reqpage)
*/
pmap_qremove(kva, count);
if (spc) {
m[reqpage]->object->last_read = m[reqpage]->pindex;
if (bp->b_flags & B_WANTED)
wakeup(bp);
/*
* if we have used an spc, we need to free it.
*/
if (bp->b_rcred != NOCRED)
crfree(bp->b_rcred);
if (bp->b_wcred != NOCRED)
crfree(bp->b_wcred);
TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
swap_pager_free_count++;
if (swap_pager_needflags & SWAP_FREE_NEEDED) {
wakeup(&swap_pager_free);
/*
* release the physical I/O buffer
*/
relpbuf(bp);
/*
* finish up input if everything is ok
*/
if (rv == VM_PAGER_OK) {
for (i = 0; i < count; i++) {
m[i]->dirty = 0;
m[i]->flags &= ~PG_ZERO;
if (i != reqpage) {
/*
* whether or not to leave the page
* activated is up in the air, but we
* should put the page on a page queue
* somewhere. (it already is in the
* object). After some empirical
* results, it is best to deactivate
* the readahead pages.
*/
vm_page_deactivate(m[i]);
/*
* just in case someone was asking for
* this page we now tell them that it
* is ok to use
*/
m[i]->valid = VM_PAGE_BITS_ALL;
PAGE_WAKEUP(m[i]);
}
}
if (swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT)
pagedaemon_wakeup();
swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT);
if (rv == VM_PAGER_OK) {
#if notneeded
pmap_clear_modify(VM_PAGE_TO_PHYS(m[reqpage]));
#endif
m[reqpage]->valid = VM_PAGE_BITS_ALL;
m[reqpage]->dirty = 0;
}
} else {
m[reqpage]->object->last_read = m[count-1]->pindex;
/*
* release the physical I/O buffer
* If we're out of swap space, then attempt to free
* some whenever multiple pages are brought in. We
* must set the dirty bits so that the page contents
* will be preserved.
*/
relpbuf(bp);
/*
* finish up input if everything is ok
*/
if (rv == VM_PAGER_OK) {
if (SWAPLOW ||
(vm_swap_size < btodb((cnt.v_page_count - cnt.v_wire_count)) * PAGE_SIZE)) {
for (i = 0; i < count; i++) {
#if notneeded
pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
#endif
m[i]->dirty = 0;
m[i]->flags &= ~PG_ZERO;
if (i != reqpage) {
/*
* whether or not to leave the page
* activated is up in the air, but we
* should put the page on a page queue
* somewhere. (it already is in the
* object). After some empirical
* results, it is best to deactivate
* the readahead pages.
*/
vm_page_deactivate(m[i]);
/*
* just in case someone was asking for
* this page we now tell them that it
* is ok to use
*/
m[i]->valid = VM_PAGE_BITS_ALL;
PAGE_WAKEUP(m[i]);
}
m[i]->dirty = VM_PAGE_BITS_ALL;
}
m[reqpage]->object->last_read = m[count-1]->pindex;
/*
* If we're out of swap space, then attempt to free
* some whenever multiple pages are brought in. We
* must set the dirty bits so that the page contents
* will be preserved.
*/
if (SWAPLOW ||
(vm_swap_size < btodb((cnt.v_page_count - cnt.v_wire_count)) * PAGE_SIZE)) {
for (i = 0; i < count; i++) {
m[i]->dirty = VM_PAGE_BITS_ALL;
}
swap_pager_freespace(object,
m[0]->pindex + paging_offset, count);
}
} else {
swap_pager_ridpages(m, count, reqpage);
swap_pager_freespace(object,
m[0]->pindex + paging_offset, count);
}
} else {
swap_pager_ridpages(m, count, reqpage);
}
return (rv);
}
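
A worked instance of the low-swap test in the pagein completion path above,
with hypothetical figures:

	/*
	 * Hypothetical figures: PAGE_SIZE = 4096, DEV_BSIZE = 512, and
	 * 8192 pageable pages (cnt.v_page_count - cnt.v_wire_count).
	 *
	 *   backing needed = btodb(8192) * PAGE_SIZE
	 *                  = (8192 / 512) * 4096
	 *                  = 65536 DEV_BSIZE blocks (32MB)
	 *
	 * If vm_swap_size has fallen below that, every page just read in
	 * is marked fully dirty and its swap backing freed, so the scarce
	 * swap space is recycled while the contents survive in RAM.
	 */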
@ -1179,7 +1180,9 @@ swap_pager_putpages(object, m, count, sync, rtvals)
rtvals[i] = VM_PAGER_FAIL;
return VM_PAGER_FAIL;
}
spc = NULL;
if (curproc != pageproc)
sync = TRUE;
object = m[0]->object;
paging_pindex = OFF_TO_IDX(object->paging_offset);
@ -1210,7 +1213,7 @@ swap_pager_putpages(object, m, count, sync, rtvals)
int ntoget;
tries = 0;
s = splbio();
s = splvm();
/*
* if any other pages have been allocated in this
@ -1291,16 +1294,24 @@ swap_pager_putpages(object, m, count, sync, rtvals)
}
if (firstidx == -1) {
if ((object->paging_in_progress == 0) &&
(object->flags & OBJ_PIPWNT)) {
object->flags &= ~OBJ_PIPWNT;
wakeup(object);
for (i = 0; i < count; i++) {
if (rtvals[i] == VM_PAGER_OK)
rtvals[i] = VM_PAGER_AGAIN;
}
return VM_PAGER_AGAIN;
}
lastidx = firstidx + ix;
if (ix > max_pageout_cluster) {
for (i = firstidx + max_pageout_cluster; i < lastidx; i++) {
if (rtvals[i] == VM_PAGER_OK)
rtvals[i] = VM_PAGER_AGAIN;
}
ix = max_pageout_cluster;
lastidx = firstidx + ix;
}
for (i = 0; i < firstidx; i++) {
if (swb[i])
swb[i]->swb_locked--;
@ -1311,32 +1322,29 @@ swap_pager_putpages(object, m, count, sync, rtvals)
swb[i]->swb_locked--;
}
#if defined(DIAGNOSTIC)
for (i = firstidx; i < lastidx; i++) {
if (reqaddr[i] == SWB_EMPTY) {
printf("I/O to empty block???? -- pindex: %d, i: %d\n",
m[i]->pindex, i);
}
}
#endif
/*
* For synchronous writes, we clean up all completed async pageouts.
* Clean up all completed async pageouts.
*/
if (sync == TRUE) {
if (swap_pager_free_pending)
swap_pager_sync();
}
kva = 0;
/*
* get a swap pager clean data structure, block until we get it
*/
if (swap_pager_free_count <= 3) {
s = splbio();
if (curproc == pageproc) {
retryfree:
/*
* pageout daemon needs a swap control block
*/
swap_pager_needflags |= SWAP_FREE_NEEDED_BY_PAGEOUT|SWAP_FREE_NEEDED;
if (curproc == pageproc) {
if (swap_pager_free_count == 0) {
s = splvm();
while (swap_pager_free_count == 0) {
swap_pager_needflags |= SWAP_FREE_NEEDED_BY_PAGEOUT;
/*
* if it does not get one within a short time, then
* there is a potential deadlock, so we go on trying
@ -1347,43 +1355,44 @@ swap_pager_putpages(object, m, count, sync, rtvals)
* I/O subsystem is probably already fully utilized, might as
* well wait.
*/
if (tsleep(&swap_pager_free, PVM, "swpfre", hz/5)) {
swap_pager_sync();
if (swap_pager_free_count <= 3) {
for (i = firstidx; i < lastidx; i++) {
rtvals[i] = VM_PAGER_AGAIN;
if (tsleep(&swap_pager_needflags, PVM-1, "swpfre", hz/2)) {
if (swap_pager_free_pending)
swap_pager_sync();
if (swap_pager_free_count == 0) {
for (i = firstidx; i < lastidx; i++) {
rtvals[i] = VM_PAGER_AGAIN;
}
splx(s);
return VM_PAGER_AGAIN;
}
splx(s);
return VM_PAGER_AGAIN;
}
} else {
/*
* we make sure that pageouts aren't taking up all of
* the free swap control blocks.
*/
swap_pager_sync();
if (swap_pager_free_count <= 3) {
goto retryfree;
} else {
swap_pager_sync();
}
}
} else {
pagedaemon_wakeup();
while (swap_pager_free_count <= 3) {
swap_pager_needflags |= SWAP_FREE_NEEDED;
tsleep(&swap_pager_free, PVM, "swpfre", 0);
pagedaemon_wakeup();
}
splx(s);
}
splx(s);
}
spc = TAILQ_FIRST(&swap_pager_free);
if (spc == NULL)
panic("swap_pager_putpages: free queue is empty, %d expected\n",
swap_pager_free_count);
TAILQ_REMOVE(&swap_pager_free, spc, spc_list);
swap_pager_free_count--;
kva = spc->spc_kva;
spc = TAILQ_FIRST(&swap_pager_free);
#if defined(DIAGNOSTIC)
if (spc == NULL)
panic("swap_pager_putpages: free queue is empty, %d expected\n",
swap_pager_free_count);
#endif
TAILQ_REMOVE(&swap_pager_free, spc, spc_list);
swap_pager_free_count--;
kva = spc->spc_kva;
bp = spc->spc_bp;
bzero(bp, sizeof *bp);
bp->b_spc = spc;
bp->b_vnbufs.le_next = NOLIST;
bp->b_data = (caddr_t) kva;
} else {
spc = NULL;
bp = getpbuf();
kva = (vm_offset_t) bp->b_data;
bp->b_spc = NULL;
}
/*
* map our page(s) into kva for I/O
@ -1406,14 +1415,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
swb[i]->swb_locked--;
}
/*
* Get a swap buffer header and perform the IO
*/
bp = spc->spc_bp;
bzero(bp, sizeof *bp);
bp->b_spc = spc;
bp->b_vnbufs.le_next = NOLIST;
bp->b_flags = B_BUSY | B_PAGING;
bp->b_proc = &proc0; /* XXX (but without B_PHYS set this is ok) */
bp->b_rcred = bp->b_wcred = bp->b_proc->p_ucred;
@ -1421,26 +1422,35 @@ swap_pager_putpages(object, m, count, sync, rtvals)
crhold(bp->b_rcred);
if (bp->b_wcred != NOCRED)
crhold(bp->b_wcred);
bp->b_data = (caddr_t) kva;
bp->b_blkno = reqaddr[firstidx];
pbgetvp(swapdev_vp, bp);
bp->b_bcount = PAGE_SIZE * ix;
bp->b_bufsize = PAGE_SIZE * ix;
s = splvm();
swapdev_vp->v_numoutput++;
/*
* If this is an async write we set up additional buffer fields and
* place a "cleaning" entry on the inuse queue.
*/
s = splbio();
if (sync == FALSE) {
spc->spc_flags = 0;
spc->spc_object = object;
for (i = firstidx; i < lastidx; i++)
spc->spc_m[i] = m[i];
spc->spc_first = firstidx;
spc->spc_count = ix;
* place a "cleaning" entry on the inuse queue.
*/
object->un_pager.swp.swp_poip++;
if (spc) {
spc->spc_flags = 0;
spc->spc_object = object;
bp->b_npages = ix;
for (i = firstidx; i < lastidx; i++) {
spc->spc_m[i] = m[i];
bp->b_pages[i - firstidx] = m[i];
vm_page_protect(m[i], VM_PROT_READ);
pmap_clear_modify(VM_PAGE_TO_PHYS(m[i]));
m[i]->dirty = 0;
}
spc->spc_first = firstidx;
spc->spc_count = ix;
/*
* the completion routine for async writes
*/
@ -1448,36 +1458,40 @@ swap_pager_putpages(object, m, count, sync, rtvals)
bp->b_iodone = swap_pager_iodone;
bp->b_dirtyoff = 0;
bp->b_dirtyend = bp->b_bcount;
object->un_pager.swp.swp_poip++;
TAILQ_INSERT_TAIL(&swap_pager_inuse, spc, spc_list);
} else {
object->un_pager.swp.swp_poip++;
bp->b_flags |= B_CALL;
bp->b_iodone = swap_pager_iodone1;
bp->b_npages = ix;
for (i = firstidx; i < lastidx; i++)
bp->b_pages[i - firstidx] = m[i];
}
cnt.v_swapout++;
cnt.v_swappgsout += ix;
/*
* perform the I/O
*/
VOP_STRATEGY(bp);
if (sync == FALSE) {
if ((bp->b_flags & B_DONE) == B_DONE) {
if (swap_pager_free_pending) {
swap_pager_sync();
}
splx(s);
for (i = firstidx; i < lastidx; i++) {
rtvals[i] = VM_PAGER_PEND;
}
return VM_PAGER_PEND;
}
s = splvm();
/*
* wait for the sync I/O to complete
*/
while ((bp->b_flags & B_DONE) == 0) {
tsleep(bp, PVM, "swwrt", 0);
}
if (bp->b_flags & B_ERROR) {
printf("swap_pager: I/O error - pageout failed; blkno %d, size %d, error %d\n",
bp->b_blkno, bp->b_bcount, bp->b_error);
@ -1492,8 +1506,6 @@ swap_pager_putpages(object, m, count, sync, rtvals)
if (bp->b_vp)
pbrelvp(bp);
if (bp->b_flags & B_WANTED)
wakeup(bp);
splx(s);
@ -1530,84 +1542,42 @@ swap_pager_putpages(object, m, count, sync, rtvals)
crfree(bp->b_rcred);
if (bp->b_wcred != NOCRED)
crfree(bp->b_wcred);
TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
swap_pager_free_count++;
if (swap_pager_needflags & SWAP_FREE_NEEDED) {
wakeup(&swap_pager_free);
}
if (swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT)
pagedaemon_wakeup();
swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT);
spc_free(spc);
if (swap_pager_free_pending)
swap_pager_sync();
return (rv);
}
static void
swap_pager_sync()
{
register swp_clean_t spc, tspc;
register int s;
swp_clean_t spc;
tspc = NULL;
if (TAILQ_FIRST(&swap_pager_done) == NULL)
return;
for (;;) {
s = splbio();
/*
* Look up and removal from done list must be done at splbio()
* to avoid conflicts with swap_pager_iodone.
*/
while ((spc = TAILQ_FIRST(&swap_pager_done)) != 0) {
pmap_qremove(spc->spc_kva, spc->spc_count);
swap_pager_finish(spc);
TAILQ_REMOVE(&swap_pager_done, spc, spc_list);
goto doclean;
}
/*
* No operations done, that's all we can do for now.
*/
splx(s);
break;
/*
* The desired page was found to be busy earlier in the scan
* but has since completed.
*/
doclean:
if (tspc && tspc == spc) {
tspc = NULL;
}
spc->spc_flags = 0;
TAILQ_INSERT_TAIL(&swap_pager_free, spc, spc_list);
swap_pager_free_count++;
if (swap_pager_needflags & SWAP_FREE_NEEDED) {
wakeup(&swap_pager_free);
}
if( swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT)
pagedaemon_wakeup();
swap_pager_needflags &= ~(SWAP_FREE_NEEDED|SWAP_FREE_NEEDED_BY_PAGEOUT);
splx(s);
while (spc = TAILQ_FIRST(&swap_pager_done)) {
swap_pager_finish(spc);
}
return;
}
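
A note on the simplified rundown loop above; call sites elsewhere in this
diff now gate it as sketched here:

	/*
	 * swap_pager_sync() is now a plain drain of the done list, so
	 * callers first peek at a one-word counter:
	 *
	 *	if (swap_pager_free_pending)
	 *		swap_pager_sync();
	 *
	 * swap_pager_free_pending is incremented in swap_pager_iodone()
	 * and decremented in swap_pager_finish(), making the no-work case
	 * a single memory read instead of a list scan at raised spl.
	 */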
void
static void
swap_pager_finish(spc)
register swp_clean_t spc;
{
int lastidx = spc->spc_first + spc->spc_count;
vm_page_t *ma = spc->spc_m;
vm_object_t object = ma[spc->spc_first]->object;
int i;
int i, s, lastidx;
vm_object_t object;
vm_page_t *ma;
object->paging_in_progress -= spc->spc_count;
if ((object->paging_in_progress == 0) &&
(object->flags & OBJ_PIPWNT)) {
object->flags &= ~OBJ_PIPWNT;
wakeup(object);
}
ma = spc->spc_m;
object = ma[spc->spc_first]->object;
lastidx = spc->spc_first + spc->spc_count;
s = splvm();
TAILQ_REMOVE(&swap_pager_done, spc, spc_list);
splx(s);
pmap_qremove(spc->spc_kva, spc->spc_count);
/*
* If no error, mark as clean and inform the pmap system. If error,
@ -1615,14 +1585,23 @@ swap_pager_finish(spc)
* this, should give up after awhile)
*/
if (spc->spc_flags & SPC_ERROR) {
for (i = spc->spc_first; i < lastidx; i++) {
printf("swap_pager_finish: I/O error, clean of page %lx failed\n",
(u_long) VM_PAGE_TO_PHYS(ma[i]));
ma[i]->dirty = VM_PAGE_BITS_ALL;
PAGE_WAKEUP(ma[i]);
}
object->paging_in_progress -= spc->spc_count;
if ((object->paging_in_progress == 0) &&
(object->flags & OBJ_PIPWNT)) {
object->flags &= ~OBJ_PIPWNT;
wakeup(object);
}
} else {
for (i = spc->spc_first; i < lastidx; i++) {
pmap_clear_modify(VM_PAGE_TO_PHYS(ma[i]));
ma[i]->dirty = 0;
if ((ma[i]->queue != PQ_ACTIVE) &&
((ma[i]->flags & PG_WANTED) ||
pmap_ts_referenced(VM_PAGE_TO_PHYS(ma[i]))))
@ -1630,14 +1609,9 @@ swap_pager_finish(spc)
}
}
for (i = spc->spc_first; i < lastidx; i++) {
/*
* we wakeup any processes that are waiting on these pages.
*/
PAGE_WAKEUP(ma[i]);
}
nswiodone -= spc->spc_count;
swap_pager_free_pending--;
spc_free(spc);
return;
}
@ -1649,54 +1623,68 @@ static void
swap_pager_iodone(bp)
register struct buf *bp;
{
int i, s;
register swp_clean_t spc;
int s;
vm_object_t object;
s = splbio();
s = splvm();
spc = (swp_clean_t) bp->b_spc;
TAILQ_REMOVE(&swap_pager_inuse, spc, spc_list);
TAILQ_INSERT_TAIL(&swap_pager_done, spc, spc_list);
object = bp->b_pages[0]->object;
#if defined(DIAGNOSTIC)
if (object->paging_in_progress < spc->spc_count)
printf("swap_pager_iodone: paging_in_progress(%d) < spc_count(%d)\n",
object->paging_in_progress, spc->spc_count);
#endif
if (bp->b_flags & B_ERROR) {
spc->spc_flags |= SPC_ERROR;
printf("swap_pager: I/O error - async %s failed; blkno %lu, size %ld, error %d\n",
(bp->b_flags & B_READ) ? "pagein" : "pageout",
(u_long) bp->b_blkno, bp->b_bcount, bp->b_error);
} else {
for (i = 0; i < bp->b_npages; i++) {
/*
* we wakeup any processes that are waiting on these pages.
*/
PAGE_WAKEUP(bp->b_pages[i]);
}
object->paging_in_progress -= spc->spc_count;
if ((object->paging_in_progress == 0) &&
(object->flags & OBJ_PIPWNT)) {
object->flags &= ~OBJ_PIPWNT;
wakeup(object);
}
}
if (bp->b_vp)
pbrelvp(bp);
if (bp->b_flags & B_WANTED)
wakeup(bp);
if (bp->b_rcred != NOCRED)
crfree(bp->b_rcred);
if (bp->b_wcred != NOCRED)
crfree(bp->b_wcred);
nswiodone += spc->spc_count;
swap_pager_free_pending++;
if (--spc->spc_object->un_pager.swp.swp_poip == 0) {
wakeup(spc->spc_object);
}
if ((swap_pager_needflags & SWAP_FREE_NEEDED) ||
TAILQ_FIRST(&swap_pager_inuse) == 0) {
swap_pager_needflags &= ~SWAP_FREE_NEEDED;
wakeup(&swap_pager_free);
if (swap_pager_needflags &&
((swap_pager_free_count + swap_pager_free_pending) > (npendingio / 2))) {
spc_wakeup();
}
if( swap_pager_needflags & SWAP_FREE_NEEDED_BY_PAGEOUT) {
swap_pager_needflags &= ~SWAP_FREE_NEEDED_BY_PAGEOUT;
pagedaemon_wakeup();
}
if (vm_pageout_pages_needed) {
if ((TAILQ_FIRST(&swap_pager_inuse) == NULL) &&
vm_pageout_pages_needed) {
wakeup(&vm_pageout_pages_needed);
vm_pageout_pages_needed = 0;
}
if ((TAILQ_FIRST(&swap_pager_inuse) == NULL) ||
((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min &&
nswiodone + cnt.v_free_count + cnt.v_cache_count >= cnt.v_free_min)) {
pagedaemon_wakeup();
}
splx(s);
}
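
A worked instance of the deferred-wakeup threshold in swap_pager_iodone(),
reusing the hypothetical npendingio from the earlier sizing example:

	/*
	 * With npendingio = 6, spc_wakeup() is considered only once
	 *
	 *	swap_pager_free_count + swap_pager_free_pending > 6 / 2 = 3
	 *
	 * so an I/O completion that leaves just one or two spcs available
	 * stays silent, and sleepers are woken when a useful batch of
	 * clean structures exists rather than once per completed write.
	 */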

sys/vm/swap_pager.h

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* from: @(#)swap_pager.h 7.1 (Berkeley) 12/5/90
* $Id$
* $Id: swap_pager.h,v 1.18 1997/02/22 09:48:08 peter Exp $
*/
/*
@ -55,7 +55,10 @@
* Above problem has been fixed, now we support 16 pages per block. Unused
* space is recovered by the swap pager now...
*/
#if !defined(SWB_NPAGES)
#define SWB_NPAGES 8
#endif
struct swblock {
unsigned short swb_valid; /* bitmask for valid pages */
unsigned short swb_locked; /* block locked */

sys/vm/vm_kern.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_kern.c,v 1.43 1998/02/05 03:32:39 dyson Exp $
* $Id: vm_kern.c,v 1.44 1998/02/06 12:14:23 eivind Exp $
*/
/*
@ -147,7 +147,7 @@ kmem_alloc(map, size)
* offset within the kernel map.
*/
vm_map_lock(map);
if (vm_map_findspace(map, 0, size, &addr)) {
if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
vm_map_unlock(map);
return (0);
}
@ -291,7 +291,7 @@ kmem_malloc(map, size, waitflag)
* offset within the kernel map.
*/
vm_map_lock(map);
if (vm_map_findspace(map, 0, size, &addr)) {
if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
vm_map_unlock(map);
if (map == mb_map) {
mb_map_full = TRUE;
@ -299,7 +299,8 @@ kmem_malloc(map, size, waitflag)
return (0);
}
if (waitflag == M_WAITOK)
panic("kmem_malloc: kmem_map too small");
panic("kmem_malloc(%d): kmem_map too small: %d total allocated",
size, map->size);
return (0);
}
offset = addr - VM_MIN_KERNEL_ADDRESS;
@ -390,7 +391,7 @@ kmem_alloc_wait(map, size)
* to lock out sleepers/wakers.
*/
vm_map_lock(map);
if (vm_map_findspace(map, 0, size, &addr) == 0)
if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
break;
/* no space now; see if we can ever get space */
if (vm_map_max(map) - vm_map_min(map) < size) {

sys/vm/vm_map.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.114 1998/02/09 06:11:26 eivind Exp $
* $Id: vm_map.c,v 1.115 1998/02/20 13:11:54 bde Exp $
*/
/*
@ -1666,7 +1666,7 @@ vm_map_clean(map, start, end, syncio, invalidate)
*/
if (current->protection & VM_PROT_WRITE) {
if (object->type == OBJT_VNODE)
vn_lock(object->handle, LK_EXCLUSIVE|LK_RETRY, curproc);
vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curproc);
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size + PAGE_MASK),

sys/vm/vm_pageout.c

@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.113 1998/02/06 12:14:28 eivind Exp $
* $Id: vm_pageout.c,v 1.114 1998/02/09 06:11:34 eivind Exp $
*/
/*
@ -1145,7 +1145,6 @@ vm_pageout_page_stats()
m = next;
}
}
static int
vm_pageout_free_page_calc(count)
@ -1193,13 +1192,15 @@ vm_pageout()
if (cnt.v_free_count > 1024) {
cnt.v_cache_max = (cnt.v_free_count - 1024) / 2;
cnt.v_cache_min = (cnt.v_free_count - 1024) / 8;
cnt.v_cache_min = cnt.v_free_target * 2;
cnt.v_inactive_target = 2*cnt.v_cache_min + 192;
} else {
cnt.v_cache_min = 0;
cnt.v_cache_max = 0;
cnt.v_inactive_target = cnt.v_free_count / 4;
}
if (cnt.v_inactive_target > cnt.v_free_count / 3)
cnt.v_inactive_target = cnt.v_free_count / 3;
/* XXX does not really belong here */
if (vm_page_max_wired == 0)
@ -1244,13 +1245,10 @@ vm_pageout()
continue;
}
} else if (vm_pages_needed) {
tsleep(&vm_pages_needed, PVM, "psleep", hz/10);
vm_pages_needed = 0;
tsleep(&vm_pages_needed, PVM, "psleep", hz/2);
}
inactive_target =
(cnt.v_page_count - cnt.v_wire_count) / 4;
if (inactive_target < 2*cnt.v_free_min)
inactive_target = 2*cnt.v_free_min;
cnt.v_inactive_target = inactive_target;
if (vm_pages_needed)
cnt.v_pdwakeups++;
vm_pages_needed = 0;
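
A worked instance of the retuned pageout targets above, with a hypothetical
free-page count:

	/*
	 * Hypothetical boot-time values: cnt.v_free_count = 16384 pages
	 * (64MB of 4K pages) and cnt.v_free_target = 1024.
	 *
	 *   v_cache_max       = (16384 - 1024) / 2 = 7680	(unchanged)
	 *   v_cache_min       = 2 * 1024           = 2048
	 *   v_inactive_target = 2 * 2048 + 192     = 4288
	 *   clamp: 4288 < 16384 / 3 = 5461, so no adjustment
	 *
	 * The old v_cache_min of (v_free_count - 1024) / 8 grew with total
	 * memory; tying it to v_free_target, together with the hz/10 ->
	 * hz/2 sleep in the stats pass, keeps the daemon's idle overhead
	 * lower on large machines.
	 */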

sys/vm/vm_pager.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pager.c,v 1.33 1998/02/04 22:33:57 eivind Exp $
* $Id: vm_pager.c,v 1.34 1998/02/06 12:14:29 eivind Exp $
*/
/*
@ -274,7 +274,7 @@ getpbuf()
int s;
struct buf *bp;
s = splbio();
s = splvm();
/* get a bp from the swap buffer header pool */
while ((bp = TAILQ_FIRST(&bswlist)) == NULL) {
bswneeded = 1;
@ -296,7 +296,7 @@ trypbuf()
int s;
struct buf *bp;
s = splbio();
s = splvm();
if ((bp = TAILQ_FIRST(&bswlist)) == NULL) {
splx(s);
return NULL;
@ -318,7 +318,7 @@ relpbuf(bp)
{
int s;
s = splbio();
s = splvm();
if (bp->b_rcred != NOCRED) {
crfree(bp->b_rcred);

sys/vm/vm_swap.c

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vm_swap.c 8.5 (Berkeley) 2/17/94
* $Id: vm_swap.c,v 1.51 1998/01/25 04:13:25 eivind Exp $
* $Id: vm_swap.c,v 1.52 1998/02/19 12:15:06 msmith Exp $
*/
#include "opt_devfs.h"
@ -105,7 +105,7 @@ static void
swstrategy(bp)
register struct buf *bp;
{
int sz, off, seg, index;
int s, sz, off, seg, index;
register struct swdevt *sp;
struct vnode *vp;
@ -139,6 +139,7 @@ swstrategy(bp)
return;
}
vhold(sp->sw_vp);
s = splvm();
if ((bp->b_flags & B_READ) == 0) {
vp = bp->b_vp;
if (vp) {
@ -152,6 +153,7 @@ swstrategy(bp)
}
if (bp->b_vp != NULL)
pbrelvp(bp);
splx(s);
bp->b_vp = sp->sw_vp;
VOP_STRATEGY(bp);
}

sys/vm/vnode_pager.c

@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
* $Id: vnode_pager.c,v 1.83 1998/02/05 03:32:49 dyson Exp $
* $Id: vnode_pager.c,v 1.84 1998/02/06 12:14:30 eivind Exp $
*/
/*
@ -170,7 +170,7 @@ vnode_pager_dealloc(object)
panic("vnode_pager_dealloc: pager already dealloced");
if (object->paging_in_progress) {
int s = splbio();
int s = splvm();
while (object->paging_in_progress) {
object->flags |= OBJ_PIPWNT;
tsleep(object, PVM, "vnpdea", 0);
@ -429,9 +429,9 @@ vnode_pager_input_smlfs(object, m)
/* do the input */
VOP_STRATEGY(bp);
/* we definitely need to be at splbio here */
/* we definitely need to be at splvm here */
s = splbio();
s = splvm();
while ((bp->b_flags & B_DONE) == 0) {
tsleep(bp, PVM, "vnsrd", 0);
}
@ -720,8 +720,8 @@ vnode_pager_leaf_getpages(object, m, count, reqpage)
/* do the input */
VOP_STRATEGY(bp);
s = splbio();
/* we definitely need to be at splbio here */
s = splvm();
/* we definitely need to be at splvm here */
while ((bp->b_flags & B_DONE) == 0) {
tsleep(bp, PVM, "vnread", 0);