Change the busy page management, so that when pages are freed, they

MUST be PG_BUSY.  It is bogus to free a page that isn't busy,
because it is in a state of being "unavailable" when being
freed.  The additional advantage is that the page_remove code
has a better cross-check that the page should be busy and
unavailable for other use.  There were some minor problems
with the collapse code, and this plugs those subtle "holes."

Also, the vfs_bio code wasn't checking correctly for PG_BUSY
pages.  I am going to develop a more consistent scheme for
grabbing pages, busy or otherwise.  For now, we are stuck
with the current morass.
This commit is contained in:
John Dyson 1998-01-31 11:56:53 +00:00
parent 59a7338440
commit eaf13dd73a
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=32937
16 changed files with 253 additions and 125 deletions

View File

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.177 1998/01/17 09:16:18 dyson Exp $
* $Id: pmap.c,v 1.178 1998/01/22 17:29:30 dyson Exp $
*/
/*
@ -855,11 +855,11 @@ void
pmap_new_proc(p)
struct proc *p;
{
int i;
int i, updateneeded;
vm_object_t upobj;
vm_page_t m;
struct user *up;
unsigned *ptek;
unsigned *ptek, oldpte;
/*
* allocate object for the upages
@ -880,6 +880,7 @@ pmap_new_proc(p)
ptek = (unsigned *) vtopte((vm_offset_t) up);
updateneeded = 0;
for(i=0;i<UPAGES;i++) {
/*
* Get a kernel stack page
@ -895,15 +896,25 @@ pmap_new_proc(p)
m->wire_count++;
cnt.v_wire_count++;
oldpte = *(ptek + i);
/*
* Enter the page into the kernel address space.
*/
*(ptek + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V | pgeflag;
if (oldpte) {
if ((oldpte & PG_G) || (cpu_class > CPUCLASS_386)) {
invlpg((vm_offset_t) up + i * PAGE_SIZE);
} else {
updateneeded = 1;
}
}
m->flags &= ~(PG_ZERO|PG_BUSY);
m->flags |= PG_MAPPED|PG_WRITEABLE;
m->valid = VM_PAGE_BITS_ALL;
}
if (updateneeded)
invltlb();
}
/*
@ -917,18 +928,20 @@ pmap_dispose_proc(p)
int i;
vm_object_t upobj;
vm_page_t m;
unsigned *ptek;
unsigned *ptek, oldpte;
upobj = p->p_upages_obj;
ptek = (unsigned *) vtopte((vm_offset_t) p->p_addr);
for(i=0;i<UPAGES;i++) {
if ((m = vm_page_lookup(upobj, i)) == NULL)
panic("pmap_dispose_proc: upage already missing???");
m->flags |= PG_BUSY;
oldpte = *(ptek + i);
*(ptek + i) = 0;
if (cpu_class >= CPUCLASS_586)
if ((oldpte & PG_G) || (cpu_class > CPUCLASS_386))
invlpg((vm_offset_t) p->p_addr + i * PAGE_SIZE);
vm_page_unwire(m);
vm_page_free(m);
@ -1062,6 +1075,7 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
wakeup(m);
}
m->flags |= PG_BUSY;
vm_page_free_zero(m);
--cnt.v_wire_count;
}
@ -1237,6 +1251,7 @@ pmap_release_free_page(pmap, p)
if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
pmap->pm_ptphint = NULL;
p->flags |= PG_BUSY;
vm_page_free_zero(p);
splx(s);
return 1;
@ -2335,7 +2350,6 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
m[0] = p;
if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
PAGE_WAKEUP(p);
vm_page_free(p);
return;
}

View File

@ -39,7 +39,7 @@
* SUCH DAMAGE.
*
* from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
* $Id: pmap.c,v 1.177 1998/01/17 09:16:18 dyson Exp $
* $Id: pmap.c,v 1.178 1998/01/22 17:29:30 dyson Exp $
*/
/*
@ -855,11 +855,11 @@ void
pmap_new_proc(p)
struct proc *p;
{
int i;
int i, updateneeded;
vm_object_t upobj;
vm_page_t m;
struct user *up;
unsigned *ptek;
unsigned *ptek, oldpte;
/*
* allocate object for the upages
@ -880,6 +880,7 @@ pmap_new_proc(p)
ptek = (unsigned *) vtopte((vm_offset_t) up);
updateneeded = 0;
for(i=0;i<UPAGES;i++) {
/*
* Get a kernel stack page
@ -895,15 +896,25 @@ pmap_new_proc(p)
m->wire_count++;
cnt.v_wire_count++;
oldpte = *(ptek + i);
/*
* Enter the page into the kernel address space.
*/
*(ptek + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V | pgeflag;
if (oldpte) {
if ((oldpte & PG_G) || (cpu_class > CPUCLASS_386)) {
invlpg((vm_offset_t) up + i * PAGE_SIZE);
} else {
updateneeded = 1;
}
}
m->flags &= ~(PG_ZERO|PG_BUSY);
m->flags |= PG_MAPPED|PG_WRITEABLE;
m->valid = VM_PAGE_BITS_ALL;
}
if (updateneeded)
invltlb();
}
/*
@ -917,18 +928,20 @@ pmap_dispose_proc(p)
int i;
vm_object_t upobj;
vm_page_t m;
unsigned *ptek;
unsigned *ptek, oldpte;
upobj = p->p_upages_obj;
ptek = (unsigned *) vtopte((vm_offset_t) p->p_addr);
for(i=0;i<UPAGES;i++) {
if ((m = vm_page_lookup(upobj, i)) == NULL)
panic("pmap_dispose_proc: upage already missing???");
m->flags |= PG_BUSY;
oldpte = *(ptek + i);
*(ptek + i) = 0;
if (cpu_class >= CPUCLASS_586)
if ((oldpte & PG_G) || (cpu_class > CPUCLASS_386))
invlpg((vm_offset_t) p->p_addr + i * PAGE_SIZE);
vm_page_unwire(m);
vm_page_free(m);
@ -1062,6 +1075,7 @@ _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
wakeup(m);
}
m->flags |= PG_BUSY;
vm_page_free_zero(m);
--cnt.v_wire_count;
}
@ -1237,6 +1251,7 @@ pmap_release_free_page(pmap, p)
if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
pmap->pm_ptphint = NULL;
p->flags |= PG_BUSY;
vm_page_free_zero(p);
splx(s);
return 1;
@ -2335,7 +2350,6 @@ pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
m[0] = p;
if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
PAGE_WAKEUP(p);
vm_page_free(p);
return;
}

View File

@ -11,7 +11,7 @@
* 2. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
*
* $Id: vfs_bio.c,v 1.145 1998/01/24 02:01:20 dyson Exp $
* $Id: vfs_bio.c,v 1.146 1998/01/25 06:24:01 dyson Exp $
*/
/*
@ -796,8 +796,7 @@ vfs_vmio_release(bp)
else
vm_page_deactivate(m);
} else if (m->hold_count == 0) {
struct vnode *vp;
vp = bp->b_vp;
m->flags |= PG_BUSY;
vm_page_protect(m, VM_PROT_NONE);
vm_page_free(m);
}
@ -2145,7 +2144,7 @@ vfs_page_set_valid(struct buf *bp, vm_ooffset_t off, int pageno, vm_page_t m)
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
int i;
int i,s;
if (bp->b_flags & B_VMIO) {
struct vnode *vp = bp->b_vp;
@ -2156,7 +2155,24 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
else
foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
vfs_setdirty(bp);
retry:
for (i = 0; i < bp->b_npages; i++) {
vm_page_t m = bp->b_pages[i];
if (m && (m->flags & PG_BUSY)) {
s = splvm();
while (m->flags & PG_BUSY) {
m->flags |= PG_WANTED;
tsleep(m, PVM, "vbpage", 0);
}
splx(s);
goto retry;
}
}
for (i = 0; i < bp->b_npages; i++, foff += PAGE_SIZE) {
vm_page_t m = bp->b_pages[i];
@ -2164,6 +2180,7 @@ vfs_busy_pages(struct buf * bp, int clear_modify)
obj->paging_in_progress++;
m->busy++;
}
vm_page_protect(m, VM_PROT_NONE);
if (clear_modify)
vfs_page_set_valid(bp, foff, i, m);
@ -2299,6 +2316,7 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
#endif
bp->b_pages[index] = NULL;
pmap_kremove(pg);
p->flags |= PG_BUSY;
vm_page_unwire(p);
vm_page_free(p);
}

View File

@ -33,7 +33,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94
* $Id: vfs_cluster.c,v 1.51 1998/01/24 02:01:21 dyson Exp $
* $Id: vfs_cluster.c,v 1.52 1998/01/31 07:23:11 eivind Exp $
*/
#include "opt_debug_cluster.h"
@ -720,9 +720,19 @@ cluster_wbuild(vp, size, start_lbn, len)
tbp->b_flags &= ~B_DONE;
splx(s);
}
if (tbp->b_flags & B_VMIO) {
vm_page_t m;
if (i != 0) {
for (j = 0; j < tbp->b_npages; j += 1) {
m = tbp->b_pages[j];
if (m->flags & PG_BUSY)
goto finishcluster;
}
}
for (j = 0; j < tbp->b_npages; j += 1) {
vm_page_t m;
m = tbp->b_pages[j];
++m->busy;
++m->object->paging_in_progress;
@ -746,6 +756,7 @@ cluster_wbuild(vp, size, start_lbn, len)
TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
tbp, b_cluster.cluster_entry);
}
finishcluster:
pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
(vm_page_t *) bp->b_pages, bp->b_npages);
if (bp->b_bufsize > bp->b_kvasize)

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_serv.c 8.3 (Berkeley) 1/12/94
* $Id: nfs_serv.c,v 1.53 1997/12/27 02:56:34 bde Exp $
* $Id: nfs_serv.c,v 1.54 1997/12/29 00:23:34 dyson Exp $
*/
/*
@ -76,6 +76,7 @@
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/vm_object.h>
#include <nfs/nfsproto.h>
#include <nfs/rpcv2.h>
@ -3123,6 +3124,10 @@ nfsrv_commit(nfsd, slp, procp, mrq)
return (0);
}
for_ret = VOP_GETATTR(vp, &bfor, cred, procp);
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
vm_object_page_clean(vp->v_object, 0, 0, TRUE);
}
error = VOP_FSYNC(vp, cred, MNT_WAIT, procp);
aft_ret = VOP_GETATTR(vp, &aft, cred, procp);
vput(vp);

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_serv.c 8.3 (Berkeley) 1/12/94
* $Id: nfs_serv.c,v 1.53 1997/12/27 02:56:34 bde Exp $
* $Id: nfs_serv.c,v 1.54 1997/12/29 00:23:34 dyson Exp $
*/
/*
@ -76,6 +76,7 @@
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>
#include <vm/vm_object.h>
#include <nfs/nfsproto.h>
#include <nfs/rpcv2.h>
@ -3123,6 +3124,10 @@ nfsrv_commit(nfsd, slp, procp, mrq)
return (0);
}
for_ret = VOP_GETATTR(vp, &bfor, cred, procp);
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
vm_object_page_clean(vp->v_object, 0, 0, TRUE);
}
error = VOP_FSYNC(vp, cred, MNT_WAIT, procp);
aft_ret = VOP_GETATTR(vp, &aft, cred, procp);
vput(vp);

View File

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)device_pager.c 8.1 (Berkeley) 6/11/93
* $Id: device_pager.c,v 1.26 1997/08/25 22:15:11 bde Exp $
* $Id: device_pager.c,v 1.27 1997/09/01 03:17:12 bde Exp $
*/
#include <sys/param.h>
@ -214,7 +214,6 @@ dev_pager_getpages(object, m, count, reqpage)
page = dev_pager_getfake(paddr);
TAILQ_INSERT_TAIL(&object->un_pager.devp.devp_pglist, page, pageq);
for (i = 0; i < count; i++) {
PAGE_WAKEUP(m[i]);
vm_page_free(m[i]);
}
s = splhigh();

View File

@ -39,7 +39,7 @@
* from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
*
* @(#)swap_pager.c 8.9 (Berkeley) 3/21/94
* $Id: swap_pager.c,v 1.81 1998/01/17 09:16:47 dyson Exp $
* $Id: swap_pager.c,v 1.82 1998/01/22 17:30:32 dyson Exp $
*/
/*
@ -849,7 +849,6 @@ static void
swap_pager_freepage(m)
vm_page_t m;
{
PAGE_WAKEUP(m);
vm_page_free(m);
}
@ -1489,8 +1488,7 @@ swap_pager_putpages(object, m, count, sync, rtvals)
* optimization, if a page has been read
* during the pageout process, we activate it.
*/
if ((m[i]->queue != PQ_ACTIVE) &&
((m[i]->flags & (PG_WANTED|PG_REFERENCED)) ||
if (((m[i]->flags & (PG_WANTED|PG_REFERENCED)) ||
pmap_ts_referenced(VM_PAGE_TO_PHYS(m[i])))) {
vm_page_activate(m[i]);
}
@ -1597,8 +1595,7 @@ swap_pager_finish(spc)
for (i = 0; i < spc->spc_count; i++) {
pmap_clear_modify(VM_PAGE_TO_PHYS(spc->spc_m[i]));
spc->spc_m[i]->dirty = 0;
if ((spc->spc_m[i]->queue != PQ_ACTIVE) &&
((spc->spc_m[i]->flags & PG_WANTED) || pmap_ts_referenced(VM_PAGE_TO_PHYS(spc->spc_m[i]))))
if (((spc->spc_m[i]->flags & PG_WANTED) || pmap_ts_referenced(VM_PAGE_TO_PHYS(spc->spc_m[i]))))
vm_page_activate(spc->spc_m[i]);
}
}

View File

@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_fault.c,v 1.75 1998/01/17 09:16:49 dyson Exp $
* $Id: vm_fault.c,v 1.76 1998/01/22 17:30:33 dyson Exp $
*/
/*
@ -146,16 +146,12 @@ vm_fault(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, int fault_flags)
* Recovery actions
*/
#define FREE_PAGE(m) { \
PAGE_WAKEUP(m); \
vm_page_free(m); \
}
#define RELEASE_PAGE(m) { \
PAGE_WAKEUP(m); \
if (m->queue != PQ_ACTIVE) { \
vm_page_activate(m); \
m->act_count = 0; \
} \
vm_page_activate(m); \
}
#define UNLOCK_MAP { \
@ -613,8 +609,7 @@ RetryFault:;
* get rid of the unnecessary page
*/
vm_page_protect(first_m, VM_PROT_NONE);
PAGE_WAKEUP(first_m);
vm_page_free(first_m);
FREE_PAGE(first_m);
/*
* grab the page and put it into the process'es object
*/
@ -630,10 +625,7 @@ RetryFault:;
}
if (m) {
if (m->queue != PQ_ACTIVE) {
vm_page_activate(m);
m->act_count = 0;
}
vm_page_activate(m);
/*
* We no longer need the old page or object.
@ -757,8 +749,7 @@ RetryFault:;
else
vm_page_unwire(m);
} else {
if (m->queue != PQ_ACTIVE)
vm_page_activate(m);
vm_page_activate(m);
}
if (curproc && (curproc->p_flag & P_INMEM) && curproc->p_stats) {

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_kern.c,v 1.39 1997/08/05 00:01:52 dyson Exp $
* $Id: vm_kern.c,v 1.40 1998/01/22 17:30:35 dyson Exp $
*/
/*
@ -328,7 +328,6 @@ kmem_malloc(map, size, waitflag)
i -= PAGE_SIZE;
m = vm_page_lookup(kmem_object,
OFF_TO_IDX(offset + i));
PAGE_WAKEUP(m);
vm_page_free(m);
}
vm_map_delete(map, addr, addr + size);

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.107 1998/01/21 12:18:00 dyson Exp $
* $Id: vm_map.c,v 1.108 1998/01/22 17:30:37 dyson Exp $
*/
/*
@ -2678,7 +2678,8 @@ vm_freeze_copyopts(object, froma, toa)
while (m_out && (m_out->flags & PG_BUSY)) {
m_out->flags |= PG_WANTED;
tsleep(m_out, PVM, "pwtfrz", 0);
m_out = vm_page_lookup(robject, idx);
splx(s);
goto m_outretry;
}
splx(s);
}
@ -2708,7 +2709,8 @@ vm_freeze_copyopts(object, froma, toa)
while (m_in && (m_in->busy || (m_in->flags & PG_BUSY))) {
m_in->flags |= PG_WANTED;
tsleep(m_in, PVM, "pwtfrz", 0);
m_in = vm_page_lookup(object, bo_pindex + idx);
splx(s);
goto m_inretry;
}
splx(s);
if (m_in == NULL) {

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.107 1998/01/17 09:16:55 dyson Exp $
* $Id: vm_object.c,v 1.108 1998/01/22 17:30:39 dyson Exp $
*/
/*
@ -155,6 +155,8 @@ _vm_object_allocate(type, size, object)
object->behavior = OBJ_NORMAL;
object->paging_in_progress = 0;
object->resident_page_count = 0;
object->cache_count = 0;
object->wire_count = 0;
object->shadow_count = 0;
object->pg_color = next_index;
if ( size > (PQ_L2_SIZE / 3 + PQ_PRIME1))
@ -376,6 +378,7 @@ vm_object_deallocate(object)
temp->shadow_count--;
if (temp->ref_count == 0)
temp->flags &= ~OBJ_OPT;
temp->generation++;
}
vm_object_terminate(object);
/* unlocks and deallocates object */
@ -445,7 +448,7 @@ vm_object_terminate(object)
while ((p = TAILQ_FIRST(&object->memq)) != NULL) {
if (p->busy || (p->flags & PG_BUSY))
printf("vm_object_terminate: freeing busy page\n");
PAGE_WAKEUP(p);
p->flags |= PG_BUSY;
vm_page_free(p);
cnt.v_pfree++;
}
@ -529,11 +532,15 @@ vm_object_page_clean(object, start, end, syncio)
} else {
tend = end;
}
for(p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq)) {
p->flags |= PG_CLEANCHK;
vm_page_protect(p, VM_PROT_READ);
}
if ((tstart == 0) && (tend == object->size)) {
object->flags &= ~(OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
}
for(p = TAILQ_FIRST(&object->memq); p; p = TAILQ_NEXT(p, listq))
p->flags |= PG_CLEANCHK;
rescan:
curgeneration = object->generation;
@ -565,9 +572,7 @@ vm_object_page_clean(object, start, end, syncio)
goto rescan;
}
}
splx(s);
s = splvm();
maxf = 0;
for(i=1;i<vm_pageout_page_count;i++) {
if (tp = vm_page_lookup(object, pi + i)) {
@ -812,8 +817,7 @@ vm_object_madvise(object, pindex, count, advise)
}
if (advise == MADV_WILLNEED) {
if (m->queue != PQ_ACTIVE)
vm_page_activate(m);
vm_page_activate(m);
} else if (advise == MADV_DONTNEED) {
vm_page_deactivate(m);
} else if (advise == MADV_FREE) {
@ -868,7 +872,8 @@ vm_object_shadow(object, offset, length)
result->backing_object = source;
if (source) {
TAILQ_INSERT_TAIL(&source->shadow_head, result, shadow_list);
++source->shadow_count;
source->shadow_count++;
source->generation++;
}
/*
@ -924,6 +929,8 @@ vm_object_qcollapse(object)
p = next;
continue;
}
p->flags |= PG_BUSY;
new_pindex = p->pindex - backing_offset_index;
if (p->pindex < backing_offset_index ||
new_pindex >= size) {
@ -935,7 +942,8 @@ vm_object_qcollapse(object)
vm_page_free(p);
} else {
pp = vm_page_lookup(object, new_pindex);
if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
if (pp != NULL ||
(object->type == OBJT_SWAP && vm_pager_has_page(object,
paging_offset_index + new_pindex, NULL, NULL))) {
if (backing_object->type == OBJT_SWAP)
swap_pager_freespace(backing_object,
@ -946,6 +954,7 @@ vm_object_qcollapse(object)
if (backing_object->type == OBJT_SWAP)
swap_pager_freespace(backing_object,
backing_object_paging_offset_index + p->pindex, 1);
vm_page_rename(p, object, new_pindex);
vm_page_protect(p, VM_PROT_NONE);
p->dirty = VM_PAGE_BITS_ALL;
@ -1041,6 +1050,7 @@ vm_object_collapse(object)
while ((p = TAILQ_FIRST(&backing_object->memq)) != 0) {
new_pindex = p->pindex - backing_offset_index;
p->flags |= PG_BUSY;
/*
* If the parent has a page here, or if this
@ -1053,14 +1063,12 @@ vm_object_collapse(object)
if (p->pindex < backing_offset_index ||
new_pindex >= size) {
vm_page_protect(p, VM_PROT_NONE);
PAGE_WAKEUP(p);
vm_page_free(p);
} else {
pp = vm_page_lookup(object, new_pindex);
if (pp != NULL || (object->type == OBJT_SWAP && vm_pager_has_page(object,
OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL))) {
vm_page_protect(p, VM_PROT_NONE);
PAGE_WAKEUP(p);
vm_page_free(p);
} else {
vm_page_protect(p, VM_PROT_NONE);
@ -1133,17 +1141,20 @@ vm_object_collapse(object)
TAILQ_REMOVE(&object->backing_object->shadow_head, object,
shadow_list);
--object->backing_object->shadow_count;
object->backing_object->shadow_count--;
object->backing_object->generation++;
if (backing_object->backing_object) {
TAILQ_REMOVE(&backing_object->backing_object->shadow_head,
backing_object, shadow_list);
--backing_object->backing_object->shadow_count;
backing_object->backing_object->shadow_count--;
backing_object->backing_object->generation++;
}
object->backing_object = backing_object->backing_object;
if (object->backing_object) {
TAILQ_INSERT_TAIL(&object->backing_object->shadow_head,
object, shadow_list);
++object->backing_object->shadow_count;
object->backing_object->shadow_count++;
object->backing_object->generation++;
}
object->backing_object_offset += backing_object->backing_object_offset;
@ -1182,7 +1193,11 @@ vm_object_collapse(object)
* here.
*/
for (p = TAILQ_FIRST(&backing_object->memq); p; p = TAILQ_NEXT(p, listq)) {
for (p = TAILQ_FIRST(&backing_object->memq); p;
p = TAILQ_NEXT(p, listq)) {
p->flags |= PG_BUSY;
new_pindex = p->pindex - backing_offset_index;
/*
@ -1198,15 +1213,25 @@ vm_object_collapse(object)
pp = vm_page_lookup(object, new_pindex);
if ((pp == NULL || pp->valid == 0) &&
if ((pp == NULL) || (pp->flags & PG_BUSY) || pp->busy) {
PAGE_WAKEUP(p);
return;
}
pp->flags |= PG_BUSY;
if ((pp->valid == 0) &&
!vm_pager_has_page(object, OFF_TO_IDX(object->paging_offset) + new_pindex, NULL, NULL)) {
/*
* Page still needed. Can't go any
* further.
*/
PAGE_WAKEUP(pp);
PAGE_WAKEUP(p);
return;
}
PAGE_WAKEUP(pp);
}
PAGE_WAKEUP(p);
}
/*
@ -1217,14 +1242,16 @@ vm_object_collapse(object)
TAILQ_REMOVE(&backing_object->shadow_head,
object, shadow_list);
--backing_object->shadow_count;
backing_object->shadow_count--;
backing_object->generation++;
new_backing_object = backing_object->backing_object;
if (object->backing_object = new_backing_object) {
vm_object_reference(new_backing_object);
TAILQ_INSERT_TAIL(&new_backing_object->shadow_head,
object, shadow_list);
++new_backing_object->shadow_count;
new_backing_object->shadow_count++;
new_backing_object->generation++;
object->backing_object_offset +=
backing_object->backing_object_offset;
}
@ -1232,12 +1259,11 @@ vm_object_collapse(object)
/*
* Drop the reference count on backing_object. Since
* its ref_count was at least 2, it will not vanish;
* so we don't need to call vm_object_deallocate.
* so we don't need to call vm_object_deallocate, but
* we do anyway.
*/
vm_object_deallocate(backing_object);
object_bypasses++;
}
/*
@ -1303,14 +1329,16 @@ vm_object_page_remove(object, start, end, clean_only)
if (p->valid & p->dirty)
continue;
}
p->flags |= PG_BUSY;
vm_page_protect(p, VM_PROT_NONE);
PAGE_WAKEUP(p);
vm_page_free(p);
}
}
} else {
while (size > 0) {
if ((p = vm_page_lookup(object, start)) != 0) {
if (p->wire_count != 0) {
p->valid = 0;
vm_page_protect(p, VM_PROT_NONE);
@ -1318,6 +1346,7 @@ vm_object_page_remove(object, start, end, clean_only)
size -= 1;
continue;
}
/*
* The busy flags are only cleared at
* interrupt -- minimize the spl transitions
@ -1332,6 +1361,7 @@ vm_object_page_remove(object, start, end, clean_only)
}
splx(s);
}
if (clean_only) {
vm_page_test_dirty(p);
if (p->valid & p->dirty) {
@ -1340,8 +1370,9 @@ vm_object_page_remove(object, start, end, clean_only)
continue;
}
}
p->flags |= PG_BUSY;
vm_page_protect(p, VM_PROT_NONE);
PAGE_WAKEUP(p);
vm_page_free(p);
}
start += 1;

View File

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.h,v 1.42 1998/01/17 09:16:56 dyson Exp $
* $Id: vm_object.h,v 1.43 1998/01/22 17:30:40 dyson Exp $
*/
/*
@ -97,6 +97,8 @@ struct vm_object {
u_short paging_in_progress; /* Paging (in or out) so don't collapse or destroy */
u_short behavior; /* see below */
int resident_page_count; /* number of resident pages */
int cache_count; /* number of cached pages */
int wire_count; /* number of wired pages */
vm_ooffset_t paging_offset; /* Offset into paging space */
struct vm_object *backing_object; /* object that I'm a shadow of */
vm_ooffset_t backing_object_offset;/* Offset in backing object */

View File

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.86 1998/01/17 09:16:59 dyson Exp $
* $Id: vm_page.c,v 1.87 1998/01/22 17:30:41 dyson Exp $
*/
/*
@ -416,6 +416,12 @@ vm_page_insert(m, object, pindex)
m->object->page_hint = m;
m->object->generation++;
if (m->wire_count)
object->wire_count++;
if ((m->queue - m->pc) == PQ_CACHE)
object->cache_count++;
/*
* And show that the object has one more resident page.
*/
@ -442,9 +448,25 @@ vm_page_remove(m)
if (!(m->flags & PG_TABLED))
return;
if ((m->flags & PG_BUSY) == 0) {
panic("vm_page_remove: page not busy");
}
m->flags &= ~PG_BUSY;
if (m->flags & PG_WANTED) {
m->flags &= ~PG_WANTED;
wakeup(m);
}
if (m->object->page_hint == m)
m->object->page_hint = NULL;
if (m->wire_count)
m->object->wire_count--;
if ((m->queue - m->pc) == PQ_CACHE)
m->object->cache_count--;
/*
* Remove from the object_object/offset hash table
*/
@ -542,8 +564,11 @@ vm_page_unqueue_nowakeup(m)
pq = &vm_page_queues[queue];
m->queue = PQ_NONE;
TAILQ_REMOVE(pq->pl, m, pageq);
--(*pq->cnt);
--(*pq->lcnt);
(*pq->cnt)--;
(*pq->lcnt)--;
if ((queue - m->pc) == PQ_CACHE) {
m->object->cache_count--;
}
}
}
@ -560,12 +585,13 @@ vm_page_unqueue(m)
m->queue = PQ_NONE;
pq = &vm_page_queues[queue];
TAILQ_REMOVE(pq->pl, m, pageq);
--(*pq->cnt);
--(*pq->lcnt);
(*pq->cnt)--;
(*pq->lcnt)--;
if ((queue - m->pc) == PQ_CACHE) {
if ((cnt.v_cache_count + cnt.v_free_count) <
(cnt.v_free_reserved + cnt.v_cache_min))
pagedaemon_wakeup();
m->object->cache_count--;
}
}
}
@ -868,16 +894,17 @@ vm_page_alloc(object, pindex, page_req)
queue = m->queue;
qtype = queue - m->pc;
if (qtype == PQ_ZERO)
--vm_page_zero_count;
vm_page_zero_count--;
pq = &vm_page_queues[queue];
TAILQ_REMOVE(pq->pl, m, pageq);
--(*pq->cnt);
--(*pq->lcnt);
(*pq->cnt)--;
(*pq->lcnt)--;
oldobject = NULL;
if (qtype == PQ_ZERO) {
m->flags = PG_ZERO|PG_BUSY;
} else if (qtype == PQ_CACHE) {
oldobject = m->object;
m->flags |= PG_BUSY;
vm_page_remove(m);
m->flags = PG_BUSY;
} else {
@ -953,24 +980,34 @@ vm_page_activate(m)
register vm_page_t m;
{
int s;
vm_page_t np;
vm_object_t object;
s = splvm();
if (m->queue == PQ_ACTIVE)
panic("vm_page_activate: already active");
if (m->queue != PQ_ACTIVE) {
if ((m->queue - m->pc) == PQ_CACHE)
cnt.v_reactivated++;
if ((m->queue - m->pc) == PQ_CACHE)
cnt.v_reactivated++;
vm_page_unqueue(m);
vm_page_unqueue(m);
if (m->wire_count == 0) {
m->queue = PQ_ACTIVE;
++(*vm_page_queues[PQ_ACTIVE].lcnt);
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
if (m->wire_count == 0) {
m->queue = PQ_ACTIVE;
++(*vm_page_queues[PQ_ACTIVE].lcnt);
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
if (m->act_count < ACT_INIT)
m->act_count = ACT_INIT;
cnt.v_active_count++;
}
} else {
if (m->act_count < ACT_INIT)
m->act_count = ACT_INIT;
cnt.v_active_count++;
}
object = m->object;
TAILQ_REMOVE(&object->memq, m, listq);
TAILQ_INSERT_TAIL(&object->memq, m, listq);
object->generation++;
splx(s);
}
@ -986,9 +1023,7 @@ vm_page_freechk_and_unqueue(m)
oldobject = m->object;
#if !defined(MAX_PERF)
if (m->busy ||
(m->flags & PG_BUSY) ||
((m->queue - m->pc) == PQ_FREE) ||
if (m->busy || ((m->queue - m->pc) == PQ_FREE) ||
(m->hold_count != 0)) {
printf("vm_page_free: pindex(%ld), busy(%d), PG_BUSY(%d), hold(%d)\n",
m->pindex, m->busy,
@ -1000,17 +1035,20 @@ vm_page_freechk_and_unqueue(m)
}
#endif
vm_page_remove(m);
vm_page_unqueue_nowakeup(m);
vm_page_remove(m);
if ((m->flags & PG_FICTITIOUS) != 0) {
return 0;
}
if (m->wire_count != 0) {
if (m->wire_count > 1) {
panic("vm_page_free: invalid wire count (%d), pindex: 0x%x",
m->wire_count, m->pindex);
}
m->wire_count = 0;
m->object->wire_count--;
cnt.v_wire_count--;
}
@ -1147,8 +1185,9 @@ vm_page_wire(m)
vm_page_unqueue(m);
splx(s);
cnt.v_wire_count++;
m->object->wire_count++;
}
++(*vm_page_queues[PQ_NONE].lcnt);
(*vm_page_queues[PQ_NONE].lcnt)++;
m->wire_count++;
m->flags |= PG_MAPPED;
}
@ -1169,15 +1208,18 @@ vm_page_unwire(m)
s = splvm();
if (m->wire_count > 0)
if (m->wire_count > 0) {
m->wire_count--;
if (m->wire_count == 0) {
cnt.v_wire_count--;
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m->queue = PQ_ACTIVE;
++(*vm_page_queues[PQ_ACTIVE].lcnt);
cnt.v_active_count++;
if (m->wire_count == 0) {
m->object->wire_count--;
cnt.v_wire_count--;
TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
m->queue = PQ_ACTIVE;
(*vm_page_queues[PQ_ACTIVE].lcnt)++;
cnt.v_active_count++;
}
} else {
panic("vm_page_unwire: invalid wire count: %d\n", m->wire_count);
}
splx(s);
}
@ -1246,9 +1288,10 @@ vm_page_cache(m)
s = splvm();
vm_page_unqueue_nowakeup(m);
m->queue = PQ_CACHE + m->pc;
++(*vm_page_queues[m->queue].lcnt);
(*vm_page_queues[m->queue].lcnt)++;
TAILQ_INSERT_TAIL(vm_page_queues[m->queue].pl, m, pageq);
cnt.v_cache_count++;
m->object->cache_count++;
vm_page_free_wakeup();
splx(s);
}
@ -1415,9 +1458,7 @@ contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
goto again1;
}
}
if ((m->dirty == 0) &&
(m->busy == 0) &&
(m->hold_count == 0))
if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
vm_page_cache(m);
}
@ -1449,9 +1490,7 @@ contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
goto again1;
}
}
if ((m->dirty == 0) &&
(m->busy == 0) &&
(m->hold_count == 0))
if ((m->dirty == 0) && (m->busy == 0) && (m->hold_count == 0))
vm_page_cache(m);
}
@ -1479,11 +1518,13 @@ contigmalloc1(size, type, flags, low, high, alignment, boundary, map)
vm_page_t m = &pga[i];
pqtype = m->queue - m->pc;
if (pqtype == PQ_CACHE)
if (pqtype == PQ_CACHE) {
m->flags |= PG_BUSY;
vm_page_free(m);
}
TAILQ_REMOVE(vm_page_queues[m->queue].pl, m, pageq);
--(*vm_page_queues[m->queue].lcnt);
(*vm_page_queues[m->queue].lcnt)--;
cnt.v_free_count--;
m->valid = VM_PAGE_BITS_ALL;
m->flags = 0;

View File

@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.108 1998/01/17 09:17:01 dyson Exp $
* $Id: vm_pageout.c,v 1.109 1998/01/22 17:30:42 dyson Exp $
*/
/*
@ -403,8 +403,7 @@ vm_pageout_flush(mc, count, sync)
* page so it doesn't clog the inactive list. (We
* will try paging out it again later).
*/
if (mt->queue == PQ_INACTIVE)
vm_page_activate(mt);
vm_page_activate(mt);
break;
case VM_PAGER_AGAIN:
break;
@ -505,6 +504,7 @@ vm_pageout_object_deactivate_pages(map, object, desired, map_remove_only)
splx(s);
}
} else {
vm_page_activate(p);
p->flags &= ~PG_REFERENCED;
if (p->act_count < (ACT_MAX - ACT_ADVANCE))
p->act_count += ACT_ADVANCE;
@ -595,18 +595,17 @@ vm_pageout_page_free(vm_page_t m) {
vm_object_t object;
object = m->object;
vp = NULL;
object->ref_count++;
if (object->type == OBJT_VNODE) {
vp = object->handle;
vp->v_usecount++;
if (VSHOULDBUSY(vp))
vbusy(vp);
}
m->flags |= PG_BUSY;
vm_page_protect(m, VM_PROT_NONE);
PAGE_WAKEUP(m);
vm_page_free(m);
vm_object_deallocate(object);
}
@ -778,7 +777,8 @@ vm_pageout_scan()
continue;
}
if (object->type == OBJT_VNODE && (object->flags & OBJ_DEAD) == 0) {
if ((object->type == OBJT_VNODE) &&
(object->flags & OBJ_DEAD) == 0) {
vp = object->handle;
if (VOP_ISLOCKED(vp) ||
vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curproc)) {

View File

@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
* $Id: vnode_pager.c,v 1.79 1998/01/06 05:26:17 dyson Exp $
* $Id: vnode_pager.c,v 1.80 1998/01/17 09:17:02 dyson Exp $
*/
/*
@ -315,7 +315,6 @@ void
vnode_pager_freepage(m)
vm_page_t m;
{
PAGE_WAKEUP(m);
vm_page_free(m);
}