Fix some vnode management problems, and improve management of the vnode free list.

Fix the UIO optimization code.
Fix an assumption in vm_map_insert regarding allocation of swap pagers.
Fix an spl problem in the collapse handling in vm_object_deallocate.
When pages are freed from vnode objects and the criteria for putting
the associated vnode onto the free list are met, either put the
vnode onto the free list directly, or put it onto an interrupt-safe
version of the list for later transfer onto the actual free list.
Make some minor syntax changes, converting pre-decrements and pre-increments to their post versions.
Remove a bogus timeout (that I added for debugging) from vn_lock.

PHK will likely still have problems with the vnode list management, and
so do I, but it is better than it was.
John Dyson 1998-01-12 01:46:33 +00:00
commit 925a3a419a (parent de48a0f797)
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=32454
11 changed files with 291 additions and 79 deletions
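
A minimal sketch of the deferred-free pattern described in the commit
message, built from the VTBFREE flag and vnode_tobefree_list added by
this commit. The helper names are hypothetical, and details the real
code handles (the temporary list in getnewvnode(), the freevnodes
accounting) are elided:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/vnode.h>

/*
 * Producer side, callable from interrupt context (cf. the
 * vfs_vmio_release() change in vfs_bio.c below): instead of taking
 * the free-list lock, queue the vnode on the interrupt-safe
 * "to be freed" list.
 */
static void
deferred_vnode_free(vp)			/* hypothetical helper */
	struct vnode *vp;
{
	if (VSHOULDFREE(vp) && (vp->v_flag & (VFREE | VTBFREE)) == 0) {
		TAILQ_INSERT_TAIL(&vnode_tobefree_list, vp, v_freelist);
		vp->v_flag |= VTBFREE;
	}
}

/*
 * Consumer side, at non-interrupt time (cf. the getnewvnode() change
 * in vfs_subr.c below): transfer queued vnodes onto the actual free
 * list under splbio and the free-list lock.
 */
static void
drain_tobefree_list()			/* hypothetical helper */
{
	struct vnode *vp, *nvp;
	int s;

	s = splbio();
	simple_lock(&vnode_free_list_slock);
	for (vp = TAILQ_FIRST(&vnode_tobefree_list); vp; vp = nvp) {
		nvp = TAILQ_NEXT(vp, v_freelist);
		TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
		vp->v_flag &= ~VTBFREE;
		vp->v_flag |= VFREE;
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	}
	simple_unlock(&vnode_free_list_slock);
	splx(s);
}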

sys/kern/vfs_bio.c

@ -11,7 +11,7 @@
* 2. Absolutely no warranty of function or purpose is made by the author
* John S. Dyson.
*
* $Id: vfs_bio.c,v 1.140 1997/12/22 11:54:00 dyson Exp $
* $Id: vfs_bio.c,v 1.141 1998/01/06 05:15:55 dyson Exp $
*/
/*
@ -789,8 +789,15 @@ vfs_vmio_release(bp)
else
vm_page_deactivate(m);
} else if (m->hold_count == 0) {
struct vnode *vp;
vp = bp->b_vp;
vm_page_protect(m, VM_PROT_NONE);
vm_page_free(m);
if (vp && VSHOULDFREE(vp) &&
(vp->v_flag & (VFREE|VTBFREE)) == 0) {
TAILQ_INSERT_TAIL(&vnode_tobefree_list, vp, v_freelist);
vp->v_flag |= VTBFREE;
}
}
} else {
/*

sys/kern/vfs_subr.c

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
* $Id: vfs_subr.c,v 1.120 1998/01/06 05:16:18 dyson Exp $
* $Id: vfs_subr.c,v 1.121 1998/01/07 09:26:29 dyson Exp $
*/
/*
@ -101,13 +101,16 @@ int vttoif_tab[9] = {
LIST_REMOVE(bp, b_vnbufs); \
(bp)->b_vnbufs.le_next = NOLIST; \
}
TAILQ_HEAD(freelst, vnode) vnode_free_list; /* vnode free list */
struct tobefreelist vnode_tobefree_list; /* vnode free list */
static u_long wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW, &wantfreevnodes, 0, "");
static u_long freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD, &freevnodes, 0, "");
int vfs_ioopt = 0;
int vfs_ioopt = 2;
SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
struct mntlist mountlist; /* mounted filesystem list */
@ -138,6 +141,7 @@ vntblinit()
simple_lock_init(&mntid_slock);
simple_lock_init(&spechash_slock);
TAILQ_INIT(&vnode_free_list);
TAILQ_INIT(&vnode_tobefree_list);
simple_lock_init(&vnode_free_list_slock);
CIRCLEQ_INIT(&mountlist);
}
@ -350,8 +354,9 @@ getnewvnode(tag, mp, vops, vpp)
vop_t **vops;
struct vnode **vpp;
{
int s;
struct proc *p = curproc; /* XXX */
struct vnode *vp, *tvp;
struct vnode *vp, *tvp, *nvp;
vm_object_t object;
TAILQ_HEAD(freelst, vnode) vnode_tmp_list;
@ -362,9 +367,16 @@ getnewvnode(tag, mp, vops, vpp)
* Otherwise we allocate a new vnode
*/
s = splbio();
simple_lock(&vnode_free_list_slock);
TAILQ_INIT(&vnode_tmp_list);
for (vp = TAILQ_FIRST(&vnode_tobefree_list); vp; vp = nvp) {
nvp = TAILQ_NEXT(vp, v_freelist);
vp->v_flag &= ~VTBFREE;
TAILQ_INSERT_TAIL(&vnode_tmp_list, vp, v_freelist);
}
if (wantfreevnodes && freevnodes < wantfreevnodes) {
vp = NULL;
} else if (!wantfreevnodes && freevnodes <= desiredvnodes) {
@ -373,7 +385,10 @@ getnewvnode(tag, mp, vops, vpp)
*/
vp = NULL;
} else {
TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
for (vp = TAILQ_FIRST(&vnode_free_list); vp; vp = nvp) {
nvp = TAILQ_NEXT(vp, v_freelist);
if (!simple_lock_try(&vp->v_interlock))
continue;
if (vp->v_usecount)
@ -395,7 +410,8 @@ getnewvnode(tag, mp, vops, vpp)
}
}
TAILQ_FOREACH(tvp, &vnode_tmp_list, v_freelist) {
for (tvp = TAILQ_FIRST(&vnode_tmp_list); tvp; tvp = nvp) {
nvp = TAILQ_NEXT(tvp, v_freelist);
TAILQ_REMOVE(&vnode_tmp_list, tvp, v_freelist);
TAILQ_INSERT_TAIL(&vnode_free_list, tvp, v_freelist);
simple_unlock(&tvp->v_interlock);
@ -454,6 +470,7 @@ getnewvnode(tag, mp, vops, vpp)
*vpp = vp;
vp->v_usecount = 1;
vp->v_data = 0;
splx(s);
return (0);
}
@ -1341,6 +1358,7 @@ vgonel(vp, p)
struct vnode *vp;
struct proc *p;
{
int s;
struct vnode *vq;
struct vnode *vx;
@ -1415,10 +1433,18 @@ vgonel(vp, p)
* close the previous instance of the underlying object.
*/
if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) {
s = splbio();
simple_lock(&vnode_free_list_slock);
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
if (vp->v_flag & VFREE) {
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
} else if (vp->v_flag & VTBFREE) {
TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
vp->v_flag &= ~VTBFREE;
}
vp->v_flag |= VFREE;
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
simple_unlock(&vnode_free_list_slock);
splx(s);
}
vp->v_type = VBAD;
@ -2138,7 +2164,14 @@ static void
vfree(vp)
struct vnode *vp;
{
int s;
s = splbio();
simple_lock(&vnode_free_list_slock);
if (vp->v_flag & VTBFREE) {
TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
vp->v_flag &= ~VTBFREE;
}
if (vp->v_flag & VAGE) {
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
} else {
@ -2148,17 +2181,27 @@ vfree(vp)
simple_unlock(&vnode_free_list_slock);
vp->v_flag &= ~VAGE;
vp->v_flag |= VFREE;
splx(s);
}
static void
vbusy(vp)
struct vnode *vp;
{
int s;
s = splbio();
simple_lock(&vnode_free_list_slock);
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
freevnodes--;
if (vp->v_flag & VTBFREE) {
TAILQ_REMOVE(&vnode_tobefree_list, vp, v_freelist);
vp->v_flag &= ~VTBFREE;
} else {
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
freevnodes--;
}
simple_unlock(&vnode_free_list_slock);
vp->v_flag &= ~VFREE;
splx(s);
}
/*

sys/kern/vfs_vnops.c

@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_vnops.c 8.2 (Berkeley) 1/21/94
* $Id: vfs_vnops.c,v 1.45 1997/12/29 01:03:43 dyson Exp $
* $Id: vfs_vnops.c,v 1.46 1998/01/06 05:16:32 dyson Exp $
*/
#include <sys/param.h>
@ -511,9 +511,7 @@ vn_lock(vp, flags, p)
if (vp->v_flag & VXLOCK) {
vp->v_flag |= VXWANT;
simple_unlock(&vp->v_interlock);
if (tsleep((caddr_t)vp, PINOD, "vn_lock", 120*hz)) {
vprint("vn_lock: timeout:", vp);
}
tsleep((caddr_t)vp, PINOD, "vn_lock", 0);
error = ENOENT;
} else {
error = VOP_LOCK(vp, flags | LK_NOPAUSE | LK_INTERLOCK, p);

sys/sys/vnode.h

@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vnode.h 8.7 (Berkeley) 2/4/94
* $Id: vnode.h,v 1.62 1997/12/29 16:53:53 dyson Exp $
* $Id: vnode.h,v 1.63 1998/01/06 05:23:04 dyson Exp $
*/
#ifndef _SYS_VNODE_H_
@ -153,6 +153,7 @@ struct vnode {
#define VOWANT 0x20000 /* a process is waiting for VOLOCK */
#define VDOOMED 0x40000 /* This vnode is being recycled */
#define VFREE 0x80000 /* This vnode is on the freelist */
#define VTBFREE 0x100000 /* This vnode is on the tobefree list */
/*
* Vnode attributes. A field value of VNOVAL represents a field whose value
@ -275,13 +276,14 @@ extern void (*lease_updatetime) __P((int deltat));
#define VSHOULDFREE(vp) \
(!((vp)->v_flag & (VFREE|VDOOMED)) && \
!(vp)->v_holdcnt && !(vp)->v_usecount)
!(vp)->v_holdcnt && !(vp)->v_usecount && \
(!(vp)->v_object || \
!((vp)->v_object->ref_count || (vp)->v_object->resident_page_count)))
#define VSHOULDBUSY(vp) \
(((vp)->v_flag & VFREE) && \
((vp)->v_holdcnt || (vp)->v_usecount))
#endif /* KERNEL */
@ -525,6 +527,10 @@ void vrele __P((struct vnode *vp));
void vref __P((struct vnode *vp));
extern vop_t **default_vnodeop_p;
extern TAILQ_HEAD(tobefreelist, vnode)
vnode_tobefree_list; /* vnode free list */
#endif /* KERNEL */
#endif /* !_SYS_VNODE_H_ */
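
The tightened VSHOULDFREE above is the heart of the change: a vnode
with no holds or users is now a free-list candidate only if its VM
object (if any) has neither references nor resident pages. Spelled
out as an illustrative function (vnode_should_free() is a
hypothetical name, not kernel code):

static int
vnode_should_free(vp)		/* hypothetical; mirrors VSHOULDFREE */
	struct vnode *vp;
{
	if (vp->v_flag & (VFREE | VDOOMED))
		return (0);	/* already free, or being recycled */
	if (vp->v_holdcnt || vp->v_usecount)
		return (0);	/* still held or in use */
	if (vp->v_object &&
	    (vp->v_object->ref_count || vp->v_object->resident_page_count))
		return (0);	/* VM object still has refs or resident pages */
	return (1);
}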

sys/vm/vm_fault.c

@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_fault.c,v 1.72 1997/12/19 09:03:10 dyson Exp $
* $Id: vm_fault.c,v 1.73 1998/01/06 05:25:54 dyson Exp $
*/
/*
@ -523,8 +523,10 @@ RetryFault:;
}
}
#if defined(DIAGNOSTIC)
if ((m->flags & PG_BUSY) == 0)
panic("vm_fault: not busy after main loop");
#endif
/*
* PAGE HAS BEEN FOUND. [Loop invariant still holds -- the object lock

sys/vm/vm_map.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.103 1997/12/29 01:03:34 dyson Exp $
* $Id: vm_map.c,v 1.104 1998/01/06 05:25:58 dyson Exp $
*/
/*
@ -2405,7 +2405,7 @@ RetryLookup:;
vm_map_lock_downgrade(share_map);
}
if (entry->object.vm_object != NULL)
if (entry->object.vm_object->type == OBJT_DEFAULT)
default_pager_convert_to_swapq(entry->object.vm_object);
/*
* Return the object/offset from this entry. If the entry was
@ -2479,16 +2479,20 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
vm_offset_t uaddr, start, end;
vm_pindex_t first_pindex, osize, oindex;
off_t ooffset;
int skipinit, allremoved;
if (npages)
*npages = 0;
allremoved = 0;
while (cnt > 0) {
map = mapa;
uaddr = uaddra;
skipinit = 0;
if ((vm_map_lookup(&map, uaddr,
VM_PROT_READ|VM_PROT_WRITE, &first_entry, &first_object,
VM_PROT_READ, &first_entry, &first_object,
&first_pindex, &prot, &wired, &su)) != KERN_SUCCESS) {
return EFAULT;
}
@ -2506,17 +2510,16 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
osize = atop(tcnt);
oindex = OFF_TO_IDX(cp);
if (npages) {
vm_pindex_t src_index, idx;
src_index = OFF_TO_IDX(cp);
vm_pindex_t idx;
for (idx = 0; idx < osize; idx++) {
vm_page_t m;
if ((m = vm_page_lookup(srcobject, src_index + idx)) == NULL) {
if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
vm_map_lookup_done(map, first_entry);
return 0;
}
if ((m->flags & PG_BUSY) || m->busy ||
m->hold_count || m->wire_count ||
if ((m->flags & PG_BUSY) ||
((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
vm_map_lookup_done(map, first_entry);
return 0;
@ -2524,46 +2527,113 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
}
}
oindex = OFF_TO_IDX(first_entry->offset);
/*
* If we are changing an existing map entry, just redirect
* the object, and change mappings.
*/
if ((first_object->ref_count == 1) &&
(first_object->backing_object == srcobject) &&
if (first_object->type == OBJT_VNODE) {
if (first_object != srcobject) {
vm_object_deallocate(first_object);
srcobject->flags |= OBJ_OPT;
vm_object_reference(srcobject);
first_entry->object.vm_object = srcobject;
first_entry->offset = cp;
} else if (first_entry->offset != cp) {
first_entry->offset = cp;
} else {
skipinit = 1;
}
if (skipinit == 0) {
/*
* Remove old window into the file
*/
if (!allremoved) {
pmap_remove (map->pmap, uaddra, uaddra + cnt);
allremoved = 1;
}
/*
* Force copy on write for mmaped regions
*/
vm_object_pmap_copy_1 (srcobject,
oindex, oindex + osize);
}
} else if ((first_object->ref_count == 1) &&
(first_object->size == osize) &&
(first_object->resident_page_count == 0)) {
vm_object_t oldobject;
/*
* Remove old window into the file
*/
pmap_remove (map->pmap, start, end);
oldobject = first_object->backing_object;
/*
* Force copy on write for mmaped regions
*/
vm_object_pmap_copy_1 (first_object,
oindex, oindex + osize);
if ((first_object->backing_object_offset != cp) ||
(oldobject != srcobject)) {
/*
* Remove old window into the file
*/
if (!allremoved) {
pmap_remove (map->pmap, uaddra, uaddra + cnt);
allremoved = 1;
}
/*
* Point the object appropriately
*/
first_object->backing_object_offset = cp;
/*
* Force copy on write for mmaped regions
*/
vm_object_pmap_copy_1 (srcobject,
oindex, oindex + osize);
/*
* Point the object appropriately
*/
if (oldobject != srcobject) {
/*
* Set the object optimization hint flag
*/
srcobject->flags |= OBJ_OPT;
vm_object_reference(srcobject);
if (oldobject) {
TAILQ_REMOVE(&oldobject->shadow_head,
first_object, shadow_list);
oldobject->shadow_count--;
if (oldobject->shadow_count == 0)
oldobject->flags &= ~OBJ_OPT;
vm_object_deallocate(oldobject);
}
TAILQ_INSERT_TAIL(&srcobject->shadow_head,
first_object, shadow_list);
srcobject->shadow_count++;
first_object->backing_object = srcobject;
}
first_object->backing_object_offset = cp;
} else {
skipinit = 1;
}
/*
* Otherwise, we have to do a logical mmap.
*/
} else {
object = srcobject;
object->flags |= OBJ_OPT;
vm_object_reference(object);
ooffset = cp;
srcobject->flags |= OBJ_OPT;
vm_object_reference(srcobject);
vm_object_shadow(&object, &ooffset, osize);
pmap_remove (map->pmap, start, end);
vm_object_pmap_copy_1 (first_object,
if (!allremoved) {
pmap_remove (map->pmap, uaddra, uaddra + cnt);
allremoved = 1;
}
vm_object_pmap_copy_1 (srcobject,
oindex, oindex + osize);
vm_map_lookup_done(map, first_entry);
@ -2578,8 +2648,8 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
SAVE_HINT(map, first_entry->prev);
vm_map_entry_delete(map, first_entry);
rv = vm_map_insert(map, object, 0, start, end,
VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
rv = vm_map_insert(map, srcobject, cp, start, end,
VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE | MAP_COPY_NEEDED);
if (rv != KERN_SUCCESS)
panic("vm_uiomove: could not insert new entry: %d", rv);
@ -2588,8 +2658,9 @@ vm_uiomove(mapa, srcobject, cp, cnt, uaddra, npages)
/*
* Map the window directly, if it is already in memory
*/
pmap_object_init_pt(map->pmap, start,
srcobject, (vm_pindex_t) OFF_TO_IDX(cp), end - start, 1);
if (!skipinit)
pmap_object_init_pt(map->pmap, start,
srcobject, (vm_pindex_t) OFF_TO_IDX(cp), end - start, 0);
vm_map_unlock(map);
@ -2663,10 +2734,14 @@ vm_freeze_copyopts(object, froma, toa)
continue;
vm_object_reference(robject);
s = splvm();
while (robject->paging_in_progress) {
robject->flags |= OBJ_PIPWNT;
tsleep(robject, PVM, "objfrz", 0);
}
splx(s);
if (robject->ref_count == 1) {
vm_object_deallocate(robject);
continue;
@ -2690,7 +2765,7 @@ vm_freeze_copyopts(object, froma, toa)
continue;
if( m_in->flags & PG_BUSY) {
s = splhigh();
s = splvm();
while (m_in && (m_in->flags & PG_BUSY)) {
m_in->flags |= PG_WANTED;
tsleep(m_in, PVM, "pwtfrz", 0);
@ -2705,7 +2780,7 @@ vm_freeze_copyopts(object, froma, toa)
retryout:
m_out = vm_page_lookup(robject, dstpindex);
if( m_out && (m_out->flags & PG_BUSY)) {
s = splhigh();
s = splvm();
while (m_out && (m_out->flags & PG_BUSY)) {
m_out->flags |= PG_WANTED;
tsleep(m_out, PVM, "pwtfrz", 0);
@ -2733,6 +2808,7 @@ vm_freeze_copyopts(object, froma, toa)
vm_object_pip_wakeup(robject);
if (((from - bo_pindex) == 0) && ((to - bo_pindex) == robject->size)) {
object->shadow_count--;
TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
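
The rewritten vm_uiomove() above reduces to a three-way case split.
A condensed control-flow sketch (commentary only; the real bodies are
in the hunk above):

if (first_object->type == OBJT_VNODE) {
	/* Re-point the map entry at srcobject/cp (or just adjust the
	 * offset); when nothing changed, skipinit avoids redundant
	 * pmap work. */
} else if (first_object->ref_count == 1 &&
    first_object->size == osize &&
    first_object->resident_page_count == 0) {
	/* Re-back the single-reference object: fix the shadow lists
	 * and set backing_object_offset = cp. */
} else {
	/* Logical mmap: delete the old entry and vm_map_insert() a
	 * copy-on-write mapping of srcobject at offset cp. */
}
/* Every rewrite path unmaps the old window exactly once via
 * pmap_remove(map->pmap, uaddra, uaddra + cnt) and forces
 * copy-on-write with vm_object_pmap_copy_1(srcobject, oindex,
 * oindex + osize). */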

sys/vm/vm_object.c

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.104 1998/01/06 05:26:04 dyson Exp $
* $Id: vm_object.c,v 1.105 1998/01/07 03:12:19 dyson Exp $
*/
/*
@ -332,12 +332,14 @@ vm_object_deallocate(object)
if (robject->paging_in_progress) {
robject->flags |= OBJ_PIPWNT;
tsleep(robject, PVM, "objde1", 0);
splx(s);
goto retry;
}
if (object->paging_in_progress) {
object->flags |= OBJ_PIPWNT;
tsleep(object, PVM, "objde2", 0);
splx(s);
goto retry;
}
splx(s);

sys/vm/vm_page.c

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.83 1997/11/06 08:35:50 dyson Exp $
* $Id: vm_page.c,v 1.84 1997/12/29 00:24:58 dyson Exp $
*/
/*
@ -753,6 +753,7 @@ vm_page_alloc(object, pindex, page_req)
{
register vm_page_t m;
struct vpgqueues *pq;
vm_object_t oldobject;
int queue, qtype;
int s;
@ -861,9 +862,11 @@ vm_page_alloc(object, pindex, page_req)
TAILQ_REMOVE(pq->pl, m, pageq);
--(*pq->cnt);
--(*pq->lcnt);
oldobject = NULL;
if (qtype == PQ_ZERO) {
m->flags = PG_ZERO|PG_BUSY;
} else if (qtype == PQ_CACHE) {
oldobject = m->object;
vm_page_remove(m);
m->flags = PG_BUSY;
} else {
@ -891,6 +894,19 @@ vm_page_alloc(object, pindex, page_req)
(cnt.v_free_count < cnt.v_pageout_free_min))
pagedaemon_wakeup();
if (((page_req == VM_ALLOC_NORMAL) || (page_req == VM_ALLOC_ZERO)) &&
oldobject &&
((oldobject->type == OBJT_VNODE) &&
(oldobject->ref_count == 0) &&
(oldobject->resident_page_count == 0))) {
struct vnode *vp;
vp = (struct vnode *) oldobject->handle;
if (VSHOULDFREE(vp)) {
vm_object_reference(oldobject);
vm_object_vndeallocate(oldobject);
}
}
return (m);
}
@ -954,6 +970,7 @@ static int
vm_page_freechk_and_unqueue(m)
vm_page_t m;
{
#if !defined(MAX_PERF)
if (m->busy ||
(m->flags & PG_BUSY) ||
((m->queue - m->pc) == PQ_FREE) ||
@ -966,6 +983,7 @@ vm_page_freechk_and_unqueue(m)
else
panic("vm_page_free: freeing busy page");
}
#endif
vm_page_remove(m);
vm_page_unqueue_nowakeup(m);

sys/vm/vm_pageout.c

@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.105 1997/12/29 00:25:03 dyson Exp $
* $Id: vm_pageout.c,v 1.106 1998/01/06 05:26:11 dyson Exp $
*/
/*
@ -382,10 +382,10 @@ vm_pageout_flush(mc, count, sync)
switch (pageout_status[i]) {
case VM_PAGER_OK:
++anyok;
anyok++;
break;
case VM_PAGER_PEND:
++anyok;
anyok++;
break;
case VM_PAGER_BAD:
/*
@ -592,6 +592,23 @@ vm_pageout_map_deactivate_pages(map, desired)
}
#endif
void
vm_pageout_page_free(vm_page_t m) {
vm_object_t objref = NULL;
m->flags |= PG_BUSY;
if (m->object->type == OBJT_VNODE) {
objref = m->object;
vm_object_reference(objref);
}
vm_page_protect(m, VM_PROT_NONE);
PAGE_WAKEUP(m);
vm_page_free(m);
if (objref) {
vm_object_vndeallocate(objref);
}
}
/*
* vm_pageout_scan does the dirty work for the pageout daemon.
*/
@ -716,17 +733,16 @@ vm_pageout_scan()
* Invalid pages can be easily freed
*/
if (m->valid == 0) {
vm_page_protect(m, VM_PROT_NONE);
vm_page_free(m);
vm_pageout_page_free(m);
cnt.v_dfree++;
++pages_freed;
pages_freed++;
/*
* Clean pages can be placed onto the cache queue.
*/
} else if (m->dirty == 0) {
vm_page_cache(m);
++pages_freed;
pages_freed++;
/*
* Dirty pages need to be paged out. Note that we clean
@ -774,7 +790,7 @@ vm_pageout_scan()
splx(s);
}
if (object->flags & OBJ_MIGHTBEDIRTY)
++vnodes_skipped;
vnodes_skipped++;
continue;
}
@ -784,7 +800,7 @@ vm_pageout_scan()
*/
if (m->queue != PQ_INACTIVE) {
if (object->flags & OBJ_MIGHTBEDIRTY)
++vnodes_skipped;
vnodes_skipped++;
vput(vp);
continue;
}
@ -808,7 +824,7 @@ vm_pageout_scan()
TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
splx(s);
if (object->flags & OBJ_MIGHTBEDIRTY)
++vnodes_skipped;
vnodes_skipped++;
vput(vp);
continue;
}
@ -922,7 +938,7 @@ vm_pageout_scan()
m->act_count -= min(m->act_count, ACT_DECLINE);
if (vm_pageout_algorithm_lru ||
(m->object->ref_count == 0) || (m->act_count == 0)) {
--page_shortage;
page_shortage--;
if (m->object->ref_count == 0) {
vm_page_protect(m, VM_PROT_NONE);
if (m->dirty == 0)
@ -953,7 +969,7 @@ vm_pageout_scan()
if (!m)
break;
cache_rover = (cache_rover + PQ_PRIME2) & PQ_L2_MASK;
vm_page_free(m);
vm_pageout_page_free(m);
cnt.v_dfree++;
}
splx(s);
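
The new vm_pageout_page_free() above exists for the vnode free-list
work: taking a temporary reference on a vnode-backed object across
the free, then dropping it with vm_object_vndeallocate(), is what
lets an idle vnode meet the new VSHOULDFREE criteria once its last
resident page is gone. Both free sites in the scan now use it, e.g.:

	/* Freeing an invalid inactive page in vm_pageout_scan(). */
	if (m->valid == 0) {
		vm_pageout_page_free(m);	/* was vm_page_protect() + vm_page_free() */
		cnt.v_dfree++;
		pages_freed++;
	}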

sys/vm/vm_pageout.h

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.h,v 1.20 1997/02/22 09:48:34 peter Exp $
* $Id: vm_pageout.h,v 1.21 1997/12/06 02:23:36 dyson Exp $
*/
#ifndef _VM_VM_PAGEOUT_H_
@ -105,6 +105,7 @@ extern void vm_wait __P((void));
void vm_pageout_page __P((vm_page_t, vm_object_t));
void vm_pageout_cluster __P((vm_page_t, vm_object_t));
int vm_pageout_flush __P((vm_page_t *, int, int));
void vm_pageout_page_free __P((vm_page_t));
#endif