Some performance improvements and code cleanups (including changing our expensive OFF_TO_IDX to btoc whenever possible).
John Dyson 1997-12-19 09:03:37 +00:00
parent f5fb6bd02a
commit 1efb74fbcc
11 changed files with 479 additions and 31 deletions
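A note on the headline change: the cost difference between the two page-index conversions comes down to operand width. A rough sketch of the macros, assuming 4 KB pages and era-typical definitions (the exact forms lived in <vm/vm_object.h> and <machine/param.h> and varied by architecture):

/*
 * Illustrative definitions, not the literal headers.  OFF_TO_IDX()
 * shifts a 64-bit file offset (vm_ooffset_t), which a 32-bit machine
 * needs several instructions to do; btoc() ("bytes to clicks") rounds
 * up and shifts a native-width word in one add and one shift.
 */
#define PAGE_SHIFT      12
#define PAGE_MASK       ((1UL << PAGE_SHIFT) - 1)

#define OFF_TO_IDX(off) ((unsigned long)(((unsigned long long)(off)) >> PAGE_SHIFT))
#define btoc(x)         (((unsigned long)(x) + PAGE_MASK) >> PAGE_SHIFT)

btoc() rounds up where OFF_TO_IDX() truncates, so the substitution is only made for quantities such as entry->end - entry->start that are already page multiples, where the two agree.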

sys/kern/kern_subr.c

@@ -36,13 +36,19 @@
* SUCH DAMAGE.
*
* @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
* $Id: kern_subr.c,v 1.12 1997/09/02 20:05:42 bde Exp $
* $Id: kern_subr.c,v 1.13 1997/10/10 18:14:23 phk Exp $
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
int
uiomove(cp, n, uio)
@@ -102,6 +108,75 @@ uiomove(cp, n, uio)
return (0);
}
int
uiomoveco(cp, n, uio, obj)
caddr_t cp;
int n;
struct uio *uio;
struct vm_object *obj;
{
struct iovec *iov;
u_int cnt;
int error;
#ifdef DIAGNOSTIC
if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
panic("uiomove: mode");
if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
panic("uiomove proc");
#endif
while (n > 0 && uio->uio_resid) {
iov = uio->uio_iov;
cnt = iov->iov_len;
if (cnt == 0) {
uio->uio_iov++;
uio->uio_iovcnt--;
continue;
}
if (cnt > n)
cnt = n;
switch (uio->uio_segflg) {
case UIO_USERSPACE:
case UIO_USERISPACE:
if (uio->uio_rw == UIO_READ) {
if (((cnt & PAGE_MASK) == 0) &&
((((int) iov->iov_base) & PAGE_MASK) == 0) &&
((uio->uio_offset & PAGE_MASK) == 0) &&
((((int) cp) & PAGE_MASK) == 0)) {
error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
uio->uio_offset, cnt,
(vm_offset_t) iov->iov_base);
} else {
error = copyout(cp, iov->iov_base, cnt);
}
} else {
error = copyin(iov->iov_base, cp, cnt);
}
if (error)
return (error);
break;
case UIO_SYSSPACE:
if (uio->uio_rw == UIO_READ)
bcopy((caddr_t)cp, iov->iov_base, cnt);
else
bcopy(iov->iov_base, (caddr_t)cp, cnt);
break;
case UIO_NOCOPY:
break;
}
iov->iov_base += cnt;
iov->iov_len -= cnt;
uio->uio_resid -= cnt;
uio->uio_offset += cnt;
cp += cnt;
n -= cnt;
}
return (0);
}
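uiomoveco() above only takes the vm_uiomove() remap path when every quantity involved is page-aligned, and falls back to an ordinary copyout() otherwise. A minimal userland model of that gate, assuming 4 KB pages (can_remap is an illustrative name, not a kernel function):

#include <stddef.h>
#include <stdint.h>

#define PAGE_MASK 0xfffUL	/* assumes 4 KB pages, as on i386 */

/*
 * The copy may be replaced by page remapping only when the length, the
 * user buffer, the file offset, and the kernel buffer all fall on page
 * boundaries.
 */
static int
can_remap(uintptr_t kva, uintptr_t uva, uint64_t foff, size_t len)
{
	return ((len & PAGE_MASK) == 0 &&
	    (uva & PAGE_MASK) == 0 &&
	    (foff & PAGE_MASK) == 0 &&
	    (kva & PAGE_MASK) == 0);
}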
/*
* Give next character to user as result of read.
*/

sys/kern/vfs_export.c

@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
* $Id: vfs_subr.c,v 1.114 1997/11/22 08:35:39 bde Exp $
* $Id: vfs_subr.c,v 1.115 1997/12/15 03:09:32 wollman Exp $
*/
/*
@@ -63,6 +63,8 @@
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <sys/sysctl.h>
@@ -921,6 +923,8 @@ vputrele(vp, put)
if ((vp->v_usecount == 2) &&
vp->v_object &&
(vp->v_object->flags & OBJ_VFS_REF)) {
vm_freeze_copyopts(vp->v_object, 0, vp->v_object->size);
vp->v_usecount--;
vp->v_object->flags &= ~OBJ_VFS_REF;
if (put) {

sys/kern/vfs_subr.c

@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
* $Id: vfs_subr.c,v 1.114 1997/11/22 08:35:39 bde Exp $
* $Id: vfs_subr.c,v 1.115 1997/12/15 03:09:32 wollman Exp $
*/
/*
@@ -63,6 +63,8 @@
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <sys/sysctl.h>
@@ -921,6 +923,8 @@ vputrele(vp, put)
if ((vp->v_usecount == 2) &&
vp->v_object &&
(vp->v_object->flags & OBJ_VFS_REF)) {
vm_freeze_copyopts(vp->v_object, 0, vp->v_object->size);
vp->v_usecount--;
vp->v_object->flags &= ~OBJ_VFS_REF;
if (put) {

sys/sys/uio.h

@@ -31,12 +31,14 @@
* SUCH DAMAGE.
*
* @(#)uio.h 8.5 (Berkeley) 2/22/94
* $Id$
* $Id: uio.h,v 1.6 1997/02/22 09:46:19 peter Exp $
*/
#ifndef _SYS_UIO_H_
#define _SYS_UIO_H_
struct vm_object;
/*
* XXX
* iov_base should be a void *.
@@ -77,6 +79,7 @@ struct uio {
#ifdef KERNEL
int uiomove __P((caddr_t, int, struct uio *));
int uiomoveco __P((caddr_t, int, struct uio *, struct vm_object *));
#else /* !KERNEL */

sys/ufs/ufs/ufs_readwrite.c

@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)ufs_readwrite.c 8.11 (Berkeley) 5/8/95
* $Id: ufs_readwrite.c,v 1.34 1997/12/15 03:09:54 wollman Exp $
* $Id: ufs_readwrite.c,v 1.35 1997/12/16 22:28:26 eivind Exp $
*/
#ifdef LFS_READWRITE
@@ -54,9 +54,16 @@
#define WRITE_S "ffs_write"
#include <vm/vm.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#endif
#include <sys/poll.h>
#include <sys/sysctl.h>
int vfs_ioopt = 1;
SYSCTL_INT(_vfs, OID_AUTO, ioopt,
CTLFLAG_RW, &vfs_ioopt, 0, "");
/*
* Vnode op for reading.
@@ -154,8 +161,20 @@ READ(ap)
break;
xfersize = size;
}
error =
uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
if (vfs_ioopt &&
(bp->b_flags & B_VMIO) &&
((blkoffset & PAGE_MASK) == 0) &&
((xfersize & PAGE_MASK) == 0)) {
error =
uiomoveco((char *)bp->b_data + blkoffset,
(int)xfersize, uio, vp->v_object);
} else {
error =
uiomove((char *)bp->b_data + blkoffset,
(int)xfersize, uio);
}
if (error)
break;
@@ -273,6 +292,14 @@ WRITE(ap)
if (size < xfersize)
xfersize = size;
if (vfs_ioopt &&
vp->v_object && (vp->v_object->flags & OBJ_OPT) &&
vp->v_object->shadow_count) {
vm_freeze_copyopts(vp->v_object,
OFF_TO_IDX(uio->uio_offset),
OFF_TO_IDX(uio->uio_offset + size));
}
error =
uiomove((char *)bp->b_data + blkoffset, (int)xfersize, uio);
#ifdef LFS_READWRITE
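Because the new vfs.ioopt knob is declared CTLFLAG_RW, the optimized path can be toggled at runtime rather than compiled out; for example:

sysctl -w vfs.ioopt=0	# fall back to plain uiomove()
sysctl -w vfs.ioopt=1	# re-enable the VM fast path (the default)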

sys/vm/vm_fault.c

@@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_fault.c,v 1.70 1997/08/25 22:15:19 bde Exp $
* $Id: vm_fault.c,v 1.71 1997/09/01 03:17:15 bde Exp $
*/
/*
@@ -202,7 +202,8 @@ RetryFault:;
* to COW .text. We simply keep .text from ever being COW'ed
* and take the heat that one cannot debug wired .text sections.
*/
if (((fault_flags & VM_FAULT_WIRE_MASK) == VM_FAULT_USER_WIRE) && (entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
if (((fault_flags & VM_FAULT_WIRE_MASK) == VM_FAULT_USER_WIRE) &&
(entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
if(entry->protection & VM_PROT_WRITE) {
int tresult;
vm_map_lookup_done(map, entry);
@@ -222,6 +223,10 @@ RetryFault:;
}
vp = vnode_pager_lock(first_object);
if ((fault_type & VM_PROT_WRITE) &&
(first_object->type == OBJT_VNODE)) {
vm_freeze_copyopts(first_object, first_pindex, first_pindex + 1);
}
lookup_still_valid = TRUE;

sys/vm/vm_map.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.97 1997/11/14 23:42:10 tegge Exp $
* $Id: vm_map.c,v 1.98 1997/11/24 15:03:13 bde Exp $
*/
/*
@@ -74,6 +74,8 @@
#include <sys/proc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
@@ -84,6 +86,7 @@
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/default_pager.h>
@@ -168,6 +171,7 @@ static void vm_map_entry_dispose __P((vm_map_t, vm_map_entry_t));
static void vm_map_entry_unwire __P((vm_map_t, vm_map_entry_t));
static void vm_map_copy_entry __P((vm_map_t, vm_map_t, vm_map_entry_t,
vm_map_entry_t));
static vm_page_t vm_freeze_page_alloc __P((vm_object_t, vm_pindex_t));
void
vm_map_startup()
@@ -844,7 +848,7 @@ _vm_map_clip_start(map, entry, start)
vm_object_t object;
object = vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(entry->end - entry->start));
btoc(entry->end - entry->start));
entry->object.vm_object = object;
entry->offset = 0;
}
@@ -902,7 +906,7 @@ _vm_map_clip_end(map, entry, end)
vm_object_t object;
object = vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(entry->end - entry->start));
btoc(entry->end - entry->start));
entry->object.vm_object = object;
entry->offset = 0;
}
@@ -1332,7 +1336,7 @@ vm_map_user_pageable(map, start, end, new_pageable)
vm_object_shadow(&entry->object.vm_object,
&entry->offset,
OFF_TO_IDX(entry->end
btoc(entry->end
- entry->start));
entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
@@ -1340,7 +1344,7 @@
entry->object.vm_object =
vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(entry->end - entry->start));
btoc(entry->end - entry->start));
entry->offset = (vm_offset_t) 0;
}
@@ -1525,13 +1529,12 @@ vm_map_pageable(map, start, end, new_pageable)
vm_object_shadow(&entry->object.vm_object,
&entry->offset,
OFF_TO_IDX(entry->end
- entry->start));
btoc(entry->end - entry->start));
entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
} else if (entry->object.vm_object == NULL) {
entry->object.vm_object =
vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(entry->end - entry->start));
btoc(entry->end - entry->start));
entry->offset = (vm_offset_t) 0;
}
default_pager_convert_to_swapq(entry->object.vm_object);
@@ -2076,13 +2079,13 @@ vmspace_fork(vm1)
object = old_entry->object.vm_object;
if (object == NULL) {
object = vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(old_entry->end - old_entry->start));
btoc(old_entry->end - old_entry->start));
old_entry->object.vm_object = object;
old_entry->offset = (vm_offset_t) 0;
} else if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
vm_object_shadow(&old_entry->object.vm_object,
&old_entry->offset,
OFF_TO_IDX(old_entry->end - old_entry->start));
btoc(old_entry->end - old_entry->start));
old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
object = old_entry->object.vm_object;
}
@@ -2366,7 +2369,7 @@ RetryLookup:;
vm_object_shadow(
&entry->object.vm_object,
&entry->offset,
OFF_TO_IDX(entry->end - entry->start));
btoc(entry->end - entry->start));
entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
vm_map_lock_downgrade(share_map);
@@ -2390,7 +2393,7 @@ RetryLookup:;
goto RetryLookup;
}
entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
OFF_TO_IDX(entry->end - entry->start));
btoc(entry->end - entry->start));
entry->offset = 0;
vm_map_lock_downgrade(share_map);
}
@@ -2446,6 +2449,293 @@ vm_map_lookup_done(map, entry)
vm_map_unlock_read(map);
}
/*
* Implement uiomove with VM operations.  This (together with the
* collateral changes) supports every combination of source object
* modification and COW type operation.
*/
int
vm_uiomove(mapa, srcobject, cp, cnt, uaddra)
vm_map_t mapa;
vm_object_t srcobject;
off_t cp;
int cnt;
vm_offset_t uaddra;
{
vm_map_t map;
vm_object_t first_object, object;
vm_map_entry_t first_entry, entry;
vm_prot_t prot;
boolean_t wired, su;
int tcnt, rv;
vm_offset_t uaddr, start, end;
vm_pindex_t first_pindex, osize, oindex;
off_t ooffset;
while (cnt > 0) {
map = mapa;
uaddr = uaddra;
if ((vm_map_lookup(&map, uaddr,
VM_PROT_READ|VM_PROT_WRITE, &first_entry, &first_object,
&first_pindex, &prot, &wired, &su)) != KERN_SUCCESS) {
return EFAULT;
}
#if 0
printf("foff: 0x%x, uaddr: 0x%x\norig entry: (0x%x, 0x%x), ",
(int) cp, uaddr, first_entry->start, first_entry->end);
#endif
vm_map_clip_start(map, first_entry, uaddr);
tcnt = cnt;
if ((uaddr + tcnt) > first_entry->end)
tcnt = first_entry->end - uaddr;
vm_map_clip_end(map, first_entry, uaddr + tcnt);
start = first_entry->start;
end = first_entry->end;
#if 0
printf("new entry: (0x%x, 0x%x)\n", start, end);
#endif
osize = btoc(tcnt);
oindex = OFF_TO_IDX(first_entry->offset);
/*
* If we are changing an existing map entry, just redirect
* the object, and change mappings.
*/
if ((first_object->ref_count == 1) &&
(first_object->backing_object == srcobject) &&
(first_object->size == osize) &&
(first_object->resident_page_count == 0)) {
/*
* Remove old window into the file
*/
pmap_remove (map->pmap, start, end);
/*
* Force copy on write for mmaped regions
*/
vm_object_pmap_copy_1 (first_object,
oindex, oindex + osize);
/*
* Point the object appropriately
*/
first_object->backing_object_offset = cp;
/*
* Otherwise, we have to do a logical mmap.
*/
} else {
object = srcobject;
object->flags |= OBJ_OPT;
object->ref_count++;
ooffset = cp;
vm_object_shadow(&object, &ooffset, osize);
pmap_remove (map->pmap, start, end);
vm_object_pmap_copy_1 (first_object,
oindex, oindex + osize);
vm_map_lookup_done(map, first_entry);
vm_map_lock(map);
if (first_entry == &map->header) {
map->first_free = &map->header;
} else if (map->first_free->start >= start) {
map->first_free = first_entry->prev;
}
SAVE_HINT(map, first_entry->prev);
vm_map_entry_delete(map, first_entry);
rv = vm_map_insert(map, object, 0, start, end,
VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);
if (rv != KERN_SUCCESS)
panic("vm_uiomove: could not insert new entry: %d", rv);
}
/*
* Map the window directly, if it is already in memory
*/
pmap_object_init_pt(map->pmap, start,
srcobject, (vm_pindex_t) OFF_TO_IDX(cp), end - start, 1);
vm_map_unlock(map);
cnt -= tcnt;
uaddra += tcnt;
cp += tcnt;
}
return 0;
}
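vm_uiomove() above distinguishes a cheap case, in which the map entry's existing object can simply be re-pointed at a new offset within the file object, from the general case that builds a fresh shadow object and replaces the map entry outright. A userland model of the test separating the two (struct and function names are illustrative; the fields mirror vm_object):

#include <stddef.h>

struct obj_model {
	int           ref_count;            /* this entry is the only reference */
	const void   *backing_object;       /* already shadows the file object */
	unsigned long size;                 /* object size, in pages */
	int           resident_page_count;  /* no pages would need migrating */
};

static int
can_redirect(const struct obj_model *o, const void *srcobject,
    unsigned long osize)
{
	return (o->ref_count == 1 &&
	    o->backing_object == srcobject &&
	    o->size == osize &&
	    o->resident_page_count == 0);
}

Either way no page contents are copied; the slow case just pays for a vm_object_shadow() and a map-entry replacement.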
/*
* local routine to allocate a page for an object.
*/
static vm_page_t
vm_freeze_page_alloc(object, pindex)
vm_object_t object;
vm_pindex_t pindex;
{
vm_page_t m;
while ((m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL)) == NULL) {
VM_WAIT;
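/* Memory came free while we slept; if the page materialized in the
 * meantime, fail so the caller re-does its lookup and finds it. */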
if ((m = vm_page_lookup(object, pindex)) != NULL)
return NULL;
}
m->valid = VM_PAGE_BITS_ALL;
m->dirty = 0;
vm_page_deactivate(m);
return m;
}
/*
* Performs the copy_on_write operations necessary to allow the virtual copies
* into user space to work. This has to be called for write(2) system calls
* from other processes, file unlinking, and file size shrinkage.
*/
void
vm_freeze_copyopts(object, froma, toa)
vm_object_t object;
vm_pindex_t froma, toa;
{
int s;
vm_object_t robject, robjectn;
vm_pindex_t idx, from, to;
if ((object == NULL) || ((object->flags & OBJ_OPT) == 0))
return;
#if 0
printf("sc: %d, rc: %d\n", object->shadow_count, object->ref_count);
#endif
if (object->shadow_count > object->ref_count)
panic("vm_freeze_copyopts: sc > rc");
for (robject = TAILQ_FIRST(&object->shadow_head);
robject;
robject = robjectn) {
vm_pindex_t bo_pindex;
vm_pindex_t dstpindex;
vm_page_t m_in, m_out;
robjectn = TAILQ_NEXT(robject, shadow_list);
bo_pindex = OFF_TO_IDX(robject->backing_object_offset);
if (bo_pindex > toa)
continue;
if ((bo_pindex + robject->size) < froma)
continue;
robject->ref_count++;
while (robject->paging_in_progress) {
robject->flags |= OBJ_PIPWNT;
tsleep(robject, PVM, "objfrz", 0);
}
if (robject->ref_count == 1) {
vm_object_deallocate(robject);
continue;
}
robject->paging_in_progress++;
from = froma;
if (from < bo_pindex)
from = bo_pindex;
to = toa;
for (idx = from; idx < to; idx++) {
dstpindex = idx - bo_pindex;
if (dstpindex >= robject->size)
break;
m_in = vm_page_lookup(object, idx);
if (m_in == NULL)
continue;
if( m_in->flags & PG_BUSY) {
s = splhigh();
while (m_in && (m_in->flags & PG_BUSY)) {
m_in->flags |= PG_WANTED;
tsleep(m_in, PVM, "pwtfrz", 0);
m_in = vm_page_lookup(object, idx);
}
splx(s);
if (m_in == NULL)
continue;
}
m_in->flags |= PG_BUSY;
retryout:
m_out = vm_page_lookup(robject, dstpindex);
if (m_out && (m_out->flags & PG_BUSY)) {
s = splhigh();
while (m_out && (m_out->flags & PG_BUSY)) {
m_out->flags |= PG_WANTED;
tsleep(m_out, PVM, "pwtfrz", 0);
m_out = vm_page_lookup(robject, dstpindex);
}
splx(s);
}
if (m_out == NULL) {
m_out = vm_freeze_page_alloc(robject, dstpindex);
if (m_out == NULL)
goto retryout;
}
if (m_out->valid == 0) {
vm_page_protect(m_in, VM_PROT_NONE);
pmap_copy_page(VM_PAGE_TO_PHYS(m_in),
VM_PAGE_TO_PHYS(m_out));
m_out->valid = VM_PAGE_BITS_ALL;
}
PAGE_WAKEUP(m_out);
PAGE_WAKEUP(m_in);
}
vm_object_pip_wakeup(robject);
if (((from - bo_pindex) == 0) && ((to - bo_pindex) == robject->size)) {
#if 0
printf("removing obj: %d, %d\n", object->shadow_count, object->ref_count);
#endif
object->shadow_count--;
TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
robject->backing_object = NULL;
robject->backing_object_offset = 0;
if (object->ref_count == 1) {
if (object->shadow_count == 0)
object->flags &= ~OBJ_OPT;
vm_object_deallocate(object);
vm_object_deallocate(robject);
return;
} else {
object->ref_count--;
}
}
vm_object_deallocate(robject);
}
if (object->shadow_count == 0)
object->flags &= ~OBJ_OPT;
}
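vm_freeze_copyopts() only copies pages for shadow objects whose window into the file intersects the pindex range being frozen; the two continue tests at the top of its loop implement that check. A userland model (illustrative name; arguments are page indices):

static int
shadow_overlaps(unsigned long bo_pindex, unsigned long shadow_size,
    unsigned long froma, unsigned long toa)
{
	/*
	 * Mirrors the two 'continue' tests: skip shadows that begin past
	 * the end of the frozen range or end before its start.
	 */
	return (bo_pindex <= toa && bo_pindex + shadow_size >= froma);
}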
#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>

sys/vm/vm_map.h

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.h,v 1.27 1997/08/05 00:01:58 dyson Exp $
* $Id: vm_map.h,v 1.28 1997/08/18 02:06:24 dyson Exp $
*/
/*
@@ -336,6 +336,8 @@ int vm_map_submap __P((vm_map_t, vm_offset_t, vm_offset_t, vm_map_t));
void vm_map_madvise __P((vm_map_t, pmap_t, vm_offset_t, vm_offset_t, int));
void vm_map_simplify_entry __P((vm_map_t, vm_map_entry_t));
void vm_init2 __P((void));
int vm_uiomove __P((vm_map_t, vm_object_t, off_t, int, vm_offset_t));
void vm_freeze_copyopts __P((vm_object_t, vm_pindex_t, vm_pindex_t));
#endif
#endif /* _VM_MAP_ */

sys/vm/vm_object.c

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.100 1997/11/07 09:21:00 phk Exp $
* $Id: vm_object.c,v 1.101 1997/11/18 11:02:19 bde Exp $
*/
/*
@@ -359,6 +359,15 @@ vm_object_deallocate(object)
*/
object->flags |= OBJ_DEAD;
if (object->type == OBJT_VNODE) {
struct vnode *vp = object->handle;
if (vp->v_flag & VVMIO) {
object->ref_count++;
vm_freeze_copyopts(object, 0, object->size);
object->ref_count--;
}
}
temp = object->backing_object;
if (temp) {
TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
@@ -680,6 +689,30 @@ vm_object_pmap_copy(object, start, end)
object->flags &= ~OBJ_WRITEABLE;
}
/*
* Same as vm_object_pmap_copy, except range checking really
* works, and is meant for small sections of an object.
*/
void
vm_object_pmap_copy_1(object, start, end)
register vm_object_t object;
register vm_pindex_t start;
register vm_pindex_t end;
{
vm_pindex_t idx;
register vm_page_t p;
if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
return;
for (idx = start; idx < end; idx++) {
p = vm_page_lookup(object, idx);
if (p == NULL)
continue;
vm_page_protect(p, VM_PROT_READ);
}
}
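The loop above downgrades every resident page in [start, end) to read-only in the pmap, so the next store through any mapping faults and lets vm_fault() perform the real copy. Unlike vm_object_pmap_copy(), it deliberately leaves OBJ_WRITEABLE set, since only a subrange of the object has been protected.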
/*
* vm_object_pmap_remove:
*

sys/vm/vm_object.h

@@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.h,v 1.37 1997/09/01 02:55:50 bde Exp $
* $Id: vm_object.h,v 1.38 1997/09/21 04:24:24 dyson Exp $
*/
/*
@@ -131,9 +131,9 @@ struct vm_object {
#define OBJ_MIGHTBEDIRTY 0x0100 /* object might be dirty */
#define OBJ_CLEANING 0x0200
#define OBJ_VFS_REF 0x0400 /* object is refed by vfs layer */
#define OBJ_VNODE_GONE 0x0800 /* vnode is gone */
#define OBJ_OPT 0x1000 /* I/O optimization */
#define OBJ_NORMAL 0x0 /* default behavior */
#define OBJ_SEQUENTIAL 0x1 /* expect sequential accesses */
#define OBJ_RANDOM 0x2 /* expect random accesses */
@@ -179,6 +179,7 @@ void vm_object_init __P((void));
void vm_object_page_clean __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t, boolean_t));
void vm_object_page_remove __P((vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t));
void vm_object_pmap_copy __P((vm_object_t, vm_pindex_t, vm_pindex_t));
void vm_object_pmap_copy_1 __P((vm_object_t, vm_pindex_t, vm_pindex_t));
void vm_object_pmap_remove __P((vm_object_t, vm_pindex_t, vm_pindex_t));
void vm_object_reference __P((vm_object_t));
void vm_object_shadow __P((vm_object_t *, vm_ooffset_t *, vm_size_t));

sys/vm/vnode_pager.c

@@ -38,7 +38,7 @@
* SUCH DAMAGE.
*
* from: @(#)vnode_pager.c 7.5 (Berkeley) 4/20/91
* $Id: vnode_pager.c,v 1.75 1997/10/06 02:38:30 dyson Exp $
* $Id: vnode_pager.c,v 1.76 1997/12/02 21:07:20 phk Exp $
*/
/*
@@ -64,6 +64,7 @@
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>
@@ -291,10 +292,12 @@ vnode_pager_setsize(vp, nsize)
vm_ooffset_t nsizerounded;
nsizerounded = IDX_TO_OFF(OFF_TO_IDX(nsize + PAGE_MASK));
if (nsizerounded < object->un_pager.vnp.vnp_size) {
vm_object_page_remove(object,
OFF_TO_IDX(nsize + PAGE_MASK),
OFF_TO_IDX(object->un_pager.vnp.vnp_size),
FALSE);
vm_pindex_t st, end;
st = OFF_TO_IDX(nsize + PAGE_MASK);
end = OFF_TO_IDX(object->un_pager.vnp.vnp_size);
vm_freeze_copyopts(object, OFF_TO_IDX(nsize), object->size);
vm_object_page_remove(object, st, end, FALSE);
}
/*
* this gets rid of garbage at the end of a page that is now
@@ -371,6 +374,7 @@ vnode_pager_uncache(vp, p)
return;
vm_object_reference(object);
vm_freeze_copyopts(object, 0, object->size);
/*
* XXX We really should handle locking on