Remove ENABLE_VFS_IOOPT. It is a long unfinished work-in-progress.
Discussed on: arch@
This commit is contained in:
parent 1b2c2ab29a
commit 09c80124a3
@@ -756,21 +756,6 @@ options EXT2FS
# unsuitable for inclusion on machines with untrusted local users.
options VFS_AIO

# Enable the code UFS IO optimization through the VM system. This allows
# use VM operations instead of copying operations when possible.
#
# Even with this enabled, actual use of the code is still controlled by the
# sysctl vfs.ioopt. 0 gives no optimization, 1 gives normal (use VM
# operations if a request happens to fit), 2 gives agressive optimization
# (the operations are split to do as much as possible through the VM system.)
#
# Enabling this will probably not give an overall speedup except for
# special workloads.
#
# WARNING: Do not enable this, it is known to be broken, and will result
# in system instability, as well as possible data loss.
options ENABLE_VFS_IOOPT

# Cryptographically secure random number generator; /dev/[u]random
device random
@@ -460,7 +460,6 @@ DEBUG_LOCKS opt_global.h
DEBUG_VFS_LOCKS opt_global.h
LOOKUP_SHARED opt_global.h
DIAGNOSTIC opt_global.h
ENABLE_VFS_IOOPT opt_global.h
INVARIANT_SUPPORT opt_global.h
INVARIANTS opt_global.h
MCLSHIFT opt_global.h
@@ -59,8 +59,6 @@
#include <vm/vm_map.h>
#ifdef ZERO_COPY_SOCKETS
#include <vm/vm_param.h>
#endif
#if defined(ZERO_COPY_SOCKETS) || defined(ENABLE_VFS_IOOPT)
#include <vm/vm_object.h>
#endif
@@ -197,7 +195,7 @@ uiomove(void *cp, int n, struct uio *uio)
	return (error);
}

#if defined(ENABLE_VFS_IOOPT) || defined(ZERO_COPY_SOCKETS)
#ifdef ZERO_COPY_SOCKETS
/*
 * Experimental support for zero-copy I/O
 */
@@ -209,9 +207,6 @@ userspaceco(void *cp, u_int cnt, struct uio *uio, struct vm_object *obj,
	int error;

	iov = uio->uio_iov;

#ifdef ZERO_COPY_SOCKETS

	if (uio->uio_rw == UIO_READ) {
		if ((so_zero_copy_receive != 0)
		 && (obj != NULL)
@@ -240,43 +235,12 @@ userspaceco(void *cp, u_int cnt, struct uio *uio, struct vm_object *obj,
			 */
			if (error != 0)
				error = copyout(cp, iov->iov_base, cnt);
#ifdef ENABLE_VFS_IOOPT
		} else if ((vfs_ioopt != 0)
		    && ((cnt & PAGE_MASK) == 0)
		    && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
		    && ((uio->uio_offset & PAGE_MASK) == 0)
		    && ((((intptr_t) cp) & PAGE_MASK) == 0)) {
			error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
			    uio->uio_offset, cnt,
			    (vm_offset_t) iov->iov_base, NULL);
#endif /* ENABLE_VFS_IOOPT */
		} else {
			error = copyout(cp, iov->iov_base, cnt);
		}
	} else {
		error = copyin(iov->iov_base, cp, cnt);
	}
#else /* ZERO_COPY_SOCKETS */
	if (uio->uio_rw == UIO_READ) {
#ifdef ENABLE_VFS_IOOPT
		if ((vfs_ioopt != 0)
		    && ((cnt & PAGE_MASK) == 0)
		    && ((((intptr_t) iov->iov_base) & PAGE_MASK) == 0)
		    && ((uio->uio_offset & PAGE_MASK) == 0)
		    && ((((intptr_t) cp) & PAGE_MASK) == 0)) {
			error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
			    uio->uio_offset, cnt,
			    (vm_offset_t) iov->iov_base, NULL);
		} else
#endif /* ENABLE_VFS_IOOPT */
		{
			error = copyout(cp, iov->iov_base, cnt);
		}
	} else {
		error = copyin(iov->iov_base, cp, cnt);
	}
#endif /* ZERO_COPY_SOCKETS */

	return (error);
}
@@ -334,75 +298,7 @@ uiomoveco(void *cp, int n, struct uio *uio, struct vm_object *obj,
	}
	return (0);
}
#endif /* ENABLE_VFS_IOOPT || ZERO_COPY_SOCKETS */

#ifdef ENABLE_VFS_IOOPT

/*
 * Experimental support for zero-copy I/O
 */
int
uioread(int n, struct uio *uio, struct vm_object *obj, int *nread)
{
	int npagesmoved;
	struct iovec *iov;
	u_int cnt, tcnt;
	int error;

	*nread = 0;
	if (vfs_ioopt < 2)
		return 0;

	error = 0;

	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;

		if ((uio->uio_segflg == UIO_USERSPACE) &&
			((((intptr_t) iov->iov_base) & PAGE_MASK) == 0) &&
			((uio->uio_offset & PAGE_MASK) == 0) ) {

			if (cnt < PAGE_SIZE)
				break;

			cnt &= ~PAGE_MASK;

			if (ticks - PCPU_GET(switchticks) >= hogticks)
				uio_yield();
			error = vm_uiomove(&curproc->p_vmspace->vm_map, obj,
				uio->uio_offset, cnt,
				(vm_offset_t) iov->iov_base, &npagesmoved);

			if (npagesmoved == 0)
				break;

			tcnt = npagesmoved * PAGE_SIZE;
			cnt = tcnt;

			if (error)
				break;

			iov->iov_base = (char *)iov->iov_base + cnt;
			iov->iov_len -= cnt;
			uio->uio_resid -= cnt;
			uio->uio_offset += cnt;
			*nread += cnt;
			n -= cnt;
		} else {
			break;
		}
	}
	return error;
}
#endif /* ENABLE_VFS_IOOPT */
#endif /* ZERO_COPY_SOCKETS */

/*
 * Give next character to user as result of read.
@@ -130,12 +130,6 @@ SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW, &reassignbufcalls, 0, "
static int nameileafonly;
SYSCTL_INT(_vfs, OID_AUTO, nameileafonly, CTLFLAG_RW, &nameileafonly, 0, "");

#ifdef ENABLE_VFS_IOOPT
/* See NOTES for a description of this setting. */
int vfs_ioopt;
SYSCTL_INT(_vfs, OID_AUTO, ioopt, CTLFLAG_RW, &vfs_ioopt, 0, "");
#endif

/*
 * Cache for the mount type id assigned to NFS. This is used for
 * special checks in nfs/nfs_nqlease.c and vm/vnode_pager.c.
@@ -88,7 +88,6 @@ struct vm_object;
void uio_yield(void);
int uiomove(void *, int, struct uio *);
int uiomoveco(void *, int, struct uio *, struct vm_object *, int);
int uioread(int, struct uio *, struct vm_object *, int *);
int copyinfrom(const void *src, void *dst, size_t len, int seg);
int copyinstrfrom(const void *src, void *dst, size_t len,
	size_t *copied, int seg);
@@ -389,45 +389,6 @@ ffs_read(ap)
		vm_object_reference(object);
	}

#ifdef ENABLE_VFS_IOOPT
	/*
	 * If IO optimisation is turned on,
	 * and we are NOT a VM based IO request,
	 * (i.e. not headed for the buffer cache)
	 * but there IS a vm object associated with it.
	 */
	if ((ioflag & IO_VMIO) == 0 && (vfs_ioopt > 1) && object) {
		int nread, toread;

		toread = uio->uio_resid;
		if (toread > bytesinfile)
			toread = bytesinfile;
		if (toread >= PAGE_SIZE) {
			/*
			 * Then if it's at least a page in size, try
			 * get the data from the object using vm tricks
			 */
			error = uioread(toread, uio, object, &nread);
			if ((uio->uio_resid == 0) || (error != 0)) {
				/*
				 * If we finished or there was an error
				 * then finish up (the reference previously
				 * obtained on object must be released).
				 */
				if ((error == 0 ||
				    uio->uio_resid != orig_resid) &&
				    (vp->v_mount->mnt_flag & MNT_NOATIME) == 0)
					ip->i_flag |= IN_ACCESS;

				if (object) {
					vm_object_vndeallocate(object);
				}
				return error;
			}
		}
	}
#endif

	/*
	 * Ok so we couldn't do it all in one vm trick...
	 * so cycle around trying smaller bites..
|
||||
for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
|
||||
if ((bytesinfile = ip->i_size - uio->uio_offset) <= 0)
|
||||
break;
|
||||
#ifdef ENABLE_VFS_IOOPT
|
||||
if ((ioflag & IO_VMIO) == 0 && (vfs_ioopt > 1) && object) {
|
||||
/*
|
||||
* Obviously we didn't finish above, but we
|
||||
* didn't get an error either. Try the same trick again.
|
||||
* but this time we are looping.
|
||||
*/
|
||||
int nread, toread;
|
||||
toread = uio->uio_resid;
|
||||
if (toread > bytesinfile)
|
||||
toread = bytesinfile;
|
||||
|
||||
/*
|
||||
* Once again, if there isn't enough for a
|
||||
* whole page, don't try optimising.
|
||||
*/
|
||||
if (toread >= PAGE_SIZE) {
|
||||
error = uioread(toread, uio, object, &nread);
|
||||
if ((uio->uio_resid == 0) || (error != 0)) {
|
||||
/*
|
||||
* If we finished or there was an
|
||||
* error then finish up (the reference
|
||||
* previously obtained on object must
|
||||
* be released).
|
||||
*/
|
||||
if ((error == 0 ||
|
||||
uio->uio_resid != orig_resid) &&
|
||||
(vp->v_mount->mnt_flag &
|
||||
MNT_NOATIME) == 0)
|
||||
ip->i_flag |= IN_ACCESS;
|
||||
if (object) {
|
||||
vm_object_vndeallocate(object);
|
||||
}
|
||||
return error;
|
||||
}
|
||||
/*
|
||||
* To get here we didnt't finish or err.
|
||||
* If we did get some data,
|
||||
* loop to try another bite.
|
||||
*/
|
||||
if (nread > 0) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
lbn = lblkno(fs, uio->uio_offset);
|
||||
nextlbn = lbn + 1;
|
||||
@@ -575,22 +490,6 @@ ffs_read(ap)
			xfersize = size;
		}

#ifdef ENABLE_VFS_IOOPT
		if (vfs_ioopt && object &&
		    (bp->b_flags & B_VMIO) &&
		    ((blkoffset & PAGE_MASK) == 0) &&
		    ((xfersize & PAGE_MASK) == 0)) {
			/*
			 * If VFS IO optimisation is turned on,
			 * and it's an exact page multiple
			 * And a normal VM based op,
			 * then use uiomiveco()
			 */
			error =
			    uiomoveco((char *)bp->b_data + blkoffset,
				(int)xfersize, uio, object, 0);
		} else
#endif
		{
			/*
			 * otherwise use the general form
@@ -755,13 +654,6 @@ ffs_write(ap)
	if ((ioflag & IO_SYNC) && !DOINGASYNC(vp))
		flags |= IO_SYNC;

#ifdef ENABLE_VFS_IOOPT
	if (object && (object->flags & OBJ_OPT)) {
		vm_freeze_copyopts(object,
		    OFF_TO_IDX(uio->uio_offset),
		    OFF_TO_IDX(uio->uio_offset + uio->uio_resid + PAGE_MASK));
	}
#endif
	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
@@ -280,13 +280,6 @@ RetryFault:;
	fs.vp = vnode_pager_lock(fs.first_object);
	vm_object_pip_add(fs.first_object, 1);

#ifdef ENABLE_VFS_IOOPT
	if ((fault_type & VM_PROT_WRITE) &&
		(fs.first_object->type == OBJT_VNODE)) {
		vm_freeze_copyopts(fs.first_object,
			fs.first_pindex, fs.first_pindex + 1);
	}
#endif
	fs.lookup_still_valid = TRUE;

	if (wired)
249 sys/vm/vm_map.c
@@ -2879,255 +2879,6 @@ vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
	vm_map_unlock_read(map);
}

#ifdef ENABLE_VFS_IOOPT
/*
 * Experimental support for zero-copy I/O
 *
 * Implement uiomove with VM operations. This handles (and collateral changes)
 * support every combination of source object modification, and COW type
 * operations.
 */
int
vm_uiomove(
	vm_map_t mapa,
	vm_object_t srcobject,
	off_t cp,
	int cnta,
	vm_offset_t uaddra,
	int *npages)
{
	vm_map_t map;
	vm_object_t first_object, oldobject, object;
	vm_map_entry_t entry;
	vm_prot_t prot;
	boolean_t wired;
	int tcnt, rv;
	vm_offset_t uaddr, start, end, tend;
	vm_pindex_t first_pindex, oindex;
	vm_size_t osize;
	off_t ooffset;
	int cnt;

	GIANT_REQUIRED;

	if (npages)
		*npages = 0;

	cnt = cnta;
	uaddr = uaddra;

	while (cnt > 0) {
		map = mapa;

		if ((vm_map_lookup(&map, uaddr,
			VM_PROT_READ, &entry, &first_object,
			&first_pindex, &prot, &wired)) != KERN_SUCCESS) {
			return EFAULT;
		}

		vm_map_clip_start(map, entry, uaddr);

		tcnt = cnt;
		tend = uaddr + tcnt;
		if (tend > entry->end) {
			tcnt = entry->end - uaddr;
			tend = entry->end;
		}

		vm_map_clip_end(map, entry, tend);

		start = entry->start;
		end = entry->end;

		osize = atop(tcnt);

		oindex = OFF_TO_IDX(cp);
		if (npages) {
			vm_size_t idx;
			for (idx = 0; idx < osize; idx++) {
				vm_page_t m;
				if ((m = vm_page_lookup(srcobject, oindex + idx)) == NULL) {
					vm_map_lookup_done(map, entry);
					return 0;
				}
				/*
				 * disallow busy or invalid pages, but allow
				 * m->busy pages if they are entirely valid.
				 */
				if ((m->flags & PG_BUSY) ||
					((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL)) {
					vm_map_lookup_done(map, entry);
					return 0;
				}
			}
		}

		/*
		 * If we are changing an existing map entry, just redirect
		 * the object, and change mappings.
		 */
		if ((first_object->type == OBJT_VNODE) &&
			((oldobject = entry->object.vm_object) == first_object)) {

			if ((entry->offset != cp) || (oldobject != srcobject)) {
				/*
				 * Remove old window into the file
				 */
				vm_page_lock_queues();
				pmap_remove(map->pmap, uaddr, tend);
				vm_page_unlock_queues();

				/*
				 * Force copy on write for mmaped regions
				 */
				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);

				/*
				 * Point the object appropriately
				 */
				if (oldobject != srcobject) {

					/*
					 * Set the object optimization hint flag
					 */
					vm_object_set_flag(srcobject, OBJ_OPT);
					vm_object_reference(srcobject);
					entry->object.vm_object = srcobject;

					if (oldobject) {
						vm_object_deallocate(oldobject);
					}
				}

				entry->offset = cp;
				map->timestamp++;
			} else {
				vm_page_lock_queues();
				pmap_remove(map->pmap, uaddr, tend);
				vm_page_unlock_queues();
			}

		} else if ((first_object->ref_count == 1) &&
			(first_object->size == osize) &&
			((first_object->type == OBJT_DEFAULT) ||
			 (first_object->type == OBJT_SWAP)) ) {

			oldobject = first_object->backing_object;

			if ((first_object->backing_object_offset != cp) ||
				(oldobject != srcobject)) {
				/*
				 * Remove old window into the file
				 */
				vm_page_lock_queues();
				pmap_remove(map->pmap, uaddr, tend);
				vm_page_unlock_queues();

				/*
				 * Remove unneeded old pages
				 */
				vm_object_lock(first_object);
				vm_object_page_remove(first_object, 0, 0, 0);
				vm_object_unlock(first_object);

				/*
				 * Invalidate swap space
				 */
				if (first_object->type == OBJT_SWAP) {
					swap_pager_freespace(first_object,
						0,
						first_object->size);
				}

				/*
				 * Force copy on write for mmaped regions
				 */
				vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);

				/*
				 * Point the object appropriately
				 */
				if (oldobject != srcobject) {
					/*
					 * Set the object optimization hint flag
					 */
					vm_object_set_flag(srcobject, OBJ_OPT);
					vm_object_reference(srcobject);

					if (oldobject) {
						TAILQ_REMOVE(&oldobject->shadow_head,
							first_object, shadow_list);
						oldobject->shadow_count--;
						/* XXX bump generation? */
						vm_object_deallocate(oldobject);
					}

					TAILQ_INSERT_TAIL(&srcobject->shadow_head,
						first_object, shadow_list);
					srcobject->shadow_count++;
					/* XXX bump generation? */

					first_object->backing_object = srcobject;
				}
				first_object->backing_object_offset = cp;
				map->timestamp++;
			} else {
				vm_page_lock_queues();
				pmap_remove(map->pmap, uaddr, tend);
				vm_page_unlock_queues();
			}
		/*
		 * Otherwise, we have to do a logical mmap.
		 */
		} else {

			vm_object_set_flag(srcobject, OBJ_OPT);
			vm_object_reference(srcobject);

			vm_page_lock_queues();
			pmap_remove(map->pmap, uaddr, tend);
			vm_page_unlock_queues();

			vm_object_pmap_copy_1 (srcobject, oindex, oindex + osize);
			vm_map_lock_upgrade(map);

			if (entry == &map->header) {
				map->first_free = &map->header;
			} else if (map->first_free->start >= start) {
				map->first_free = entry->prev;
			}

			vm_map_entry_delete(map, entry);

			object = srcobject;
			ooffset = cp;

			rv = vm_map_insert(map, object, ooffset, start, tend,
				VM_PROT_ALL, VM_PROT_ALL, MAP_COPY_ON_WRITE);

			if (rv != KERN_SUCCESS)
				panic("vm_uiomove: could not insert new entry: %d", rv);
		}

		/*
		 * Map the window directly, if it is already in memory
		 */
		pmap_object_init_pt(map->pmap, uaddr,
			srcobject, oindex, tcnt, 0);

		map->timestamp++;
		vm_map_unlock(map);

		cnt -= tcnt;
		uaddr += tcnt;
		cp += tcnt;
		if (npages)
			*npages += osize;
	}
	return 0;
}
#endif

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>
@@ -322,6 +322,5 @@ int vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
int vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    boolean_t user_wire);
int vmspace_swap_count (struct vmspace *vmspace);
int vm_uiomove(vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *);
#endif /* _KERNEL */
#endif /* _VM_MAP_ */
@@ -407,9 +407,6 @@ vm_object_vndeallocate(vm_object_t object)
	if (object->ref_count == 0) {
		mp_fixme("Unlocked vflag access.");
		vp->v_vflag &= ~VV_TEXT;
#ifdef ENABLE_VFS_IOOPT
		vm_object_clear_flag(object, OBJ_OPT);
#endif
	}
	/*
	 * vrele may need a vop lock
@@ -502,10 +499,6 @@ vm_object_deallocate(vm_object_t object)
		if (temp) {
			TAILQ_REMOVE(&temp->shadow_head, object, shadow_list);
			temp->shadow_count--;
#ifdef ENABLE_VFS_IOOPT
			if (temp->ref_count == 0)
				vm_object_clear_flag(temp, OBJ_OPT);
#endif
			temp->generation++;
			object->backing_object = NULL;
		}
@@ -556,12 +549,6 @@ vm_object_terminate(vm_object_t object)
	if (object->type == OBJT_VNODE) {
		struct vnode *vp;

#ifdef ENABLE_VFS_IOOPT
		/*
		 * Freeze optimized copies.
		 */
		vm_freeze_copyopts(object, 0, object->size);
#endif
		/*
		 * Clean pages and flush buffers.
		 */
@@ -937,39 +924,6 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
	return(maxf + 1);
}

#ifdef ENABLE_VFS_IOOPT
/*
 * Same as vm_object_pmap_copy, except range checking really
 * works, and is meant for small sections of an object.
 *
 * This code protects resident pages by making them read-only
 * and is typically called on a fork or split when a page
 * is converted to copy-on-write.
 *
 * NOTE: If the page is already at VM_PROT_NONE, calling
 * pmap_page_protect will have no effect.
 */
void
vm_object_pmap_copy_1(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_pindex_t idx;
	vm_page_t p;

	GIANT_REQUIRED;

	if (object == NULL || (object->flags & OBJ_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	for (idx = start; idx < end; idx++) {
		p = vm_page_lookup(object, idx);
		if (p == NULL)
			continue;
		pmap_page_protect(p, VM_PROT_READ);
	}
	vm_page_unlock_queues();
}
#endif

/*
 * vm_object_madvise:
 *
@@ -1852,94 +1806,6 @@ vm_object_set_writeable_dirty(vm_object_t object)
	}
}

#ifdef ENABLE_VFS_IOOPT
/*
 * Experimental support for zero-copy I/O
 *
 * Performs the copy_on_write operations necessary to allow the virtual copies
 * into user space to work. This has to be called for write(2) system calls
 * from other processes, file unlinking, and file size shrinkage.
 */
void
vm_freeze_copyopts(vm_object_t object, vm_pindex_t froma, vm_pindex_t toa)
{
	int rv;
	vm_object_t robject;
	vm_pindex_t idx;

	GIANT_REQUIRED;
	if ((object == NULL) ||
		((object->flags & OBJ_OPT) == 0))
		return;

	if (object->shadow_count > object->ref_count)
		panic("vm_freeze_copyopts: sc > rc");

	while ((robject = TAILQ_FIRST(&object->shadow_head)) != NULL) {
		vm_pindex_t bo_pindex;
		vm_page_t m_in, m_out;

		bo_pindex = OFF_TO_IDX(robject->backing_object_offset);

		vm_object_reference(robject);

		vm_object_pip_wait(robject, "objfrz");

		if (robject->ref_count == 1) {
			vm_object_deallocate(robject);
			continue;
		}

		vm_object_pip_add(robject, 1);

		for (idx = 0; idx < robject->size; idx++) {

			m_out = vm_page_grab(robject, idx,
				VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

			if (m_out->valid == 0) {
				m_in = vm_page_grab(object, bo_pindex + idx,
					VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
				vm_page_lock_queues();
				if (m_in->valid == 0) {
					vm_page_unlock_queues();
					rv = vm_pager_get_pages(object, &m_in, 1, 0);
					if (rv != VM_PAGER_OK) {
						printf("vm_freeze_copyopts: cannot read page from file: %lx\n", (long)m_in->pindex);
						continue;
					}
					vm_page_lock_queues();
					vm_page_deactivate(m_in);
				}

				pmap_remove_all(m_in);
				vm_page_unlock_queues();
				pmap_copy_page(m_in, m_out);
				vm_page_lock_queues();
				m_out->valid = m_in->valid;
				vm_page_dirty(m_out);
				vm_page_activate(m_out);
				vm_page_wakeup(m_in);
			} else
				vm_page_lock_queues();
			vm_page_wakeup(m_out);
			vm_page_unlock_queues();
		}

		object->shadow_count--;
		object->ref_count--;
		TAILQ_REMOVE(&object->shadow_head, robject, shadow_list);
		robject->backing_object = NULL;
		robject->backing_object_offset = 0;

		vm_object_pip_wakeup(robject);
		vm_object_deallocate(robject);
	}

	vm_object_clear_flag(object, OBJ_OPT);
}
#endif

#include "opt_ddb.h"
#ifdef DDB
#include <sys/kernel.h>
@@ -151,7 +151,6 @@ struct vm_object {
#define OBJ_WRITEABLE 0x0080 /* object has been made writable */
#define OBJ_MIGHTBEDIRTY 0x0100 /* object might be dirty */
#define OBJ_CLEANING 0x0200
#define OBJ_OPT 0x1000 /* I/O optimization */
#define OBJ_ONEMAPPING 0x2000 /* One USE (a single, non-forked) mapping flag */

#define IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
@@ -176,8 +175,6 @@ extern vm_object_t kmem_object;
#define vm_object_unlock(object) \
	mtx_unlock((object) == kmem_object ? &kmem_object->mtx : &Giant)

void vm_freeze_copyopts(vm_object_t, vm_pindex_t, vm_pindex_t);

void vm_object_set_flag(vm_object_t object, u_short bits);
void vm_object_clear_flag(vm_object_t object, u_short bits);
void vm_object_pip_add(vm_object_t object, short i);
@@ -199,7 +196,6 @@ void vm_object_set_writeable_dirty (vm_object_t);
void vm_object_init (void);
void vm_object_page_clean (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
void vm_object_page_remove (vm_object_t, vm_pindex_t, vm_pindex_t, boolean_t);
void vm_object_pmap_copy_1 (vm_object_t, vm_pindex_t, vm_pindex_t);
void vm_object_reference (vm_object_t);
void vm_object_shadow (vm_object_t *, vm_ooffset_t *, vm_size_t);
void vm_object_split(vm_map_entry_t);
@@ -307,9 +307,6 @@ vnode_pager_setsize(vp, nsize)
	 * File has shrunk. Toss any cached pages beyond the new EOF.
	 */
	if (nsize < object->un_pager.vnp.vnp_size) {
#ifdef ENABLE_VFS_IOOPT
		vm_freeze_copyopts(object, OFF_TO_IDX(nsize), object->size);
#endif
		if (nobjsize < object->size) {
			vm_object_lock(object);
			vm_object_page_remove(object, nobjsize, object->size,