Use the vop_*vobject() VOPs to get a reference to the VM object from the upper or lower fs.

This commit is contained in:
Boris Popov 2001-05-17 04:52:57 +00:00
parent 9dbd7336ee
commit 3413421bda
6 changed files with 120 additions and 120 deletions

View File

@ -133,7 +133,6 @@ extern int union_cn_close __P((struct vnode *, int, struct ucred *,
extern void union_removed_upper __P((struct union_node *un));
extern struct vnode *union_lowervp __P((struct vnode *));
extern void union_newsize __P((struct vnode *, off_t, off_t));
extern void union_vm_coherency __P((struct vnode *, struct uio *, int));
extern int (*union_dircheckp) __P((struct proc *, struct vnode **,
struct file *));

View File

@ -271,7 +271,10 @@ union_newsize(vp, uppersz, lowersz)
if (sz != VNOVAL) {
UDEBUG(("union: %s size now %ld\n",
(uppersz != VNOVAL ? "upper" : "lower"), (long)sz));
vnode_pager_setsize(vp, sz);
/*
* There is no need to change size of non-existent object
*/
/* vnode_pager_setsize(vp, sz); */
}
}
@ -1284,40 +1287,6 @@ union_dircache(vp, p)
return (nvp);
}
/*
 * Guarantee coherency with the VM cache by invalidating any clean VM pages
 * associated with this write and updating any dirty VM pages. Since our
 * vnode is locked, other processes will not be able to read the pages in
 * again until after our write completes.
 *
 * We also have to be coherent with reads, by flushing any pending dirty
 * pages prior to issuing the read.
 *
 * XXX this is somewhat of a hack at the moment. To support this properly
 * we would have to be able to run VOP_READ and VOP_WRITE through the VM
 * cache. Then we wouldn't need to worry about coherency.
 */
void
union_vm_coherency(struct vnode *vp, struct uio *uio, int cleanfls)
{
vm_object_t object;
vm_pindex_t pstart;
vm_pindex_t pend;
int pgoff;
/* No VM object backing this vnode: nothing to keep coherent. */
if ((object = vp->v_object) == NULL)
return;
/* Convert the uio's byte range into a covering page-index range. */
pgoff = uio->uio_offset & PAGE_MASK;
pstart = uio->uio_offset / PAGE_SIZE;
pend = pstart + (uio->uio_resid + pgoff + PAGE_MASK) / PAGE_SIZE;
/* Synchronously flush any dirty pages in the affected range. */
vm_object_page_clean(object, pstart, pend, OBJPC_SYNC);
/* For writes (cleanfls set), also invalidate the now-clean pages. */
if (cleanfls)
vm_object_page_remove(object, pstart, pend, TRUE);
}
/*
* Module glue to remove #ifdef UNION from vfs_syscalls.c
*/

View File

@ -71,8 +71,11 @@ static int union_access __P((struct vop_access_args *ap));
static int union_advlock __P((struct vop_advlock_args *ap));
static int union_close __P((struct vop_close_args *ap));
static int union_create __P((struct vop_create_args *ap));
static int union_createvobject __P((struct vop_createvobject_args *ap));
static int union_destroyvobject __P((struct vop_destroyvobject_args *ap));
static int union_fsync __P((struct vop_fsync_args *ap));
static int union_getattr __P((struct vop_getattr_args *ap));
static int union_getvobject __P((struct vop_getvobject_args *ap));
static int union_inactive __P((struct vop_inactive_args *ap));
static int union_ioctl __P((struct vop_ioctl_args *ap));
static int union_lease __P((struct vop_lease_args *ap));
@ -1029,9 +1032,6 @@ union_read(ap)
uvp = union_lock_other(un, p);
KASSERT(uvp != NULL, ("union_read: backing vnode missing!"));
if (ap->a_vp->v_flag & VOBJBUF)
union_vm_coherency(ap->a_vp, ap->a_uio, 0);
error = VOP_READ(uvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
union_unlock_other(uvp, p);
@ -1073,27 +1073,6 @@ union_write(ap)
if ((uppervp = union_lock_upper(un, p)) == NULLVP)
panic("union: missing upper layer in write");
/*
* Since our VM pages are associated with our vnode rather than
* the real vnode, and since we do not run our reads and writes
* through our own VM cache, we have a VM/VFS coherency problem.
* We solve them by invalidating or flushing the associated VM
* pages prior to allowing a normal read or write to occur.
*
* VM-backed writes (UIO_NOCOPY) have to be converted to normal
* writes because we are not cache-coherent. Normal writes need
* to be made coherent with our VM-backing store, which we do by
* first flushing any dirty VM pages associated with the write
* range, and then destroying any clean VM pages associated with
* the write range.
*/
if (ap->a_uio->uio_segflg == UIO_NOCOPY) {
ap->a_uio->uio_segflg = UIO_SYSSPACE;
} else if (ap->a_vp->v_flag & VOBJBUF) {
union_vm_coherency(ap->a_vp, ap->a_uio, 1);
}
error = VOP_WRITE(uppervp, ap->a_uio, ap->a_ioflag, ap->a_cred);
/*
@ -1792,6 +1771,56 @@ union_unlock(ap)
return(error);
}
/*
 * A union vnode holds no VM object of its own; the real object is
 * created on the upper or lower vnode by union_open().  All we do
 * here is mark the vnode as participating in the VOBJBUF protocol.
 */
static int
union_createvobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *vp;
		struct ucred *cred;
		struct proc *p;
	} */ *ap;
{
	ap->a_vp->v_flag |= VOBJBUF;
	return (0);
}
/*
 * There is no VM object of our own to destroy; just clear the VOBJBUF
 * marker set by union_createvobject().  This operation shouldn't be
 * bypassed to the underlying layer.
 */
static int
union_destroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *vp;
	} */ *ap;
{
	ap->a_vp->v_flag &= ~VOBJBUF;
	return (0);
}
/*
 * Get the VM object from the upper or lower vp: the union vnode itself
 * carries no VM object, so forward to whichever backing vnode exists.
 *
 * Returns EINVAL when there is no backing vnode, otherwise the result
 * of VOP_GETVOBJECT() on it.
 */
static int
union_getvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *vp;
		struct vm_object **objpp;
	} */ *ap;
{
	struct vnode *ovp = OTHERVP(ap->a_vp);

	if (ovp == NULL)
		return (EINVAL);	/* parenthesized per style(9), matching the rest of the file */
	return (VOP_GETVOBJECT(ovp, ap->a_objpp));
}
static int
union_print(ap)
struct vop_print_args /* {
@ -1888,8 +1917,11 @@ static struct vnodeopv_entry_desc union_vnodeop_entries[] = {
{ &vop_bmap_desc, (vop_t *) vop_eopnotsupp },
{ &vop_close_desc, (vop_t *) union_close },
{ &vop_create_desc, (vop_t *) union_create },
{ &vop_createvobject_desc, (vop_t *) union_createvobject },
{ &vop_destroyvobject_desc, (vop_t *) union_destroyvobject },
{ &vop_fsync_desc, (vop_t *) union_fsync },
{ &vop_getattr_desc, (vop_t *) union_getattr },
{ &vop_getvobject_desc, (vop_t *) union_getvobject },
{ &vop_inactive_desc, (vop_t *) union_inactive },
{ &vop_ioctl_desc, (vop_t *) union_ioctl },
{ &vop_islocked_desc, (vop_t *) vop_stdislocked },

View File

@ -133,7 +133,6 @@ extern int union_cn_close __P((struct vnode *, int, struct ucred *,
extern void union_removed_upper __P((struct union_node *un));
extern struct vnode *union_lowervp __P((struct vnode *));
extern void union_newsize __P((struct vnode *, off_t, off_t));
extern void union_vm_coherency __P((struct vnode *, struct uio *, int));
extern int (*union_dircheckp) __P((struct proc *, struct vnode **,
struct file *));

View File

@ -271,7 +271,10 @@ union_newsize(vp, uppersz, lowersz)
if (sz != VNOVAL) {
UDEBUG(("union: %s size now %ld\n",
(uppersz != VNOVAL ? "upper" : "lower"), (long)sz));
vnode_pager_setsize(vp, sz);
/*
* There is no need to change size of non-existent object
*/
/* vnode_pager_setsize(vp, sz); */
}
}
@ -1284,40 +1287,6 @@ union_dircache(vp, p)
return (nvp);
}
/*
 * Guarantee coherency with the VM cache by invalidating any clean VM pages
 * associated with this write and updating any dirty VM pages. Since our
 * vnode is locked, other processes will not be able to read the pages in
 * again until after our write completes.
 *
 * We also have to be coherent with reads, by flushing any pending dirty
 * pages prior to issuing the read.
 *
 * XXX this is somewhat of a hack at the moment. To support this properly
 * we would have to be able to run VOP_READ and VOP_WRITE through the VM
 * cache. Then we wouldn't need to worry about coherency.
 */
void
union_vm_coherency(struct vnode *vp, struct uio *uio, int cleanfls)
{
vm_object_t object;
vm_pindex_t pstart;
vm_pindex_t pend;
int pgoff;
/* No VM object backing this vnode: nothing to keep coherent. */
if ((object = vp->v_object) == NULL)
return;
/* Convert the uio's byte range into a covering page-index range. */
pgoff = uio->uio_offset & PAGE_MASK;
pstart = uio->uio_offset / PAGE_SIZE;
pend = pstart + (uio->uio_resid + pgoff + PAGE_MASK) / PAGE_SIZE;
/* Synchronously flush any dirty pages in the affected range. */
vm_object_page_clean(object, pstart, pend, OBJPC_SYNC);
/* For writes (cleanfls set), also invalidate the now-clean pages. */
if (cleanfls)
vm_object_page_remove(object, pstart, pend, TRUE);
}
/*
* Module glue to remove #ifdef UNION from vfs_syscalls.c
*/

View File

@ -71,8 +71,11 @@ static int union_access __P((struct vop_access_args *ap));
static int union_advlock __P((struct vop_advlock_args *ap));
static int union_close __P((struct vop_close_args *ap));
static int union_create __P((struct vop_create_args *ap));
static int union_createvobject __P((struct vop_createvobject_args *ap));
static int union_destroyvobject __P((struct vop_destroyvobject_args *ap));
static int union_fsync __P((struct vop_fsync_args *ap));
static int union_getattr __P((struct vop_getattr_args *ap));
static int union_getvobject __P((struct vop_getvobject_args *ap));
static int union_inactive __P((struct vop_inactive_args *ap));
static int union_ioctl __P((struct vop_ioctl_args *ap));
static int union_lease __P((struct vop_lease_args *ap));
@ -1029,9 +1032,6 @@ union_read(ap)
uvp = union_lock_other(un, p);
KASSERT(uvp != NULL, ("union_read: backing vnode missing!"));
if (ap->a_vp->v_flag & VOBJBUF)
union_vm_coherency(ap->a_vp, ap->a_uio, 0);
error = VOP_READ(uvp, ap->a_uio, ap->a_ioflag, ap->a_cred);
union_unlock_other(uvp, p);
@ -1073,27 +1073,6 @@ union_write(ap)
if ((uppervp = union_lock_upper(un, p)) == NULLVP)
panic("union: missing upper layer in write");
/*
* Since our VM pages are associated with our vnode rather than
* the real vnode, and since we do not run our reads and writes
* through our own VM cache, we have a VM/VFS coherency problem.
* We solve them by invalidating or flushing the associated VM
* pages prior to allowing a normal read or write to occur.
*
* VM-backed writes (UIO_NOCOPY) have to be converted to normal
* writes because we are not cache-coherent. Normal writes need
* to be made coherent with our VM-backing store, which we do by
* first flushing any dirty VM pages associated with the write
* range, and then destroying any clean VM pages associated with
* the write range.
*/
if (ap->a_uio->uio_segflg == UIO_NOCOPY) {
ap->a_uio->uio_segflg = UIO_SYSSPACE;
} else if (ap->a_vp->v_flag & VOBJBUF) {
union_vm_coherency(ap->a_vp, ap->a_uio, 1);
}
error = VOP_WRITE(uppervp, ap->a_uio, ap->a_ioflag, ap->a_cred);
/*
@ -1792,6 +1771,56 @@ union_unlock(ap)
return(error);
}
/*
 * A union vnode holds no VM object of its own; the real object is
 * created on the upper or lower vnode by union_open().  All we do
 * here is mark the vnode as participating in the VOBJBUF protocol.
 */
static int
union_createvobject(ap)
	struct vop_createvobject_args /* {
		struct vnode *vp;
		struct ucred *cred;
		struct proc *p;
	} */ *ap;
{
	ap->a_vp->v_flag |= VOBJBUF;
	return (0);
}
/*
 * There is no VM object of our own to destroy; just clear the VOBJBUF
 * marker set by union_createvobject().  This operation shouldn't be
 * bypassed to the underlying layer.
 */
static int
union_destroyvobject(ap)
	struct vop_destroyvobject_args /* {
		struct vnode *vp;
	} */ *ap;
{
	ap->a_vp->v_flag &= ~VOBJBUF;
	return (0);
}
/*
 * Get the VM object from the upper or lower vp: the union vnode itself
 * carries no VM object, so forward to whichever backing vnode exists.
 *
 * Returns EINVAL when there is no backing vnode, otherwise the result
 * of VOP_GETVOBJECT() on it.
 */
static int
union_getvobject(ap)
	struct vop_getvobject_args /* {
		struct vnode *vp;
		struct vm_object **objpp;
	} */ *ap;
{
	struct vnode *ovp = OTHERVP(ap->a_vp);

	if (ovp == NULL)
		return (EINVAL);	/* parenthesized per style(9), matching the rest of the file */
	return (VOP_GETVOBJECT(ovp, ap->a_objpp));
}
static int
union_print(ap)
struct vop_print_args /* {
@ -1888,8 +1917,11 @@ static struct vnodeopv_entry_desc union_vnodeop_entries[] = {
{ &vop_bmap_desc, (vop_t *) vop_eopnotsupp },
{ &vop_close_desc, (vop_t *) union_close },
{ &vop_create_desc, (vop_t *) union_create },
{ &vop_createvobject_desc, (vop_t *) union_createvobject },
{ &vop_destroyvobject_desc, (vop_t *) union_destroyvobject },
{ &vop_fsync_desc, (vop_t *) union_fsync },
{ &vop_getattr_desc, (vop_t *) union_getattr },
{ &vop_getvobject_desc, (vop_t *) union_getvobject },
{ &vop_inactive_desc, (vop_t *) union_inactive },
{ &vop_ioctl_desc, (vop_t *) union_ioctl },
{ &vop_islocked_desc, (vop_t *) vop_stdislocked },