Don't use VOP_GETVOBJECT, use vp->v_object directly.
commit 8516dd18e1
parent 69816ea35e
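The conversion is the same in every hunk below: instead of asking the VOP layer for the backing VM object, callers read vp->v_object directly and do their own NULL check. A minimal sketch of the before/after pattern follows (illustrative only, not taken from any single hunk; it assumes a vnode pointer vp and the error/goto style the hunks themselves use):

    /* Before: query the backing object through the vnode operation vector. */
    if (VOP_GETVOBJECT(vp, &obj) != 0) {
        /* Nonzero return means the vnode has no VM object attached. */
        error = EINVAL;
        goto done;
    }

    /* After: read the field directly; "no object" is simply a NULL pointer. */
    obj = vp->v_object;
    if (obj == NULL) {
        error = EINVAL;
        goto done;
    }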
@@ -563,7 +563,7 @@ __elfN(load_file)(struct proc *p, const char *file, u_long *addr,
     if (error == 0)
         nd->ni_vp->v_vflag |= VV_TEXT;
 
-    VOP_GETVOBJECT(nd->ni_vp, &imgp->object);
+    imgp->object = nd->ni_vp->v_object;
     vm_object_reference(imgp->object);
 
     VOP_UNLOCK(nd->ni_vp, 0, curthread); /* XXXKSE */
@@ -1998,8 +1998,7 @@ fget_write(struct thread *td, int fd, struct file **fpp)
 /*
  * Like fget() but loads the underlying vnode, or returns an error if
  * the descriptor does not represent a vnode. Note that pipes use vnodes
- * but never have VM objects (so VOP_GETVOBJECT() calls will return an
- * error). The returned vnode will be vref()d.
+ * but never have VM objects. The returned vnode will be vref()d.
  *
  * XXX: what about the unused flags ?
  */
@@ -367,7 +367,8 @@ do_execve(td, fname, argv, envv, mac_p)
     if (error)
         goto exec_fail_dealloc;
 
-    if (VOP_GETVOBJECT(imgp->vp, &imgp->object) == 0)
+    imgp->object = imgp->vp->v_object;
+    if (imgp->object != NULL)
         vm_object_reference(imgp->object);
 
     /*
@@ -775,7 +776,7 @@ exec_map_first_page(imgp)
     if (imgp->firstpage != NULL)
         exec_unmap_first_page(imgp);
 
-    VOP_GETVOBJECT(imgp->vp, &object);
+    object = imgp->vp->v_object;
     VM_OBJECT_LOCK(object);
     ma[0] = vm_page_grab(object, 0, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
     if ((ma[0]->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
@@ -1742,8 +1742,9 @@ do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
     if ((error = fgetvp_read(td, uap->fd, &vp)) != 0)
         goto done;
     vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
+    obj = vp->v_object;
     VOP_UNLOCK(vp, 0, td);
-    if (vp->v_type != VREG || VOP_GETVOBJECT(vp, &obj) != 0) {
+    if (obj == NULL) {
         error = EINVAL;
         goto done;
     }
@@ -2283,8 +2283,9 @@ inmem(struct vnode * vp, daddr_t blkno)
         return 1;
     if (vp->v_mount == NULL)
         return 0;
-    if (VOP_GETVOBJECT(vp, &obj) != 0 || vp->v_object == NULL)
-        return 0;
+    obj = vp->v_object;
+    if (obj == NULL)
+        return (0);
 
     size = PAGE_SIZE;
     if (size > vp->v_mount->mnt_stat.f_iosize)
@@ -2448,7 +2449,6 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
     struct bufobj *bo;
     int s;
     int error;
-    struct vm_object *vmo;
 
     CTR3(KTR_BUF, "getblk(%p, %ld, %d)", vp, (long)blkno, size);
     ASSERT_VOP_LOCKED(vp, "getblk");
@@ -2607,8 +2607,7 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
 
     bsize = bo->bo_bsize;
     offset = blkno * bsize;
-    vmio = (VOP_GETVOBJECT(vp, NULL) == 0) &&
-        vp->v_object != NULL;
+    vmio = vp->v_object != NULL;
     maxsize = vmio ? size + (offset & PAGE_MASK) : size;
     maxsize = imax(maxsize, bsize);
 
@@ -2668,10 +2667,9 @@ getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo,
             printf("getblk: VMIO on vnode type %d\n",
                 vp->v_type);
 #endif
-        VOP_GETVOBJECT(vp, &vmo);
-        KASSERT(vmo == bp->b_bufobj->bo_object,
+        KASSERT(vp->v_object == bp->b_bufobj->bo_object,
             ("ARGH! different b_bufobj->bo_object %p %p %p\n",
-            bp, vmo, bp->b_bufobj->bo_object));
+            bp, vp->v_object, bp->b_bufobj->bo_object));
     } else {
         bp->b_flags &= ~B_VMIO;
         KASSERT(bp->b_bufobj->bo_object == NULL,
@@ -3229,8 +3227,8 @@ bufdone(struct buf *bp)
             panic("biodone: zero vnode ref count");
         }
 
-        if (vp->v_object == NULL)
-            panic("biodone: vnode is not setup for merged cache");
+        KASSERT(vp->v_object != NULL,
+            ("biodone: vnode %p has no vm_object", vp));
 #endif
 
         foff = bp->b_offset;
@@ -672,7 +672,8 @@ vtryrecycle(struct vnode *vp)
     /*
      * Don't recycle if we still have cached pages.
      */
-    if (VOP_GETVOBJECT(vp, &object) == 0) {
+    object = vp->v_object;
+    if (object != NULL) {
         VM_OBJECT_LOCK(object);
         if (object->resident_page_count ||
             object->ref_count) {
@@ -930,7 +931,6 @@ vinvalbuf(vp, flags, td, slpflag, slptimeo)
     int slpflag, slptimeo;
 {
     int error;
-    vm_object_t object;
     struct bufobj *bo;
 
     ASSERT_VOP_LOCKED(vp, "vinvalbuf");
@@ -981,10 +981,10 @@ vinvalbuf(vp, flags, td, slpflag, slptimeo)
     do {
         bufobj_wwait(bo, 0, 0);
         VI_UNLOCK(vp);
-        if (VOP_GETVOBJECT(vp, &object) == 0) {
-            VM_OBJECT_LOCK(object);
-            vm_object_pip_wait(object, "vnvlbx");
-            VM_OBJECT_UNLOCK(object);
+        if (vp->v_object != NULL) {
+            VM_OBJECT_LOCK(vp->v_object);
+            vm_object_pip_wait(vp->v_object, "vnvlbx");
+            VM_OBJECT_UNLOCK(vp->v_object);
         }
         VI_LOCK(vp);
     } while (bo->bo_numoutput > 0);
@@ -993,11 +993,11 @@ vinvalbuf(vp, flags, td, slpflag, slptimeo)
     /*
      * Destroy the copy in the VM cache, too.
      */
-    if (VOP_GETVOBJECT(vp, &object) == 0) {
-        VM_OBJECT_LOCK(object);
-        vm_object_page_remove(object, 0, 0,
+    if (vp->v_object != NULL) {
+        VM_OBJECT_LOCK(vp->v_object);
+        vm_object_page_remove(vp->v_object, 0, 0,
             (flags & V_SAVE) ? TRUE : FALSE);
-        VM_OBJECT_UNLOCK(object);
+        VM_OBJECT_UNLOCK(vp->v_object);
     }
 
 #ifdef INVARIANTS
@@ -2825,7 +2825,8 @@ vfs_msync(struct mount *mp, int flags)
             continue;
         }
 
-        if (VOP_GETVOBJECT(vp, &obj) == 0) {
+        obj = vp->v_object;
+        if (obj != NULL) {
             VM_OBJECT_LOCK(obj);
             vm_object_page_clean(obj, 0, 0,
                 flags == MNT_WAIT ?
@@ -3166,7 +3166,6 @@ fsync(td, uap)
     struct vnode *vp;
     struct mount *mp;
     struct file *fp;
-    vm_object_t obj;
     int vfslocked;
     int error;
 
@@ -3177,10 +3176,10 @@ fsync(td, uap)
     if ((error = vn_start_write(vp, &mp, V_WAIT | PCATCH)) != 0)
         goto drop;
     vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
-    if (VOP_GETVOBJECT(vp, &obj) == 0) {
-        VM_OBJECT_LOCK(obj);
-        vm_object_page_clean(obj, 0, 0, 0);
-        VM_OBJECT_UNLOCK(obj);
+    if (vp->v_object != NULL) {
+        VM_OBJECT_LOCK(vp->v_object);
+        vm_object_page_clean(vp->v_object, 0, 0, 0);
+        VM_OBJECT_UNLOCK(vp->v_object);
     }
     error = VOP_FSYNC(vp, MNT_WAIT, td);
     if (error == 0 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP)
@@ -126,12 +126,11 @@ ffs_rawread_sync(struct vnode *vp, struct thread *td)
     /* Attempt to msync mmap() regions to clean dirty mmap */
     VI_LOCK(vp);
     if ((vp->v_iflag & VI_OBJDIRTY) != 0) {
-        struct vm_object *obj;
         VI_UNLOCK(vp);
-        if (VOP_GETVOBJECT(vp, &obj) == 0) {
-            VM_OBJECT_LOCK(obj);
-            vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
-            VM_OBJECT_UNLOCK(obj);
+        if (vp->v_object != NULL) {
+            VM_OBJECT_LOCK(vp->v_object);
+            vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
+            VM_OBJECT_UNLOCK(vp->v_object);
         }
         VI_LOCK(vp);
     }
@@ -1078,11 +1078,12 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
         return (error);
     }
     flags = *flagsp;
+    obj = vp->v_object;
     if (vp->v_type == VREG) {
         /*
          * Get the proper underlying object
          */
-        if (VOP_GETVOBJECT(vp, &obj) != 0) {
+        if (obj == NULL) {
             error = EINVAL;
             goto done;
         }