Eliminate a deadlock when creating snapshots. A blocking vn_start_write() must
be called without any vnode locks held. Remove the vn_start_write() and
vn_finished_write() calls from vnode_pager_putpages() and instead add them,
before the vnode lock is obtained, to most of the callers that don't already
have them.
commit 774f51ad2c
parent 29fc266ddd
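
The rule being enforced: the suspension bracket must enclose the vnode lock,
never sit inside it. vn_start_write() can sleep until an in-progress
suspension (snapshot creation) finishes, and the suspending thread can in
turn need the vnode lock the sleeping thread holds. A minimal sketch of the
ordering the patched callers now follow (same KPI calls as in the diff below;
error handling omitted):

	struct mount *mp;

	/* Declare write intent first; this can sleep until a
	 * file system suspension completes. */
	(void) vn_start_write(vp, &mp, V_WAIT);
	/* Only now is it safe to take the vnode lock. */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);

	/* ... clean and flush pages while the vnode is locked ... */

	VOP_UNLOCK(vp, 0, curthread);
	/* Drop the write intent only after the vnode lock. */
	vn_finished_write(mp);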
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -2801,6 +2801,7 @@ vfs_msync(struct mount *mp, int flags)
 	struct vnode *vp, *mvp;
 	struct vm_object *obj;

+	(void) vn_start_write(NULL, &mp, V_WAIT);
 	MNT_ILOCK(mp);
 	MNT_VNODE_FOREACH(vp, mp, mvp) {
 		VI_LOCK(vp);
@@ -2831,6 +2832,7 @@ vfs_msync(struct mount *mp, int flags)
 			VI_UNLOCK(vp);
 	}
 	MNT_IUNLOCK(mp);
+	vn_finished_write(mp);
 }

 /*
--- a/sys/security/mac/mac_process.c
+++ b/sys/security/mac/mac_process.c
@@ -330,6 +330,7 @@ mac_cred_mmapped_drop_perms_recurse(struct thread *td, struct ucred *cred,
 	vm_object_t backing_object, object;
 	vm_ooffset_t offset;
 	struct vnode *vp;
+	struct mount *mp;

 	if (!mac_mmap_revocation)
 		return;
@@ -407,6 +408,7 @@ mac_cred_mmapped_drop_perms_recurse(struct thread *td, struct ucred *cred,
 			 * copy-on-write.
 			 */
 			vm_object_reference(object);
+			(void) vn_start_write(vp, &mp, V_WAIT);
 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 			VM_OBJECT_LOCK(object);
 			vm_object_page_clean(object,
@@ -416,6 +418,7 @@ mac_cred_mmapped_drop_perms_recurse(struct thread *td, struct ucred *cred,
 			    OBJPC_SYNC);
 			VM_OBJECT_UNLOCK(object);
 			VOP_UNLOCK(vp, 0, td);
+			vn_finished_write(mp);
 			vm_object_deallocate(object);
 			/*
 			 * Why bother if there's no read permissions
--- a/sys/ufs/ffs/ffs_rawread.c
+++ b/sys/ufs/ffs/ffs_rawread.c
@@ -101,6 +101,7 @@ ffs_rawread_sync(struct vnode *vp, struct thread *td)
 	int error;
 	int upgraded;
 	struct bufobj *bo;
+	struct mount *mp;

 	/* Check for dirty mmap, pending writes and dirty buffers */
 	spl = splbio();
@@ -112,7 +113,15 @@ ffs_rawread_sync(struct vnode *vp, struct thread *td)
 		splx(spl);
 		VI_UNLOCK(vp);

-		if (VOP_ISLOCKED(vp, td) != LK_EXCLUSIVE) {
+		if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
+			if (VOP_ISLOCKED(vp, td) != LK_EXCLUSIVE)
+				upgraded = 1;
+			else
+				upgraded = 0;
+			VOP_UNLOCK(vp, 0, td);
+			(void) vn_start_write(vp, &mp, V_WAIT);
+			VOP_LOCK(vp, LK_EXCLUSIVE, td);
+		} else if (VOP_ISLOCKED(vp, td) != LK_EXCLUSIVE) {
 			upgraded = 1;
 			/* Upgrade to exclusive lock, this might block */
 			VOP_LOCK(vp, LK_UPGRADE, td);
@@ -161,6 +170,7 @@ ffs_rawread_sync(struct vnode *vp, struct thread *td)
 		VI_UNLOCK(vp);
 		if (upgraded != 0)
 			VOP_LOCK(vp, LK_DOWNGRADE, td);
+		vn_finished_write(mp);
 	} else {
 		splx(spl);
 		VI_UNLOCK(vp);
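
Note the shape of the ffs_rawread_sync() change: this function already holds
the vnode lock at this point, so it cannot simply sleep in vn_start_write().
The hunk therefore tries the non-blocking variant first and pays for a lock
dance only when a suspension is actually pending. Distilled (a sketch of the
idiom, not the literal code):

	/* Try to declare write intent without sleeping. */
	if (vn_start_write(vp, &mp, V_NOWAIT) != 0) {
		/* A suspension is pending; sleeping here with the
		 * vnode locked could deadlock.  Remember whether the
		 * lock was shared, drop it, wait, and relock. */
		upgraded = (VOP_ISLOCKED(vp, td) != LK_EXCLUSIVE);
		VOP_UNLOCK(vp, 0, td);
		(void) vn_start_write(vp, &mp, V_WAIT);
		VOP_LOCK(vp, LK_EXCLUSIVE, td);
	}
	/* ... later, vn_finished_write(mp) and, if upgraded was set,
	 * VOP_LOCK(vp, LK_DOWNGRADE, td) restore the old state. */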
--- a/sys/vm/vm_contig.c
+++ b/sys/vm/vm_contig.c
@@ -91,6 +91,7 @@ vm_contig_launder_page(vm_page_t m)
 	vm_object_t object;
 	vm_page_t m_tmp;
 	struct vnode *vp;
+	struct mount *mp;

 	object = m->object;
 	if (!VM_OBJECT_TRYLOCK(object))
@@ -109,12 +110,14 @@ vm_contig_launder_page(vm_page_t m)
 		vp = object->handle;
 		vm_object_reference_locked(object);
 		VM_OBJECT_UNLOCK(object);
+		(void) vn_start_write(vp, &mp, V_WAIT);
 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
 		VM_OBJECT_LOCK(object);
 		vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
 		VM_OBJECT_UNLOCK(object);
 		VOP_UNLOCK(vp, 0, curthread);
 		vm_object_deallocate(object);
+		vn_finished_write(mp);
 		vm_page_lock_queues();
 		return (0);
 	} else if (object->type == OBJT_SWAP ||
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -993,6 +993,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
 {
 	vm_object_t backing_object;
 	struct vnode *vp;
+	struct mount *mp;
 	int flags;

 	if (object == NULL)
@@ -1023,6 +1024,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
 		int vfslocked;
 		vp = object->handle;
 		VM_OBJECT_UNLOCK(object);
+		(void) vn_start_write(vp, &mp, V_WAIT);
 		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
 		flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
@@ -1035,6 +1037,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
 		VM_OBJECT_UNLOCK(object);
 		VOP_UNLOCK(vp, 0, curthread);
 		VFS_UNLOCK_GIANT(vfslocked);
+		vn_finished_write(mp);
 		VM_OBJECT_LOCK(object);
 	}
 	if ((object->type == OBJT_VNODE ||
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -1017,11 +1017,9 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
 	VM_OBJECT_UNLOCK(object);
 	if (vp->v_type != VREG)
 		mp = NULL;
-	(void)vn_start_write(vp, &mp, V_WAIT);
 	rtval = VOP_PUTPAGES(vp, m, bytes, sync, rtvals, 0);
 	KASSERT(rtval != EOPNOTSUPP,
 	    ("vnode_pager: stale FS putpages\n"));
-	vn_finished_write(mp);
 	VM_OBJECT_LOCK(object);
 }

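
The removal above is the other half of the fix: VOP_PUTPAGES() is reached
with the vnode already locked (msync(2), the pageout path), so a blocking
vn_start_write(..., V_WAIT) inside the pager was exactly the
call-with-vnode-lock-held case the commit message forbids. The suspension
bracket now lives in the callers shown earlier, before they take the vnode
lock.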