- Remove GIANT_REQUIRED where giant is no longer required.
- Use VFS_LOCK_GIANT() rather than directly acquiring giant in places
  where giant is only held because vfs requires it.

Sponsored By:	Isilon Systems, Inc.
commit ae51ff1127
parent e9f3e3f8ca
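The idiom these changes adopt is sketched below (a minimal sketch; example_vnode_op() is hypothetical and not from the tree). VFS_LOCK_GIANT() acquires Giant only when the vnode's filesystem still requires it and returns a token recording whether it did, so the unlock side needs no knowledge of the filesystem:

#include <sys/param.h>
#include <sys/mount.h>	/* VFS_LOCK_GIANT()/VFS_UNLOCK_GIANT() */
#include <sys/vnode.h>

static void
example_vnode_op(struct vnode *vp)
{
	int vfslocked;

	/* Takes Giant only if vp's filesystem is not MPSAFE. */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	/* ... VOP_*() calls that may still depend on Giant ... */
	VFS_UNLOCK_GIANT(vfslocked);	/* No-op if Giant was never taken. */
}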
@@ -97,6 +97,8 @@ __FBSDID("$FreeBSD$");
 #include <vm/vnode_pager.h>
 #include <vm/vm_extern.h>
 
+#include <sys/mount.h> /* XXX Temporary for VFS_LOCK_GIANT() */
+
 #define PFBAK 4
 #define PFFOR 4
 #define PAGEORDER_SIZE (PFBAK+PFFOR)
@@ -165,10 +167,12 @@ unlock_and_deallocate(struct faultstate *fs)
 	vm_object_deallocate(fs->first_object);
 	unlock_map(fs);
 	if (fs->vp != NULL) {
-		mtx_lock(&Giant);
+		int vfslocked;
+
+		vfslocked = VFS_LOCK_GIANT(fs->vp->v_mount);
 		vput(fs->vp);
-		mtx_unlock(&Giant);
 		fs->vp = NULL;
+		VFS_UNLOCK_GIANT(vfslocked);
 	}
 	if (!fs->map->system_map)
 		VM_UNLOCK_GIANT();
@@ -1067,11 +1067,14 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 	struct vattr va;
 	void *handle;
 	vm_object_t obj;
+	struct mount *mp;
 	int error, flags, type;
+	int vfslocked;
 
-	mtx_lock(&Giant);
+	mp = vp->v_mount;
+	vfslocked = VFS_LOCK_GIANT(mp);
 	if ((error = vget(vp, LK_EXCLUSIVE, td)) != 0) {
-		mtx_unlock(&Giant);
+		VFS_UNLOCK_GIANT(vfslocked);
 		return (error);
 	}
 	flags = *flagsp;
@@ -1157,7 +1160,7 @@ vm_mmap_vnode(struct thread *td, vm_size_t objsize,
 	*flagsp = flags;
 done:
 	vput(vp);
-	mtx_unlock(&Giant);
+	VFS_UNLOCK_GIANT(vfslocked);
 	return (error);
 }
 
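The vm_mmap_vnode() change caches the mount point in mp before vget() and unlocks through the saved vfslocked token at done:, rather than re-reading vp->v_mount, which presumably cannot be trusted once vput() has dropped the reference. Reduced to a sketch (hypothetical example_mmap_vnode(), same headers as the sketch above plus sys/proc.h for struct thread):

static int
example_mmap_vnode(struct thread *td, struct vnode *vp)
{
	struct mount *mp;
	int error, vfslocked;

	mp = vp->v_mount;		/* Cache: vp may go away after vput(). */
	vfslocked = VFS_LOCK_GIANT(mp);
	if ((error = vget(vp, LK_EXCLUSIVE, td)) != 0) {
		VFS_UNLOCK_GIANT(vfslocked);
		return (error);
	}
	/* ... examine the vnode, set up the backing object ... */
	vput(vp);
	VFS_UNLOCK_GIANT(vfslocked);	/* Decided by the token, not vp. */
	return (error);
}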
@@ -393,7 +393,7 @@ vm_object_vndeallocate(vm_object_t object)
 {
 	struct vnode *vp = (struct vnode *) object->handle;
 
-	GIANT_REQUIRED;
+	VFS_ASSERT_GIANT(vp->v_mount);
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	KASSERT(object->type == OBJT_VNODE,
 	    ("vm_object_vndeallocate: not a vnode object"));
@@ -434,18 +434,22 @@ vm_object_deallocate(vm_object_t object)
 	vm_object_t temp;
 
 	while (object != NULL) {
+		int vfslocked;
 		/*
 		 * In general, the object should be locked when working with
 		 * its type. In this case, in order to maintain proper lock
 		 * ordering, an exception is possible because a vnode-backed
 		 * object never changes its type.
 		 */
-		if (object->type == OBJT_VNODE)
-			mtx_lock(&Giant);
+		vfslocked = 0;
+		if (object->type == OBJT_VNODE) {
+			struct vnode *vp = (struct vnode *) object->handle;
+			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
+		}
 		VM_OBJECT_LOCK(object);
 		if (object->type == OBJT_VNODE) {
 			vm_object_vndeallocate(object);
-			mtx_unlock(&Giant);
+			VFS_UNLOCK_GIANT(vfslocked);
 			return;
 		}
 
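Two details of the vm_object_deallocate() hunk: the unlocked type test is safe only because a vnode-backed object never changes type (per the existing comment), and Giant must be taken before VM_OBJECT_LOCK() to preserve the lock order. Zeroing vfslocked keeps it initialized on the non-vnode path, and VFS_UNLOCK_GIANT(0) is a no-op, so a common unlock is harmless. A minimal sketch (hypothetical example_deallocate_path(), assuming vm/vm_object.h for the object types):

static void
example_deallocate_path(vm_object_t object)
{
	int vfslocked;

	vfslocked = 0;
	/* Unlocked type test: a vnode-backed object never changes type. */
	if (object->type == OBJT_VNODE) {
		struct vnode *vp = (struct vnode *)object->handle;

		/* Giant (if needed) before the object lock: lock order. */
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	}
	VM_OBJECT_LOCK(object);
	/* ... type-dependent teardown ... */
	VM_OBJECT_UNLOCK(object);
	VFS_UNLOCK_GIANT(vfslocked);	/* No-op when vfslocked == 0. */
}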
@@ -671,7 +675,6 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
 	int pagerflags;
 	int curgeneration;
 
-	GIANT_REQUIRED;
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	if (object->type != OBJT_VNODE ||
 	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
@@ -1000,9 +1003,10 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
 	 */
 	if (object->type == OBJT_VNODE &&
 	    (object->flags & OBJ_MIGHTBEDIRTY) != 0) {
+		int vfslocked;
 		vp = object->handle;
 		VM_OBJECT_UNLOCK(object);
-		mtx_lock(&Giant);
+		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
 		flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
 		flags |= invalidate ? OBJPC_INVAL : 0;
@@ -1013,7 +1017,7 @@ vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
 		    flags);
 		VM_OBJECT_UNLOCK(object);
 		VOP_UNLOCK(vp, 0, curthread);
-		mtx_unlock(&Giant);
+		VFS_UNLOCK_GIANT(vfslocked);
 		VM_OBJECT_LOCK(object);
 	}
 	if ((object->type == OBJT_VNODE ||
@@ -215,6 +215,7 @@ vnode_pager_haspage(object, pindex, before, after)
 	int poff;
 	int bsize;
 	int pagesperblock, blocksperpage;
+	int vfslocked;
 
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	/*
@@ -248,9 +249,9 @@ vnode_pager_haspage(object, pindex, before, after)
 		reqblock = pindex * blocksperpage;
 	}
 	VM_OBJECT_UNLOCK(object);
-	mtx_lock(&Giant);
+	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
-	mtx_unlock(&Giant);
+	VFS_UNLOCK_GIANT(vfslocked);
 	VM_OBJECT_LOCK(object);
 	if (err)
 		return TRUE;
@@ -397,7 +398,6 @@ vnode_pager_addr(vp, address, run)
 	daddr_t vblock;
 	int voffset;
 
-	GIANT_REQUIRED;
 	if (address < 0)
 		return -1;
 
@@ -441,8 +441,6 @@ vnode_pager_input_smlfs(object, m)
 	vm_offset_t bsize;
 	int error = 0;
 
-	GIANT_REQUIRED;
-
 	vp = object->handle;
 	if (vp->v_mount == NULL)
 		return VM_PAGER_BAD;
@@ -619,14 +617,15 @@ vnode_pager_getpages(object, m, count, reqpage)
 	int rtval;
 	struct vnode *vp;
 	int bytes = count * PAGE_SIZE;
+	int vfslocked;
 
 	vp = object->handle;
 	VM_OBJECT_UNLOCK(object);
-	mtx_lock(&Giant);
+	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 	rtval = VOP_GETPAGES(vp, m, bytes, reqpage, 0);
 	KASSERT(rtval != EOPNOTSUPP,
 	    ("vnode_pager: FS getpages not implemented\n"));
-	mtx_unlock(&Giant);
+	VFS_UNLOCK_GIANT(vfslocked);
 	VM_OBJECT_LOCK(object);
 	return rtval;
 }
@@ -653,7 +652,6 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 	int count;
 	int error = 0;
 
-	GIANT_REQUIRED;
 	object = vp->v_object;
 	count = bytecount / PAGE_SIZE;
 
@@ -946,7 +944,6 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
 	struct mount *mp;
 	int bytes = count * PAGE_SIZE;
 
-	GIANT_REQUIRED;
 	/*
 	 * Force synchronous operation if we are extremely low on memory
 	 * to prevent a low-memory deadlock. VOP operations often need to
@@ -1006,7 +1003,6 @@ vnode_pager_generic_putpages(vp, m, bytecount, flags, rtvals)
 	int error;
 	int ioflags;
 
-	GIANT_REQUIRED;
 	object = vp->v_object;
 	count = bytecount / PAGE_SIZE;
 