- Add a new object flag "OBJ_NEEDSGIANT". We set this flag if the underlying vnode requires Giant.
- In vm_fault only acquire Giant if the underlying object has NEEDSGIANT set.
- In vm_object_shadow inherit the NEEDSGIANT flag from the backing object.
parent d65736a1c0
commit ed4fe4f4f5
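In outline, the change described in the commit message works as follows: the pager tags a vnode-backed object with OBJ_NEEDGIANT when its filesystem still needs Giant, the fault path takes Giant only when that bit is set, and a shadow object inherits the bit from its backing object. Below is a minimal userland sketch of that pattern, not kernel code: the struct obj, obj_init(), obj_shadow(), and fault_on() names and the pthread mutex standing in for Giant are illustrative assumptions; only the OBJ_NEEDGIANT flag value and the flag-inheritance expression mirror the diff that follows.

/*
 * Userland model of the OBJ_NEEDGIANT scheme (illustrative only).
 * Build with: cc -o needgiant needgiant.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

#define	OBJ_NEEDGIANT	0x8000		/* object requires the global lock */

static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER;	/* stand-in for Giant */

struct obj {
	unsigned flags;
};

/* vnode_pager_alloc() analogue: tag the object if its filesystem needs Giant. */
static void
obj_init(struct obj *o, int fs_needs_giant)
{
	o->flags = fs_needs_giant ? OBJ_NEEDGIANT : 0;
}

/* vm_object_shadow() analogue: the shadow inherits the flag from its backing object. */
static void
obj_shadow(struct obj *shadow, const struct obj *backing)
{
	shadow->flags |= backing->flags & OBJ_NEEDGIANT;
}

/* vm_fault() analogue: take the global lock only when the object demands it. */
static void
fault_on(const struct obj *o)
{
	if (o->flags & OBJ_NEEDGIANT)
		pthread_mutex_lock(&giant);
	printf("fault handled, flags=%#x\n", o->flags);
	if (o->flags & OBJ_NEEDGIANT)
		pthread_mutex_unlock(&giant);
}

int
main(void)
{
	struct obj backing, shadow;

	obj_init(&backing, 1);		/* backed by a Giant-requiring filesystem */
	obj_shadow(&shadow, &backing);	/* shadow picks up OBJ_NEEDGIANT */
	fault_on(&shadow);
	return (0);
}

The real diff applies the same conditional around vnode_pager_lock() in vm_fault(), and vm_object_shadow() copies the bit with result->flags |= source->flags & OBJ_NEEDGIANT.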
@@ -290,14 +290,17 @@ RetryFault:;
	 *
	 * XXX vnode_pager_lock() can block without releasing the map lock.
	 */
-	if (!fs.map->system_map)
+	if (fs.first_object->flags & OBJ_NEEDGIANT)
		mtx_lock(&Giant);
	VM_OBJECT_LOCK(fs.first_object);
	vm_object_reference_locked(fs.first_object);
	fs.vp = vnode_pager_lock(fs.first_object);
	KASSERT(fs.vp == NULL || !fs.map->system_map,
	    ("vm_fault: vnode-backed object mapped by system map"));
-	if (debug_mpsafevm && !fs.map->system_map)
+	KASSERT((fs.first_object->flags & OBJ_NEEDGIANT) == 0 ||
+	    !fs.map->system_map,
+	    ("vm_fault: Object requiring giant mapped by system map"));
+	if (fs.first_object->flags & OBJ_NEEDGIANT && debug_mpsafevm)
		mtx_unlock(&Giant);
	vm_object_pip_add(fs.first_object, 1);
@@ -378,10 +381,12 @@ RetryFault:;
			}
			unlock_map(&fs);
			if (fs.vp != NULL) {
-				mtx_lock(&Giant);
+				int vfslck;
+
+				vfslck = VFS_LOCK_GIANT(fs.vp->v_mount);
				vput(fs.vp);
-				mtx_unlock(&Giant);
				fs.vp = NULL;
+				VFS_UNLOCK_GIANT(vfslck);
			}
			VM_OBJECT_LOCK(fs.object);
			if (fs.m == vm_page_lookup(fs.object,
@@ -1230,6 +1230,7 @@ vm_object_shadow(
		length = PQ_L2_SIZE / 3 + PQ_PRIME1;
		result->pg_color = (source->pg_color +
		    length * source->generation) & PQ_L2_MASK;
+		result->flags |= source->flags & OBJ_NEEDGIANT;
		VM_OBJECT_UNLOCK(source);
		next_index = (result->pg_color + PQ_L2_SIZE / 3 + PQ_PRIME1) &
		    PQ_L2_MASK;
@@ -145,6 +145,7 @@ struct vm_object {
 #define	OBJ_CLEANING	0x0200
 #define	OBJ_ONEMAPPING	0x2000	/* One USE (a single, non-forked) mapping flag */
 #define	OBJ_DISCONNECTWNT 0x4000	/* disconnect from vnode wanted */
+#define	OBJ_NEEDGIANT	0x8000		/* object requires Giant */

 #define	IDX_TO_OFF(idx) (((vm_ooffset_t)(idx)) << PAGE_SHIFT)
 #define	OFF_TO_IDX(off) ((vm_pindex_t)(((vm_ooffset_t)(off)) >> PAGE_SHIFT))
@@ -231,6 +231,8 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
		object->un_pager.vnp.vnp_size = size;

		object->handle = handle;
+		if (VFS_NEEDSGIANT(vp->v_mount))
+			vm_object_set_flag(object, OBJ_NEEDGIANT);
		vp->v_object = object;
	} else {
		object->ref_count++;
@@ -1188,6 +1190,7 @@ vnode_pager_lock(vm_object_t first_object)
			VM_OBJECT_UNLOCK(object);
			if (first_object != object)
				VM_OBJECT_UNLOCK(first_object);
+			VFS_ASSERT_GIANT(vp->v_mount);
			if (vget(vp, LK_CANRECURSE | LK_INTERLOCK |
			    LK_RETRY | LK_SHARED, curthread)) {
				VM_OBJECT_LOCK(first_object);