- Acquire the vm_object's lock when performing vm_object_page_clean().

- Add a parameter to vm_pageout_flush() that tells vm_pageout_flush()
   whether its caller has locked the vm_object.  (This is a temporary
   measure to bootstrap vm_object locking.)
Alan Cox 2003-04-24 04:31:25 +00:00
parent 1f7440d9f6
commit b6e48e0372
18 changed files with 45 additions and 14 deletions
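The pattern repeated across the file diffs below is that every caller of vm_object_page_clean() now brackets the call with VM_OBJECT_LOCK()/VM_OBJECT_UNLOCK() while still holding the vnode lock. The following is an illustrative sketch of that calling convention, distilled from the hunks in this commit rather than copied from any single file:

    vm_object_reference(object);
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
    VM_OBJECT_LOCK(object);        /* new: the caller takes the object lock */
    vm_object_page_clean(object,
        OFF_TO_IDX(offset),
        OFF_TO_IDX(offset + size + PAGE_MASK),
        OBJPC_SYNC);
    VM_OBJECT_UNLOCK(object);      /* new: and drops it again afterwards */
    VOP_UNLOCK(vp, 0, td);
    vm_object_deallocate(object);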

View File

@@ -2093,11 +2093,13 @@ mac_cred_mmapped_drop_perms_recurse(struct thread *td, struct ucred *cred,
*/
vm_object_reference(object);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VM_OBJECT_LOCK(object);
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + vme->end - vme->start +
PAGE_MASK),
OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(vp, 0, td);
vm_object_deallocate(object);
/*

View File

@@ -2739,7 +2739,9 @@ fsync(td, uap)
}
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (VOP_GETVOBJECT(vp, &obj) == 0) {
VM_OBJECT_LOCK(obj);
vm_object_page_clean(obj, 0, 0, 0);
VM_OBJECT_UNLOCK(obj);
}
error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, td);
if (error == 0 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP)

View File

@@ -3206,9 +3206,11 @@ vfs_msync(struct mount *mp, int flags)
}
if (VOP_GETVOBJECT(vp, &obj) == 0) {
VM_OBJECT_LOCK(obj);
vm_object_page_clean(obj, 0, 0,
flags == MNT_WAIT ?
OBJPC_SYNC : OBJPC_NOSYNC);
VM_OBJECT_UNLOCK(obj);
}
vput(vp);
}

View File

@@ -2739,7 +2739,9 @@ fsync(td, uap)
}
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
if (VOP_GETVOBJECT(vp, &obj) == 0) {
VM_OBJECT_LOCK(obj);
vm_object_page_clean(obj, 0, 0, 0);
VM_OBJECT_UNLOCK(obj);
}
error = VOP_FSYNC(vp, fp->f_cred, MNT_WAIT, td);
if (error == 0 && vp->v_mount && (vp->v_mount->mnt_flag & MNT_SOFTDEP)

View File

@@ -3654,7 +3654,9 @@ nfsrv_commit(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
*/
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
VM_OBJECT_LOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(vp->v_object);
}
error = VOP_FSYNC(vp, cred, MNT_WAIT, td);
} else {
@@ -3683,7 +3685,9 @@ nfsrv_commit(struct nfsrv_descript *nfsd, struct nfssvc_sock *slp,
if (vp->v_object &&
(vp->v_object->flags & OBJ_MIGHTBEDIRTY)) {
VM_OBJECT_LOCK(vp->v_object);
vm_object_page_clean(vp->v_object, off / PAGE_SIZE, (cnt + PAGE_MASK) / PAGE_SIZE, OBJPC_SYNC);
VM_OBJECT_UNLOCK(vp->v_object);
}
s = splbio();

View File

@@ -2093,11 +2093,13 @@ mac_cred_mmapped_drop_perms_recurse(struct thread *td, struct ucred *cred,
*/
vm_object_reference(object);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VM_OBJECT_LOCK(object);
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + vme->end - vme->start +
PAGE_MASK),
OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(vp, 0, td);
vm_object_deallocate(object);
/*

View File

@@ -2093,11 +2093,13 @@ mac_cred_mmapped_drop_perms_recurse(struct thread *td, struct ucred *cred,
*/
vm_object_reference(object);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VM_OBJECT_LOCK(object);
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + vme->end - vme->start +
PAGE_MASK),
OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(vp, 0, td);
vm_object_deallocate(object);
/*

View File

@@ -2093,11 +2093,13 @@ mac_cred_mmapped_drop_perms_recurse(struct thread *td, struct ucred *cred,
*/
vm_object_reference(object);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VM_OBJECT_LOCK(object);
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + vme->end - vme->start +
PAGE_MASK),
OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(vp, 0, td);
vm_object_deallocate(object);
/*

View File

@@ -2093,11 +2093,13 @@ mac_cred_mmapped_drop_perms_recurse(struct thread *td, struct ucred *cred,
*/
vm_object_reference(object);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VM_OBJECT_LOCK(object);
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + vme->end - vme->start +
PAGE_MASK),
OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(vp, 0, td);
vm_object_deallocate(object);
/*

View File

@@ -2093,11 +2093,13 @@ mac_cred_mmapped_drop_perms_recurse(struct thread *td, struct ucred *cred,
*/
vm_object_reference(object);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VM_OBJECT_LOCK(object);
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + vme->end - vme->start +
PAGE_MASK),
OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(vp, 0, td);
vm_object_deallocate(object);
/*

View File

@@ -2093,11 +2093,13 @@ mac_cred_mmapped_drop_perms_recurse(struct thread *td, struct ucred *cred,
*/
vm_object_reference(object);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VM_OBJECT_LOCK(object);
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + vme->end - vme->start +
PAGE_MASK),
OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(vp, 0, td);
vm_object_deallocate(object);
/*

View File

@@ -2093,11 +2093,13 @@ mac_cred_mmapped_drop_perms_recurse(struct thread *td, struct ucred *cred,
*/
vm_object_reference(object);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VM_OBJECT_LOCK(object);
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + vme->end - vme->start +
PAGE_MASK),
OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(vp, 0, td);
vm_object_deallocate(object);
/*

View File

@@ -2093,11 +2093,13 @@ mac_cred_mmapped_drop_perms_recurse(struct thread *td, struct ucred *cred,
*/
vm_object_reference(object);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
VM_OBJECT_LOCK(object);
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + vme->end - vme->start +
PAGE_MASK),
OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(vp, 0, td);
vm_object_deallocate(object);
/*

View File

@@ -105,14 +105,16 @@ vm_contig_launder(int queue)
vm_page_unlock_queues();
vn_lock(object->handle,
LK_EXCLUSIVE | LK_RETRY, curthread);
VM_OBJECT_LOCK(object);
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(object->handle, 0, curthread);
vm_page_lock_queues();
return (TRUE);
} else if (object->type == OBJT_SWAP ||
object->type == OBJT_DEFAULT) {
m_tmp = m;
vm_pageout_flush(&m_tmp, 1, 0);
vm_pageout_flush(&m_tmp, 1, 0, FALSE);
return (TRUE);
}
} else if (m->busy == 0 && m->hold_count == 0)

View File

@@ -1976,10 +1976,12 @@ vm_map_clean(
vn_lock(object->handle, LK_EXCLUSIVE | LK_RETRY, curthread);
flags = (syncio || invalidate) ? OBJPC_SYNC : 0;
flags |= invalidate ? OBJPC_INVAL : 0;
VM_OBJECT_LOCK(object);
vm_object_page_clean(object,
OFF_TO_IDX(offset),
OFF_TO_IDX(offset + size + PAGE_MASK),
flags);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(object->handle, 0, curthread);
vm_object_deallocate(object);
}

View File

@@ -557,7 +557,9 @@ vm_object_terminate(vm_object_t object)
/*
* Clean pages and flush buffers.
*/
VM_OBJECT_LOCK(object);
vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
vp = (struct vnode *) object->handle;
vinvalbuf(vp, V_SAVE, NOCRED, NULL, 0, 0);
@@ -638,7 +640,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
int curgeneration;
GIANT_REQUIRED;
VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
if (object->type != OBJT_VNODE ||
(object->flags & OBJ_MIGHTBEDIRTY) == 0)
return;
@@ -721,9 +723,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
*/
if (tscan >= tend && (tstart || tend < object->size)) {
vm_page_unlock_queues();
VM_OBJECT_LOCK(object);
vm_object_clear_flag(object, OBJ_CLEANING);
VM_OBJECT_UNLOCK(object);
return;
}
pagerflags &= ~VM_PAGER_IGNORE_CLEANCHK;
@@ -749,9 +749,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
if (clearobjflags && (tstart == 0) && (tend == object->size)) {
struct vnode *vp;
VM_OBJECT_LOCK(object);
vm_object_clear_flag(object, OBJ_WRITEABLE|OBJ_MIGHTBEDIRTY);
VM_OBJECT_UNLOCK(object);
if (object->type == OBJT_VNODE &&
(vp = (struct vnode *)object->handle) != NULL) {
VI_LOCK(vp);
@@ -817,9 +815,7 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
VOP_FSYNC(vp, NULL, (pagerflags & VM_PAGER_PUT_SYNC)?MNT_WAIT:0, curproc);
#endif
VM_OBJECT_LOCK(object);
vm_object_clear_flag(object, OBJ_CLEANING);
VM_OBJECT_UNLOCK(object);
return;
}
@@ -917,7 +913,7 @@ vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int curgeneration,
runlen = maxb + maxf + 1;
splx(s);
vm_pageout_flush(ma, runlen, pagerflags);
vm_pageout_flush(ma, runlen, pagerflags, TRUE);
for (i = 0; i < runlen; i++) {
if (ma[i]->valid & ma[i]->dirty) {
pmap_page_protect(ma[i], VM_PROT_READ);
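In vm_object.c the lock becomes a precondition of vm_object_page_clean() rather than something the function takes and drops piecemeal: an ownership assertion is added on entry, and the nested VM_OBJECT_LOCK()/VM_OBJECT_UNLOCK() pairs around the vm_object_clear_flag() calls are deleted because the caller now holds the lock for the whole call. A heavily abridged, illustrative shape of the function after this change:

    GIANT_REQUIRED;
    VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);    /* caller must hold the lock */
    ...
    vm_object_clear_flag(object, OBJ_CLEANING); /* no nested lock/unlock needed */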

View File

@@ -348,7 +348,7 @@ vm_pageout_clean(m)
/*
* we allow reads during pageouts...
*/
return vm_pageout_flush(&mc[page_base], pageout_count, 0);
return vm_pageout_flush(&mc[page_base], pageout_count, 0, FALSE);
}
/*
@@ -361,10 +361,11 @@ vm_pageout_clean(m)
* the ordering.
*/
int
vm_pageout_flush(mc, count, flags)
vm_pageout_flush(mc, count, flags, is_object_locked)
vm_page_t *mc;
int count;
int flags;
int is_object_locked;
{
vm_object_t object;
int pageout_status[count];
@@ -389,7 +390,8 @@ vm_pageout_flush(mc, count, flags)
}
object = mc[0]->object;
vm_page_unlock_queues();
VM_OBJECT_LOCK(object);
if (!is_object_locked)
VM_OBJECT_LOCK(object);
vm_object_pip_add(object, count);
VM_OBJECT_UNLOCK(object);
@@ -442,7 +444,8 @@ vm_pageout_flush(mc, count, flags)
pmap_page_protect(mt, VM_PROT_READ);
}
}
VM_OBJECT_UNLOCK(object);
if (!is_object_locked)
VM_OBJECT_UNLOCK(object);
return numpagedout;
}

View File

@@ -113,6 +113,6 @@ int swap_pager_isswapped(vm_object_t, int);
#ifdef _KERNEL
void vm_pageout_page(vm_page_t, vm_object_t);
void vm_pageout_cluster(vm_page_t, vm_object_t);
int vm_pageout_flush(vm_page_t *, int, int);
int vm_pageout_flush(vm_page_t *, int, int, int is_object_locked);
#endif
#endif /* _VM_VM_PAGEOUT_H_ */
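The vm_pageout.c and vm_pageout.h hunks above add the bootstrap parameter itself: callers that do not yet hold the object lock (vm_pageout_clean() and vm_contig_launder()) pass FALSE and let vm_pageout_flush() acquire it, while vm_object_page_collect_flush(), which already runs with the lock held under vm_object_page_clean(), passes TRUE so the acquisition is skipped. A hedged sketch of the two call styles, illustrative rather than additional code from the commit:

    /* Caller does not hold the object lock; vm_pageout_flush() takes it. */
    vm_pageout_flush(&mc[page_base], pageout_count, 0, FALSE);

    /* Caller already holds the object lock, e.g. under vm_object_page_clean(). */
    VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
    vm_pageout_flush(ma, runlen, pagerflags, TRUE);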