The VI_OBJDIRTY vnode flag mirrors the state of the OBJ_MIGHTBEDIRTY vm object flag. Besides carrying redundant information, the need to update both the vnode and the object flag causes additional acquisitions of the vnode interlock. OBJ_MIGHTBEDIRTY is only checked for vnode-backed vm objects.

Remove VI_OBJDIRTY and make sure that OBJ_MIGHTBEDIRTY is set only for vnode-backed vm objects.

Suggested and reviewed by:	alc
Tested by:	pho
MFC after:	3 weeks
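For context, the sketch below shows the idiom that replaces VI_OBJDIRTY checks. It is illustrative only and not part of the commit, and the helper name is made up here: code that used to test the VI_OBJDIRTY flag under the vnode interlock now looks at the vnode's backing vm object and tests OBJ_MIGHTBEDIRTY, which after this change is set only on OBJT_VNODE objects.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

/*
 * Illustrative helper (hypothetical, not in the tree): report whether the
 * vnode's backing object might hold dirty pages.  The old code tested the
 * VI_OBJDIRTY vnode flag here; the new code consults the vm object itself,
 * still under the vnode interlock, as in the diff below.
 */
static int
vn_object_might_be_dirty(struct vnode *vp)
{
	vm_object_t obj;

	ASSERT_VI_LOCKED(vp, __func__);
	obj = vp->v_object;
	return (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0);
}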
commit b79e14054c
parent 4b6831a036
@@ -2689,14 +2689,12 @@ vn_printf(struct vnode *vp, const char *fmt, ...)
 		strlcat(buf, "|VI_DOOMED", sizeof(buf));
 	if (vp->v_iflag & VI_FREE)
 		strlcat(buf, "|VI_FREE", sizeof(buf));
-	if (vp->v_iflag & VI_OBJDIRTY)
-		strlcat(buf, "|VI_OBJDIRTY", sizeof(buf));
 	if (vp->v_iflag & VI_DOINGINACT)
 		strlcat(buf, "|VI_DOINGINACT", sizeof(buf));
 	if (vp->v_iflag & VI_OWEINACT)
 		strlcat(buf, "|VI_OWEINACT", sizeof(buf));
 	flags = vp->v_iflag & ~(VI_MOUNT | VI_AGE | VI_DOOMED | VI_FREE |
-	    VI_OBJDIRTY | VI_DOINGINACT | VI_OWEINACT);
+	    VI_DOINGINACT | VI_OWEINACT);
 	if (flags != 0) {
 		snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
 		strlcat(buf, buf2, sizeof(buf));
@@ -3190,7 +3188,8 @@ vfs_msync(struct mount *mp, int flags)
 	MNT_ILOCK(mp);
 	MNT_VNODE_FOREACH(vp, mp, mvp) {
 		VI_LOCK(vp);
-		if ((vp->v_iflag & VI_OBJDIRTY) &&
+		obj = vp->v_object;
+		if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 &&
 		    (flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) {
 			MNT_IUNLOCK(mp);
 			if (!vget(vp,
@@ -237,7 +237,6 @@ struct xvnode {
 #define VI_AGE 0x0040 /* Insert vnode at head of free list */
 #define VI_DOOMED 0x0080 /* This vnode is being recycled */
 #define VI_FREE 0x0100 /* This vnode is on the freelist */
-#define VI_OBJDIRTY 0x0400 /* object might be dirty */
 #define VI_DOINGINACT 0x0800 /* VOP_INACTIVE is in progress */
 #define VI_OWEINACT 0x1000 /* Need to call inactive */
 
@@ -101,6 +101,7 @@ ffs_rawread_sync(struct vnode *vp)
 	int upgraded;
 	struct bufobj *bo;
 	struct mount *mp;
+	vm_object_t obj;
 
 	/* Check for dirty mmap, pending writes and dirty buffers */
 	bo = &vp->v_bufobj;
@@ -108,7 +109,8 @@ ffs_rawread_sync(struct vnode *vp)
 	VI_LOCK(vp);
 	if (bo->bo_numoutput > 0 ||
 	    bo->bo_dirty.bv_cnt > 0 ||
-	    (vp->v_iflag & VI_OBJDIRTY) != 0) {
+	    ((obj = vp->v_object) != NULL &&
+	    (obj->flags & OBJ_MIGHTBEDIRTY) != 0)) {
 		VI_UNLOCK(vp);
 		BO_UNLOCK(bo);
 
@@ -138,13 +140,12 @@ ffs_rawread_sync(struct vnode *vp)
 			return (EIO);
 		}
 		/* Attempt to msync mmap() regions to clean dirty mmap */
-		if ((vp->v_iflag & VI_OBJDIRTY) != 0) {
+		if ((obj = vp->v_object) != NULL &&
+		    (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
 			VI_UNLOCK(vp);
-			if (vp->v_object != NULL) {
-				VM_OBJECT_LOCK(vp->v_object);
-				vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
-				VM_OBJECT_UNLOCK(vp->v_object);
-			}
+			VM_OBJECT_LOCK(obj);
+			vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
+			VM_OBJECT_UNLOCK(obj);
 		} else
 			VI_UNLOCK(vp);
 
@@ -773,9 +773,9 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
 	int curgeneration;
 
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-	if (object->type != OBJT_VNODE ||
-	    (object->flags & OBJ_MIGHTBEDIRTY) == 0)
+	if ((object->flags & OBJ_MIGHTBEDIRTY) == 0)
 		return;
+	KASSERT(object->type == OBJT_VNODE, ("Not a vnode object"));
 
 	pagerflags = (flags & (OBJPC_SYNC | OBJPC_INVAL)) ? VM_PAGER_PUT_SYNC : VM_PAGER_CLUSTER_OK;
 	pagerflags |= (flags & OBJPC_INVAL) ? VM_PAGER_PUT_INVAL : 0;
@@ -875,18 +875,8 @@ vm_object_page_clean(vm_object_t object, vm_pindex_t start, vm_pindex_t end, int
 			pmap_remove_write(p);
 	}
 
-	if (clearobjflags && (tstart == 0) && (tend == object->size)) {
-		struct vnode *vp;
-
+	if (clearobjflags && (tstart == 0) && (tend == object->size))
 		vm_object_clear_flag(object, OBJ_MIGHTBEDIRTY);
-		if (object->type == OBJT_VNODE &&
-		    (vp = (struct vnode *)object->handle) != NULL) {
-			VI_LOCK(vp);
-			if (vp->v_iflag & VI_OBJDIRTY)
-				vp->v_iflag &= ~VI_OBJDIRTY;
-			VI_UNLOCK(vp);
-		}
-	}
 
 rescan:
 	curgeneration = object->generation;
@@ -2148,18 +2138,12 @@ vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
 void
 vm_object_set_writeable_dirty(vm_object_t object)
 {
-	struct vnode *vp;
 
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-	if ((object->flags & OBJ_MIGHTBEDIRTY) != 0)
+	if (object->type != OBJT_VNODE ||
+	    (object->flags & OBJ_MIGHTBEDIRTY) != 0)
 		return;
 	vm_object_set_flag(object, OBJ_MIGHTBEDIRTY);
-	if (object->type == OBJT_VNODE &&
-	    (vp = (struct vnode *)object->handle) != NULL) {
-		VI_LOCK(vp);
-		vp->v_iflag |= VI_OBJDIRTY;
-		VI_UNLOCK(vp);
-	}
 }
 
 #include "opt_ddb.h"
@@ -154,7 +154,7 @@ struct vm_object {
 #define OBJ_DEAD 0x0008 /* dead objects (during rundown) */
 #define OBJ_NOSPLIT 0x0010 /* dont split this object */
 #define OBJ_PIPWNT 0x0040 /* paging in progress wanted */
-#define OBJ_MIGHTBEDIRTY 0x0100 /* object might be dirty */
+#define OBJ_MIGHTBEDIRTY 0x0100 /* object might be dirty, only for vnode */
 #define OBJ_CLEANING 0x0200
 #define OBJ_COLORED 0x1000 /* pg_color is defined */
 #define OBJ_ONEMAPPING 0x2000 /* One USE (a single, non-forked) mapping flag */