Update mtime for tmpfs files modified through memory mapping.

Similar to UFS, perform the updates during syncer scans, which in
particular means that tmpfs now performs a scan on sync.  Also, this
means that an mtime update may be delayed by up to 30 seconds after
the write.
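
The observable effect can be demonstrated from userland.  The program
below is an illustrative sketch, not part of the change: it assumes
/tmp is a tmpfs mount and uses a hypothetical /tmp/mtime-test path; it
dirties a file purely through a shared mapping and checks whether
st_mtime has advanced after waiting out the syncer interval.

#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
    struct stat before, after;
    char *p;
    int fd;

    /* Assumes /tmp is a tmpfs mount; adjust the path as needed. */
    fd = open("/tmp/mtime-test", O_RDWR | O_CREAT | O_TRUNC, 0644);
    if (fd == -1 || ftruncate(fd, 4096) == -1)
        return (1);
    p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED)
        return (1);
    if (fstat(fd, &before) == -1)
        return (1);
    sleep(2);               /* make the two timestamps distinguishable */
    memset(p, 'x', 4096);   /* write through the mapping only */
    sleep(35);              /* longer than the 30 second syncer delay */
    if (fstat(fd, &after) == -1)
        return (1);
    printf("mtime advanced: %s\n",
        after.st_mtime > before.st_mtime ? "yes" : "no");
    munmap(p, 4096);
    close(fd);
    return (0);
}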

The vm_object's OBJ_TMPFS_DIRTY flag for the tmpfs swap object is
similar to the OBJ_MIGHTBEDIRTY flag for a vnode object: it indicates
that the object could have been dirtied.  Adapt the fast page fault
handler and vm_object_set_writeable_dirty() to handle OBJ_TMPFS_NODE
objects the same way as OBJT_VNODE objects.
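
The flag protocol can also be pictured as a standalone sketch.  The
code below is a hypothetical userland analogue (a pthread mutex and a
plain struct stand in for the VM object lock and flags); it only
illustrates the pattern the commit relies on: writers raise a cheap
"might be dirty" flag, and the periodic scan re-checks it under the
lock before acting.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Userland stand-in for a tmpfs-backed vm_object. */
struct object {
    pthread_mutex_t lock;           /* stands in for the VM object lock */
    atomic_bool     might_be_dirty; /* stands in for OBJ_TMPFS_DIRTY */
    time_t          mtime;
};

/* Fast path: a write fault only raises the flag. */
static void
object_set_writeable_dirty(struct object *obj)
{
    atomic_store(&obj->might_be_dirty, true);
}

/* Slow path: the periodic scan consumes the flag under the lock. */
static void
object_check_mtime(struct object *obj)
{
    /* Unlocked read: clean objects are skipped without locking. */
    if (!atomic_load(&obj->might_be_dirty))
        return;
    pthread_mutex_lock(&obj->lock);
    if (atomic_exchange(&obj->might_be_dirty, false))
        obj->mtime = time(NULL);    /* TMPFS_NODE_MODIFIED analogue */
    pthread_mutex_unlock(&obj->lock);
}

int
main(void)
{
    struct object obj;

    pthread_mutex_init(&obj.lock, NULL);
    atomic_init(&obj.might_be_dirty, false);
    obj.mtime = 0;

    object_set_writeable_dirty(&obj);   /* a mapped write happened */
    object_check_mtime(&obj);           /* the next scan notices it */
    printf("mtime updated: %s\n", obj.mtime != 0 ? "yes" : "no");
    pthread_mutex_destroy(&obj.lock);
    return (0);
}

A write that lands between the unlocked test and the locked re-check
simply leaves the flag set for a later scan, which is the "lost update
will be handled on the next call" case noted in tmpfs_sync().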

Reported by:	Ronald Klop <ronald-lists@klop.ws>
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
Konstantin Belousov  2015-01-28 10:37:23 +00:00
commit f40cb1c645  (parent 3544b0f68f)
Notes: svn2git 2020-12-20 02:59:44 +00:00
       svn path=/head/; revision=277828

7 changed files with 78 additions and 8 deletions

sys/fs/tmpfs/tmpfs.h

@@ -398,6 +398,7 @@ int tmpfs_alloc_vp(struct mount *, struct tmpfs_node *, int,
 void tmpfs_free_vp(struct vnode *);
 int  tmpfs_alloc_file(struct vnode *, struct vnode **, struct vattr *,
         struct componentname *, char *);
+void tmpfs_check_mtime(struct vnode *);
 void tmpfs_dir_attach(struct vnode *, struct tmpfs_dirent *);
 void tmpfs_dir_detach(struct vnode *, struct tmpfs_dirent *);
 void tmpfs_dir_destroy(struct tmpfs_mount *, struct tmpfs_node *);

sys/fs/tmpfs/tmpfs_subr.c

@@ -1415,6 +1415,31 @@ tmpfs_reg_resize(struct vnode *vp, off_t newsize, boolean_t ignerr)
     return (0);
 }
 
+void
+tmpfs_check_mtime(struct vnode *vp)
+{
+    struct tmpfs_node *node;
+    struct vm_object *obj;
+
+    ASSERT_VOP_ELOCKED(vp, "check_mtime");
+    if (vp->v_type != VREG)
+        return;
+    node = VP_TO_TMPFS_NODE(vp);
+    obj = vp->v_object;
+    KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
+        (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
+    /* unlocked read */
+    if ((obj->flags & OBJ_TMPFS_DIRTY) != 0) {
+        VM_OBJECT_WLOCK(obj);
+        if ((obj->flags & OBJ_TMPFS_DIRTY) != 0) {
+            obj->flags &= ~OBJ_TMPFS_DIRTY;
+            node = VP_TO_TMPFS_NODE(vp);
+            node->tn_status |= TMPFS_NODE_MODIFIED;
+        }
+        VM_OBJECT_WUNLOCK(obj);
+    }
+}
+
 /*
  * Change flags of the given vnode.
  * Caller should execute tmpfs_update on vp after a successful execution.

sys/fs/tmpfs/tmpfs_vfsops.c

@@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/proc.h>
 #include <sys/jail.h>
 #include <sys/kernel.h>
+#include <sys/rwlock.h>
 #include <sys/stat.h>
 #include <sys/systm.h>
 #include <sys/sysctl.h>
@@ -418,11 +419,45 @@ tmpfs_statfs(struct mount *mp, struct statfs *sbp)
 static int
 tmpfs_sync(struct mount *mp, int waitfor)
 {
+    struct vnode *vp, *mvp;
+    struct vm_object *obj;
 
     if (waitfor == MNT_SUSPEND) {
         MNT_ILOCK(mp);
         mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
         MNT_IUNLOCK(mp);
-    }
+    } else if (waitfor == MNT_LAZY) {
+        /*
+         * Handle lazy updates of mtime from writes to mmaped
+         * regions. Use MNT_VNODE_FOREACH_ALL instead of
+         * MNT_VNODE_FOREACH_ACTIVE, since unmap of the
+         * tmpfs-backed vnode does not call vinactive(), due
+         * to vm object type is OBJT_SWAP.
+         */
+        MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
+            if (vp->v_type != VREG) {
+                VI_UNLOCK(vp);
+                continue;
+            }
+            obj = vp->v_object;
+            KASSERT((obj->flags & (OBJ_TMPFS_NODE | OBJ_TMPFS)) ==
+                (OBJ_TMPFS_NODE | OBJ_TMPFS), ("non-tmpfs obj"));
+
+            /*
+             * Unlocked read, avoid taking vnode lock if
+             * not needed. Lost update will be handled on
+             * the next call.
+             */
+            if ((obj->flags & OBJ_TMPFS_DIRTY) == 0) {
+                VI_UNLOCK(vp);
+                continue;
+            }
+            if (vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_INTERLOCK,
+                curthread) != 0)
+                continue;
+            tmpfs_check_mtime(vp);
+            vput(vp);
+        }
+    }
     return (0);
 }

sys/fs/tmpfs/tmpfs_vnops.c

@@ -505,6 +505,7 @@ tmpfs_fsync(struct vop_fsync_args *v)
 
     MPASS(VOP_ISLOCKED(vp));
 
+    tmpfs_check_mtime(vp);
     tmpfs_update(vp);
 
     return 0;
@@ -1222,16 +1223,16 @@ tmpfs_readlink(struct vop_readlink_args *v)
 static int
 tmpfs_inactive(struct vop_inactive_args *v)
 {
-    struct vnode *vp = v->a_vp;
+    struct vnode *vp;
     struct tmpfs_node *node;
 
+    vp = v->a_vp;
     node = VP_TO_TMPFS_NODE(vp);
 
     if (node->tn_links == 0)
         vrecycle(vp);
-    return 0;
+    else
+        tmpfs_check_mtime(vp);
+    return (0);
 }
 
 int

sys/vm/vm_fault.c

@@ -358,11 +358,13 @@ RetryFault:;
         (fault_flags & (VM_FAULT_CHANGE_WIRING | VM_FAULT_DIRTY)) == 0 &&
         /* avoid calling vm_object_set_writeable_dirty() */
         ((prot & VM_PROT_WRITE) == 0 ||
-        fs.first_object->type != OBJT_VNODE ||
+        (fs.first_object->type != OBJT_VNODE &&
+        (fs.first_object->flags & OBJ_TMPFS_NODE) == 0) ||
         (fs.first_object->flags & OBJ_MIGHTBEDIRTY) != 0)) {
         VM_OBJECT_RLOCK(fs.first_object);
         if ((prot & VM_PROT_WRITE) != 0 &&
-            fs.first_object->type == OBJT_VNODE &&
+            (fs.first_object->type == OBJT_VNODE ||
+            (fs.first_object->flags & OBJ_TMPFS_NODE) != 0) &&
             (fs.first_object->flags & OBJ_MIGHTBEDIRTY) == 0)
             goto fast_failed;
         m = vm_page_lookup(fs.first_object, fs.first_pindex);

sys/vm/vm_object.c

@@ -2199,8 +2199,13 @@ vm_object_set_writeable_dirty(vm_object_t object)
 {
 
     VM_OBJECT_ASSERT_WLOCKED(object);
-    if (object->type != OBJT_VNODE)
+    if (object->type != OBJT_VNODE) {
+        if ((object->flags & OBJ_TMPFS_NODE) != 0) {
+            KASSERT(object->type == OBJT_SWAP, ("non-swap tmpfs"));
+            vm_object_set_flag(object, OBJ_TMPFS_DIRTY);
+        }
         return;
+    }
     object->generation++;
     if ((object->flags & OBJ_MIGHTBEDIRTY) != 0)
         return;

sys/vm/vm_object.h

@@ -187,6 +187,7 @@ struct vm_object {
 #define OBJ_PIPWNT        0x0040  /* paging in progress wanted */
 #define OBJ_MIGHTBEDIRTY  0x0100  /* object might be dirty, only for vnode */
 #define OBJ_TMPFS_NODE    0x0200  /* object belongs to tmpfs VREG node */
+#define OBJ_TMPFS_DIRTY   0x0400  /* dirty tmpfs obj */
 #define OBJ_COLORED       0x1000  /* pg_color is defined */
 #define OBJ_ONEMAPPING    0x2000  /* One USE (a single, non-forked) mapping flag */
 #define OBJ_DISCONNECTWNT 0x4000  /* disconnect from vnode wanted */