This update uses the MNT_VNODE_FOREACH_ACTIVE interface that loops
over just the active vnodes associated with a mount point to replace MNT_VNODE_FOREACH_ALL in the vfs_msync, ffs_sync_lazy, and qsync routines.

The vfs_msync routine is run every 30 seconds for every writably mounted filesystem. It ensures that any files mmap'ed from the filesystem with modified pages have those pages queued to be written back to the file from which they are mapped.

The ffs_sync_lazy and qsync routines are run every 30 seconds for every writably mounted UFS/FFS filesystem. The ffs_sync_lazy routine ensures that any files that have been accessed in the previous 30 seconds have had their access times queued for updating in the filesystem. The qsync routine ensures that any files with modified quotas have those quotas queued to be written back to their associated quota file.

In a system configured with 250,000 vnodes, fewer than 1000 are typically active at any point in time. Prior to this change all 250,000 vnodes would be locked and inspected twice every minute by the syncer. For UFS/FFS filesystems they would be locked and inspected six times every minute (twice by each of these three routines, since each of these routines does its own pass over the vnodes associated with a mount point). With this change the syncer now locks and inspects only the tiny set of vnodes that are active.

Reviewed by: kib
Tested by: Peter Holm
MFC after: 2 weeks
This commit is contained in:
parent
f257ebbb2e
commit
dca5e0ec50
@ -2484,6 +2484,7 @@ vdropl(struct vnode *vp)
|
||||
void
|
||||
vinactive(struct vnode *vp, struct thread *td)
|
||||
{
|
||||
struct vm_object *obj;
|
||||
|
||||
ASSERT_VOP_ELOCKED(vp, "vinactive");
|
||||
ASSERT_VI_LOCKED(vp, "vinactive");
|
||||
@ -2493,6 +2494,17 @@ vinactive(struct vnode *vp, struct thread *td)
|
||||
vp->v_iflag |= VI_DOINGINACT;
|
||||
vp->v_iflag &= ~VI_OWEINACT;
|
||||
VI_UNLOCK(vp);
|
||||
/*
|
||||
* Before moving off the active list, we must be sure that any
|
||||
* modified pages are on the vnode's dirty list since these will
|
||||
* no longer be checked once the vnode is on the inactive list.
|
||||
*/
|
||||
obj = vp->v_object;
|
||||
if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0) {
|
||||
VM_OBJECT_LOCK(obj);
|
||||
vm_object_page_clean(obj, 0, 0, OBJPC_NOSYNC);
|
||||
VM_OBJECT_UNLOCK(obj);
|
||||
}
|
||||
VOP_INACTIVE(vp, td);
|
||||
VI_LOCK(vp);
|
||||
VNASSERT(vp->v_iflag & VI_DOINGINACT, vp,
|
||||
@ -3362,7 +3374,7 @@ vfs_msync(struct mount *mp, int flags)
|
||||
struct vm_object *obj;
|
||||
|
||||
CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
|
||||
MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
|
||||
MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
|
||||
obj = vp->v_object;
|
||||
if (obj != NULL && (obj->flags & OBJ_MIGHTBEDIRTY) != 0 &&
|
||||
(flags == MNT_WAIT || VOP_ISLOCKED(vp) == 0)) {
|
||||
|
@ -1432,7 +1432,7 @@ ffs_sync_lazy(mp)
|
||||
td = curthread;
|
||||
if ((mp->mnt_flag & MNT_NOATIME) != 0)
|
||||
goto qupdate;
|
||||
MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
|
||||
MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
|
||||
if (vp->v_type == VNON) {
|
||||
VI_UNLOCK(vp);
|
||||
continue;
|
||||
|
@ -227,9 +227,10 @@ void dqinit(void);
|
||||
void dqrele(struct vnode *, struct dquot *);
|
||||
void dquninit(void);
|
||||
int getinoquota(struct inode *);
|
||||
int qsync(struct mount *mp);
|
||||
int quotaoff(struct thread *td, struct mount *, int);
|
||||
int quotaon(struct thread *td, struct mount *, int, void *);
|
||||
int qsync(struct mount *);
|
||||
int qsyncvp(struct vnode *);
|
||||
int quotaoff(struct thread *, struct mount *, int);
|
||||
int quotaon(struct thread *, struct mount *, int, void *);
|
||||
int getquota32(struct thread *, struct mount *, u_long, int, void *);
|
||||
int setquota32(struct thread *, struct mount *, u_long, int, void *);
|
||||
int setuse32(struct thread *, struct mount *, u_long, int, void *);
|
||||
|
@ -87,6 +87,14 @@ ufs_inactive(ap)
|
||||
goto out;
|
||||
#ifdef UFS_GJOURNAL
|
||||
ufs_gjournal_close(vp);
|
||||
#endif
|
||||
#ifdef QUOTA
|
||||
/*
|
||||
* Before moving off the active list, we must be sure that
|
||||
* any modified quotas have been pushed since these will no
|
||||
* longer be checked once the vnode is on the inactive list.
|
||||
*/
|
||||
qsyncvp(vp);
|
||||
#endif
|
||||
if ((ip->i_effnlink == 0 && DOINGSOFTDEP(vp)) ||
|
||||
(ip->i_nlink <= 0 && !UFS_RDONLY(ip))) {
|
||||
|
@ -1044,7 +1044,7 @@ qsync(struct mount *mp)
|
||||
* synchronizing any modified dquot structures.
|
||||
*/
|
||||
again:
|
||||
MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
|
||||
MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
|
||||
if (vp->v_type == VNON) {
|
||||
VI_UNLOCK(vp);
|
||||
continue;
|
||||
@ -1067,6 +1067,39 @@ again:
|
||||
return (0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Sync quota file for given vnode to disk.
|
||||
*/
|
||||
int
|
||||
qsyncvp(struct vnode *vp)
|
||||
{
|
||||
struct ufsmount *ump = VFSTOUFS(vp->v_mount);
|
||||
struct dquot *dq;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Check if the mount point has any quotas.
|
||||
* If not, simply return.
|
||||
*/
|
||||
UFS_LOCK(ump);
|
||||
for (i = 0; i < MAXQUOTAS; i++)
|
||||
if (ump->um_quotas[i] != NULLVP)
|
||||
break;
|
||||
UFS_UNLOCK(ump);
|
||||
if (i == MAXQUOTAS)
|
||||
return (0);
|
||||
/*
|
||||
* Search quotas associated with this vnode
|
||||
* synchronizing any modified dquot structures.
|
||||
*/
|
||||
for (i = 0; i < MAXQUOTAS; i++) {
|
||||
dq = VTOI(vp)->i_dquot[i];
|
||||
if (dq != NODQUOT)
|
||||
dqsync(vp, dq);
|
||||
}
|
||||
return (0);
|
||||
}
|
||||
|
||||
/*
|
||||
* Code pertaining to management of the in-core dquot data structures.
|
||||
*/
|
||||
|
Loading…
x
Reference in New Issue
Block a user