/*-
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

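/*
 * UMA zones for the in-core inode and for the UFS1/UFS2 on-disk dinodes;
 * they are created lazily by the first call to ffs_mount() below.
 */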
static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;

static int ffs_reload(struct mount *, struct thread *);
static int ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void ffs_oldfscompat_read(struct fs *, struct ufsmount *,
    ufs2_daddr_t);
static void ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int ffs_sync_lazy(struct mount *mp);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;

static struct vfsops ufs_vfsops = {
        .vfs_extattrctl =   ffs_extattrctl,
        .vfs_fhtovp =       ffs_fhtovp,
        .vfs_init =         ffs_init,
        .vfs_mount =        ffs_mount,
        .vfs_cmount =       ffs_cmount,
        .vfs_quotactl =     ufs_quotactl,
        .vfs_root =         ufs_root,
        .vfs_statfs =       ffs_statfs,
        .vfs_sync =         ffs_sync,
        .vfs_uninit =       ffs_uninit,
        .vfs_unmount =      ffs_unmount,
        .vfs_vget =         ffs_vget,
        .vfs_susp_clean =   process_deferred_inactive,
};

VFS_SET(ufs_vfsops, ufs, 0);
MODULE_VERSION(ufs, 1);
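
/*
 * FFS supplies its own buffer operations: writes go through ffs_bufwrite()
 * and device I/O through ffs_geom_strategy() so that snapshot copy-on-write
 * and the GEOM consumer are handled correctly.  Delayed-write flushing uses
 * ffs_bdflush() unless snapshots are compiled out (NO_FFS_SNAPSHOT).
 */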
static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
        .bop_name =     "FFS",
        .bop_write =    ffs_bufwrite,
        .bop_strategy = ffs_geom_strategy,
        .bop_sync =     bufsync,
#ifdef NO_FFS_SNAPSHOT
        .bop_bdflush =  bufbdflush,
#else
        .bop_bdflush =  ffs_bdflush,
#endif
};

/*
 * Note that userquota and groupquota options are not currently used
 * by UFS/FFS code and generally mount(8) does not pass those options
 * from userland, but they can be passed by loader(8) via
 * vfs.root.mountfrom.options.
 */
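/*
 * As an illustrative (hypothetical) example, a loader.conf(5) entry such as
 *
 *	vfs.root.mountfrom.options="rw,noatime"
 *
 * would cause those option strings to arrive in mnt_optnew here even
 * though mount(8) itself never passed them.
 */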
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "fsckpid", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", NULL };

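/*
 * ffs_mount() is the VFS_MOUNT() handler for FFS.  It validates the
 * nmount(2) options against ffs_opts[], handles the MNT_UPDATE cases
 * (rw->ro downgrade, MNT_RELOAD, ro->rw upgrade, attaching or detaching
 * an fsck "checker" process, and snapshot creation) and, for a new
 * mount, resolves the "from" device and calls ffs_mountfs().
 */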
static int
ffs_mount(struct mount *mp)
{
        struct vnode *devvp;
        struct thread *td;
        struct ufsmount *ump = 0;
        struct fs *fs;
        pid_t fsckpid = 0;
        int error, flags;
        uint64_t mntorflags;
        accmode_t accmode;
        struct nameidata ndp;
        char *fspec;

        td = curthread;
        if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
                return (EINVAL);
        if (uma_inode == NULL) {
                uma_inode = uma_zcreate("FFS inode",
                    sizeof(struct inode), NULL, NULL, NULL, NULL,
                    UMA_ALIGN_PTR, 0);
                uma_ufs1 = uma_zcreate("FFS1 dinode",
                    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
                    UMA_ALIGN_PTR, 0);
                uma_ufs2 = uma_zcreate("FFS2 dinode",
                    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
                    UMA_ALIGN_PTR, 0);
        }

        vfs_deleteopt(mp->mnt_optnew, "groupquota");
        vfs_deleteopt(mp->mnt_optnew, "userquota");

        fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
        if (error)
                return (error);

        mntorflags = 0;
        if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
                mntorflags |= MNT_ACLS;

        if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
                mntorflags |= MNT_SNAPSHOT;
                /*
                 * Once we have set the MNT_SNAPSHOT flag, do not
                 * persist "snapshot" in the options list.
                 */
                vfs_deleteopt(mp->mnt_optnew, "snapshot");
                vfs_deleteopt(mp->mnt_opt, "snapshot");
        }

        if (vfs_getopt(mp->mnt_optnew, "fsckpid", NULL, NULL) == 0 &&
            vfs_scanopt(mp->mnt_optnew, "fsckpid", "%d", &fsckpid) == 1) {
                /*
                 * Once we have set the restricted PID, do not
                 * persist "fsckpid" in the options list.
                 */
                vfs_deleteopt(mp->mnt_optnew, "fsckpid");
                vfs_deleteopt(mp->mnt_opt, "fsckpid");
                if (mp->mnt_flag & MNT_UPDATE) {
                        if (VFSTOUFS(mp)->um_fs->fs_ronly == 0 &&
                             vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
                                vfs_mount_error(mp,
                                    "Checker enable: Must be read-only");
                                return (EINVAL);
                        }
                } else if (vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0) == 0) {
                        vfs_mount_error(mp,
                            "Checker enable: Must be read-only");
                        return (EINVAL);
                }
                /* Set to -1 if we are done */
                if (fsckpid == 0)
                        fsckpid = -1;
        }

        if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
                if (mntorflags & MNT_ACLS) {
                        vfs_mount_error(mp,
                            "\"acls\" and \"nfsv4acls\" options "
                            "are mutually exclusive");
                        return (EINVAL);
                }
                mntorflags |= MNT_NFS4ACLS;
        }

        MNT_ILOCK(mp);
        mp->mnt_flag |= mntorflags;
        MNT_IUNLOCK(mp);
        /*
         * If updating, check whether changing from read-only to
         * read/write; if there is no device name, that's all we do.
         */
        if (mp->mnt_flag & MNT_UPDATE) {
                ump = VFSTOUFS(mp);
                fs = ump->um_fs;
                devvp = ump->um_devvp;
                if (fsckpid == -1 && ump->um_fsckpid > 0) {
                        if ((error = ffs_flushfiles(mp, WRITECLOSE, td)) != 0 ||
                            (error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0)
                                return (error);
                        DROP_GIANT();
                        g_topology_lock();
                        /*
                         * Return to normal read-only mode.
                         */
                        error = g_access(ump->um_cp, 0, -1, 0);
                        g_topology_unlock();
                        PICKUP_GIANT();
                        ump->um_fsckpid = 0;
                }
                if (fs->fs_ronly == 0 &&
                    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
                        /*
                         * Flush any dirty data and suspend filesystem.
                         */
                        if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
                                return (error);
                        for (;;) {
                                vn_finished_write(mp);
                                if ((error = vfs_write_suspend(mp)) != 0)
                                        return (error);
                                MNT_ILOCK(mp);
                                if (mp->mnt_kern_flag & MNTK_SUSPENDED) {
                                        /*
                                         * Allow the secondary writes
                                         * to proceed.
                                         */
                                        mp->mnt_kern_flag &= ~(MNTK_SUSPENDED |
                                            MNTK_SUSPEND2);
                                        wakeup(&mp->mnt_flag);
                                        MNT_IUNLOCK(mp);
                                        /*
                                         * Allow the curthread to
                                         * ignore the suspension to
                                         * synchronize on-disk state.
                                         */
                                        td->td_pflags |= TDP_IGNSUSP;
                                        break;
                                }
                                MNT_IUNLOCK(mp);
                                vn_start_write(NULL, &mp, V_WAIT);
                        }
                        /*
                         * Check for and optionally get rid of files open
                         * for writing.
                         */
                        flags = WRITECLOSE;
                        if (mp->mnt_flag & MNT_FORCE)
                                flags |= FORCECLOSE;
                        if (MOUNTEDSOFTDEP(mp)) {
                                error = softdep_flushfiles(mp, flags, td);
                        } else {
                                error = ffs_flushfiles(mp, flags, td);
                        }
                        if (error) {
                                vfs_write_resume(mp);
                                return (error);
                        }
                        if (fs->fs_pendingblocks != 0 ||
                            fs->fs_pendinginodes != 0) {
                                printf("WARNING: %s Update error: blocks %jd "
                                    "files %d\n", fs->fs_fsmnt,
                                    (intmax_t)fs->fs_pendingblocks,
                                    fs->fs_pendinginodes);
                                fs->fs_pendingblocks = 0;
                                fs->fs_pendinginodes = 0;
                        }
                        if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
                                fs->fs_clean = 1;
                        if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
                                fs->fs_ronly = 0;
                                fs->fs_clean = 0;
                                vfs_write_resume(mp);
                                return (error);
                        }
                        if (MOUNTEDSOFTDEP(mp))
                                softdep_unmount(mp);
                        DROP_GIANT();
                        g_topology_lock();
                        /*
                         * Drop our write and exclusive access.
                         */
                        g_access(ump->um_cp, 0, -1, -1);
                        g_topology_unlock();
                        PICKUP_GIANT();
                        fs->fs_ronly = 1;
                        MNT_ILOCK(mp);
                        mp->mnt_flag |= MNT_RDONLY;
                        MNT_IUNLOCK(mp);
                        /*
                         * Allow the writers to note that filesystem
                         * is ro now.
                         */
                        vfs_write_resume(mp);
                }
                if ((mp->mnt_flag & MNT_RELOAD) &&
                    (error = ffs_reload(mp, td)) != 0)
                        return (error);
                if (fs->fs_ronly &&
                    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
                        /*
                         * If we are running a checker, do not allow upgrade.
                         */
                        if (ump->um_fsckpid > 0) {
                                vfs_mount_error(mp,
                                    "Active checker, cannot upgrade to write");
                                return (EINVAL);
                        }
                        /*
                         * If upgrade to read-write by non-root, then verify
                         * that user has necessary permissions on the device.
                         */
                        vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
                        error = VOP_ACCESS(devvp, VREAD | VWRITE,
                            td->td_ucred, td);
                        if (error)
                                error = priv_check(td, PRIV_VFS_MOUNT_PERM);
                        if (error) {
                                VOP_UNLOCK(devvp, 0);
                                return (error);
                        }
                        VOP_UNLOCK(devvp, 0);
                        fs->fs_flags &= ~FS_UNCLEAN;
                        if (fs->fs_clean == 0) {
                                fs->fs_flags |= FS_UNCLEAN;
                                if ((mp->mnt_flag & MNT_FORCE) ||
                                    ((fs->fs_flags &
                                     (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
                                     (fs->fs_flags & FS_DOSOFTDEP))) {
                                        printf("WARNING: %s was not properly "
                                           "dismounted\n", fs->fs_fsmnt);
                                } else {
                                        vfs_mount_error(mp,
                                           "R/W mount of %s denied. %s.%s",
                                           fs->fs_fsmnt,
                                           "Filesystem is not clean - run fsck",
                                           (fs->fs_flags & FS_SUJ) == 0 ? "" :
                                           " Forced mount will invalidate"
                                           " journal contents");
                                        return (EPERM);
                                }
                        }
                        DROP_GIANT();
                        g_topology_lock();
                        /*
                         * Request exclusive write access.
                         */
                        error = g_access(ump->um_cp, 0, 1, 1);
                        g_topology_unlock();
                        PICKUP_GIANT();
                        if (error)
                                return (error);
                        if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
                                return (error);
                        fs->fs_ronly = 0;
                        MNT_ILOCK(mp);
                        mp->mnt_flag &= ~MNT_RDONLY;
                        MNT_IUNLOCK(mp);
                        fs->fs_mtime = time_second;
                        /* check to see if we need to start softdep */
                        if ((fs->fs_flags & FS_DOSOFTDEP) &&
                            (error = softdep_mount(devvp, mp, fs, td->td_ucred))){
                                vn_finished_write(mp);
                                return (error);
                        }
                        fs->fs_clean = 0;
                        if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
                                vn_finished_write(mp);
                                return (error);
                        }
                        if (fs->fs_snapinum[0] != 0)
                                ffs_snapshot_mount(mp);
                        vn_finished_write(mp);
                }
                /*
                 * Soft updates is incompatible with "async",
                 * so if we are doing softupdates stop the user
                 * from setting the async flag in an update.
                 * Softdep_mount() clears it in an initial mount
                 * or ro->rw remount.
                 */
                if (MOUNTEDSOFTDEP(mp)) {
                        /* XXX: Reset too late ? */
                        MNT_ILOCK(mp);
                        mp->mnt_flag &= ~MNT_ASYNC;
                        MNT_IUNLOCK(mp);
                }
                /*
                 * Keep MNT_ACLS flag if it is stored in superblock.
                 */
                if ((fs->fs_flags & FS_ACLS) != 0) {
                        /* XXX: Set too late ? */
                        MNT_ILOCK(mp);
                        mp->mnt_flag |= MNT_ACLS;
                        MNT_IUNLOCK(mp);
                }

                if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
                        /* XXX: Set too late ? */
                        MNT_ILOCK(mp);
                        mp->mnt_flag |= MNT_NFS4ACLS;
                        MNT_IUNLOCK(mp);
                }
                /*
                 * If this is a request from fsck to clean up the filesystem,
                 * then allow the specified pid to proceed.
                 */
                if (fsckpid > 0) {
                        if (ump->um_fsckpid != 0) {
                                vfs_mount_error(mp,
                                    "Active checker already running on %s",
                                    fs->fs_fsmnt);
                                return (EINVAL);
                        }
                        KASSERT(MOUNTEDSOFTDEP(mp) == 0,
                            ("soft updates enabled on read-only file system"));
                        DROP_GIANT();
                        g_topology_lock();
                        /*
                         * Request write access.
                         */
                        error = g_access(ump->um_cp, 0, 1, 0);
                        g_topology_unlock();
                        PICKUP_GIANT();
                        if (error) {
                                vfs_mount_error(mp,
                                    "Checker activation failed on %s",
                                    fs->fs_fsmnt);
                                return (error);
                        }
                        ump->um_fsckpid = fsckpid;
                        if (fs->fs_snapinum[0] != 0)
                                ffs_snapshot_mount(mp);
                        fs->fs_mtime = time_second;
                        fs->fs_fmod = 1;
                        fs->fs_clean = 0;
                        (void) ffs_sbupdate(ump, MNT_WAIT, 0);
                }

                /*
                 * If this is a snapshot request, take the snapshot.
                 */
                if (mp->mnt_flag & MNT_SNAPSHOT)
                        return (ffs_snapshot(mp, fspec));
        }

        /*
         * Not an update, or updating the name: look up the name
         * and verify that it refers to a sensible disk device.
         */
        NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
        if ((error = namei(&ndp)) != 0)
                return (error);
        NDFREE(&ndp, NDF_ONLY_PNBUF);
        devvp = ndp.ni_vp;
        if (!vn_isdisk(devvp, &error)) {
                vput(devvp);
                return (error);
        }

        /*
         * If mount by non-root, then verify that user has necessary
         * permissions on the device.
         */
        accmode = VREAD;
        if ((mp->mnt_flag & MNT_RDONLY) == 0)
                accmode |= VWRITE;
        error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
        if (error)
                error = priv_check(td, PRIV_VFS_MOUNT_PERM);
        if (error) {
                vput(devvp);
                return (error);
        }

        if (mp->mnt_flag & MNT_UPDATE) {
                /*
                 * Update only
                 *
                 * If it's not the same vnode, or at least the same device
                 * then it's not correct.
                 */

                if (devvp->v_rdev != ump->um_devvp->v_rdev)
                        error = EINVAL; /* needs translation */
                vput(devvp);
                if (error)
                        return (error);
        } else {
                /*
                 * New mount
                 *
                 * We need the name for the mount point (also used for
                 * "last mounted on") copied in. If an error occurs,
                 * the mount point is discarded by the upper level code.
                 * Note that vfs_mount() populates f_mntonname for us.
                 */
                if ((error = ffs_mountfs(devvp, mp, td)) != 0) {
                        vrele(devvp);
                        return (error);
                }
                if (fsckpid > 0) {
                        KASSERT(MOUNTEDSOFTDEP(mp) == 0,
                            ("soft updates enabled on read-only file system"));
                        ump = VFSTOUFS(mp);
                        fs = ump->um_fs;
                        DROP_GIANT();
                        g_topology_lock();
                        /*
                         * Request write access.
                         */
                        error = g_access(ump->um_cp, 0, 1, 0);
                        g_topology_unlock();
                        PICKUP_GIANT();
                        if (error) {
                                printf("WARNING: %s: Checker activation "
                                    "failed\n", fs->fs_fsmnt);
                        } else {
                                ump->um_fsckpid = fsckpid;
                                if (fs->fs_snapinum[0] != 0)
                                        ffs_snapshot_mount(mp);
                                fs->fs_mtime = time_second;
                                fs->fs_clean = 0;
                                (void) ffs_sbupdate(ump, MNT_WAIT, 0);
                        }
                }
        }
        vfs_mountedfrom(mp, fspec);
        return (0);
}

/*
 * Compatibility with old mount system call.
 */
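/*
 * The old mount(2) ABI passes a struct ufs_args; ffs_cmount() translates it
 * into the nmount(2) name/value list used by ffs_mount() above: the special
 * device name becomes the "from" option and the old export information is
 * converted and passed along as an "export" argument.
 */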
static int
ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
        struct ufs_args args;
        struct export_args exp;
        int error;

        if (data == NULL)
                return (EINVAL);
        error = copyin(data, &args, sizeof args);
        if (error)
                return (error);
        vfs_oexport_conv(&args.export, &exp);

        ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
        ma = mount_arg(ma, "export", &exp, sizeof(exp));
        error = kernel_mount(ma, flags);

        return (error);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
static int
ffs_reload(struct mount *mp, struct thread *td)
{
        struct vnode *vp, *mvp, *devvp;
        struct inode *ip;
        void *space;
        struct buf *bp;
        struct fs *fs, *newfs;
        struct ufsmount *ump;
        ufs2_daddr_t sblockloc;
        int i, blks, size, error;
        int32_t *lp;

        if ((mp->mnt_flag & MNT_RDONLY) == 0)
                return (EINVAL);
        ump = VFSTOUFS(mp);
        /*
         * Step 1: invalidate all cached meta-data.
         */
        devvp = VFSTOUFS(mp)->um_devvp;
        vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
        if (vinvalbuf(devvp, 0, 0, 0) != 0)
                panic("ffs_reload: dirty1");
        VOP_UNLOCK(devvp, 0);

        /*
         * Step 2: re-read superblock from disk.
         */
        fs = VFSTOUFS(mp)->um_fs;
        if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
            NOCRED, &bp)) != 0)
                return (error);
        newfs = (struct fs *)bp->b_data;
        if ((newfs->fs_magic != FS_UFS1_MAGIC &&
             newfs->fs_magic != FS_UFS2_MAGIC) ||
            newfs->fs_bsize > MAXBSIZE ||
            newfs->fs_bsize < sizeof(struct fs)) {
                brelse(bp);
                return (EIO);           /* XXX needs translation */
        }
        /*
         * Copy pointer fields back into superblock before copying in   XXX
         * new superblock. These should really be in the ufsmount.      XXX
         * Note that important parameters (eg fs_ncg) are unchanged.
         */
        newfs->fs_csp = fs->fs_csp;
        newfs->fs_maxcluster = fs->fs_maxcluster;
        newfs->fs_contigdirs = fs->fs_contigdirs;
        newfs->fs_active = fs->fs_active;
        /* The file system is still read-only. */
        newfs->fs_ronly = 1;
        sblockloc = fs->fs_sblockloc;
        bcopy(newfs, fs, (u_int)fs->fs_sbsize);
        brelse(bp);
        mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
        ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc);
        UFS_LOCK(ump);
        if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
                printf("WARNING: %s: reload pending error: blocks %jd "
                    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
                    fs->fs_pendinginodes);
                fs->fs_pendingblocks = 0;
                fs->fs_pendinginodes = 0;
        }
        UFS_UNLOCK(ump);

        /*
         * Step 3: re-read summary information from disk.
         */
        size = fs->fs_cssize;
        blks = howmany(size, fs->fs_fsize);
        if (fs->fs_contigsumsize > 0)
                size += fs->fs_ncg * sizeof(int32_t);
        size += fs->fs_ncg * sizeof(u_int8_t);
        free(fs->fs_csp, M_UFSMNT);
        space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
        fs->fs_csp = space;
        for (i = 0; i < blks; i += fs->fs_frag) {
                size = fs->fs_bsize;
                if (i + fs->fs_frag > blks)
                        size = (blks - i) * fs->fs_fsize;
                error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
                    NOCRED, &bp);
                if (error)
                        return (error);
                bcopy(bp->b_data, space, (u_int)size);
                space = (char *)space + size;
                brelse(bp);
        }
        /*
         * We no longer know anything about clusters per cylinder group.
         */
        if (fs->fs_contigsumsize > 0) {
                fs->fs_maxcluster = lp = space;
                for (i = 0; i < fs->fs_ncg; i++)
                        *lp++ = fs->fs_contigsumsize;
                space = lp;
        }
        size = fs->fs_ncg * sizeof(u_int8_t);
        fs->fs_contigdirs = (u_int8_t *)space;
        bzero(fs->fs_contigdirs, size);

loop:
        MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
                /*
                 * Step 4: invalidate all cached file data.
                 */
                if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
                        MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
                        goto loop;
                }
                if (vinvalbuf(vp, 0, 0, 0))
                        panic("ffs_reload: dirty2");
                /*
                 * Step 5: re-read inode data for all active vnodes.
                 */
                ip = VTOI(vp);
                error =
                    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
                    (int)fs->fs_bsize, NOCRED, &bp);
                if (error) {
                        VOP_UNLOCK(vp, 0);
                        vrele(vp);
                        MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
                        return (error);
                }
                ffs_load_inode(bp, ip, fs, ip->i_number);
                ip->i_effnlink = ip->i_nlink;
                brelse(bp);
                VOP_UNLOCK(vp, 0);
                vrele(vp);
        }
        return (0);
}

/*
 * Possible superblock locations ordered from most to least likely.
 */
static int sblock_try[] = SBLOCKSEARCH;
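/*
 * SBLOCKSEARCH comes from <ufs/ffs/fs.h>; it conventionally expands to
 * { SBLOCK_UFS2, SBLOCK_UFS1, SBLOCK_FLOPPY, SBLOCK_PIGGY, -1 }, i.e. byte
 * offsets 65536, 8192, 0 and 262144 (the exact contents depend on the
 * fs.h in use).
 */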

/*
 * Common code for mount and mountroot
 */
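/*
 * ffs_mountfs() opens the device through GEOM, searches sblock_try[] for a
 * valid superblock, builds and initializes the ufsmount structure, and
 * reads the cylinder-group summary information into core.
 */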
static int
ffs_mountfs(devvp, mp, td)
        struct vnode *devvp;
        struct mount *mp;
        struct thread *td;
{
        struct ufsmount *ump;
        struct buf *bp;
        struct fs *fs;
        struct cdev *dev;
        void *space;
        ufs2_daddr_t sblockloc;
        int error, i, blks, size, ronly;
        int32_t *lp;
        struct ucred *cred;
        struct g_consumer *cp;
        struct mount *nmp;

        bp = NULL;
        ump = NULL;
        cred = td ? td->td_ucred : NOCRED;
        ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

        dev = devvp->v_rdev;
        dev_ref(dev);
        DROP_GIANT();
        g_topology_lock();
        error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
        g_topology_unlock();
        PICKUP_GIANT();
        VOP_UNLOCK(devvp, 0);
        if (error)
                goto out;
        if (devvp->v_rdev->si_iosize_max != 0)
                mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
        if (mp->mnt_iosize_max > MAXPHYS)
                mp->mnt_iosize_max = MAXPHYS;

        devvp->v_bufobj.bo_ops = &ffs_ops;

        fs = NULL;
        sblockloc = 0;
        /*
         * Try reading the superblock in each of its possible locations.
         */
        for (i = 0; sblock_try[i] != -1; i++) {
                if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
                        error = EINVAL;
                        vfs_mount_error(mp,
                            "Invalid sectorsize %d for superblock size %d",
                            cp->provider->sectorsize, SBLOCKSIZE);
                        goto out;
                }
                if ((error = bread(devvp, btodb(sblock_try[i]), SBLOCKSIZE,
                    cred, &bp)) != 0)
                        goto out;
                fs = (struct fs *)bp->b_data;
                sblockloc = sblock_try[i];
                if ((fs->fs_magic == FS_UFS1_MAGIC ||
                     (fs->fs_magic == FS_UFS2_MAGIC &&
                      (fs->fs_sblockloc == sblockloc ||
                       (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0))) &&
                    fs->fs_bsize <= MAXBSIZE &&
                    fs->fs_bsize >= sizeof(struct fs))
                        break;
                brelse(bp);
                bp = NULL;
        }
        if (sblock_try[i] == -1) {
                error = EINVAL;         /* XXX needs translation */
                goto out;
        }
        fs->fs_fmod = 0;
        fs->fs_flags &= ~FS_INDEXDIRS;  /* no support for directory indicies */
        fs->fs_flags &= ~FS_UNCLEAN;
        if (fs->fs_clean == 0) {
                fs->fs_flags |= FS_UNCLEAN;
                if (ronly || (mp->mnt_flag & MNT_FORCE) ||
                    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
                     (fs->fs_flags & FS_DOSOFTDEP))) {
                        printf("WARNING: %s was not properly dismounted\n",
                            fs->fs_fsmnt);
                } else {
                        vfs_mount_error(mp, "R/W mount of %s denied. %s%s",
                            fs->fs_fsmnt, "Filesystem is not clean - run fsck.",
                            (fs->fs_flags & FS_SUJ) == 0 ? "" :
                            " Forced mount will invalidate journal contents");
                        error = EPERM;
                        goto out;
                }
                if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
                    (mp->mnt_flag & MNT_FORCE)) {
                        printf("WARNING: %s: lost blocks %jd files %d\n",
                            fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
                            fs->fs_pendinginodes);
                        fs->fs_pendingblocks = 0;
                        fs->fs_pendinginodes = 0;
                }
        }
        if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
                printf("WARNING: %s: mount pending error: blocks %jd "
                    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
                    fs->fs_pendinginodes);
                fs->fs_pendingblocks = 0;
                fs->fs_pendinginodes = 0;
        }
        if ((fs->fs_flags & FS_GJOURNAL) != 0) {
#ifdef UFS_GJOURNAL
                /*
                 * Get journal provider name.
                 */
                size = 1024;
                mp->mnt_gjprovider = malloc(size, M_UFSMNT, M_WAITOK);
                if (g_io_getattr("GJOURNAL::provider", cp, &size,
                    mp->mnt_gjprovider) == 0) {
                        mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, size,
                            M_UFSMNT, M_WAITOK);
                        MNT_ILOCK(mp);
                        mp->mnt_flag |= MNT_GJOURNAL;
                        MNT_IUNLOCK(mp);
                } else {
                        printf("WARNING: %s: GJOURNAL flag on fs "
                            "but no gjournal provider below\n",
                            mp->mnt_stat.f_mntonname);
                        free(mp->mnt_gjprovider, M_UFSMNT);
                        mp->mnt_gjprovider = NULL;
                }
#else
                printf("WARNING: %s: GJOURNAL flag on fs but no "
                    "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
#endif
        } else {
                mp->mnt_gjprovider = NULL;
        }
        ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
        ump->um_cp = cp;
        ump->um_bo = &devvp->v_bufobj;
        ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT, M_WAITOK);
        if (fs->fs_magic == FS_UFS1_MAGIC) {
                ump->um_fstype = UFS1;
                ump->um_balloc = ffs_balloc_ufs1;
        } else {
                ump->um_fstype = UFS2;
                ump->um_balloc = ffs_balloc_ufs2;
        }
        ump->um_blkatoff = ffs_blkatoff;
        ump->um_truncate = ffs_truncate;
        ump->um_update = ffs_update;
        ump->um_valloc = ffs_valloc;
        ump->um_vfree = ffs_vfree;
        ump->um_ifree = ffs_ifree;
        ump->um_rdonly = ffs_rdonly;
        ump->um_snapgone = ffs_snapgone;
        mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
        bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
        if (fs->fs_sbsize < SBLOCKSIZE)
                bp->b_flags |= B_INVAL | B_NOCACHE;
        brelse(bp);
        bp = NULL;
        fs = ump->um_fs;
        ffs_oldfscompat_read(fs, ump, sblockloc);
        fs->fs_ronly = ronly;
        size = fs->fs_cssize;
        blks = howmany(size, fs->fs_fsize);
        if (fs->fs_contigsumsize > 0)
                size += fs->fs_ncg * sizeof(int32_t);
Directory layout preference improvements from Grigoriy Orlov <gluk@ptci.ru>.
His description of the problem and solution follow. My own tests show
speedups on typical filesystem intensive workloads of 5% to 12% which
is very impressive considering the small amount of code change involved.
------
One day I noticed that some file operations run much faster on
small file systems then on big ones. I've looked at the ffs
algorithms, thought about them, and redesigned the dirpref algorithm.
First I want to describe the results of my tests. These results are old
and I have improved the algorithm after these tests were done. Nevertheless
they show how big the perfomance speedup may be. I have done two file/directory
intensive tests on a two OpenBSD systems with old and new dirpref algorithm.
The first test is "tar -xzf ports.tar.gz", the second is "rm -rf ports".
The ports.tar.gz file is the ports collection from the OpenBSD 2.8 release.
It contains 6596 directories and 13868 files. The test systems are:
1. Celeron-450, 128Mb, two IDE drives, the system at wd0, file system for
test is at wd1. Size of test file system is 8 Gb, number of cg=991,
size of cg is 8m, block size = 8k, fragment size = 1k OpenBSD-current
from Dec 2000 with BUFCACHEPERCENT=35
2. PIII-600, 128Mb, two IBM DTLA-307045 IDE drives at i815e, the system
at wd0, file system for test is at wd1. Size of test file system is 40 Gb,
number of cg=5324, size of cg is 8m, block size = 8k, fragment size = 1k
OpenBSD-current from Dec 2000 with BUFCACHEPERCENT=50
You can get more info about the test systems and methods at:
http://www.ptci.ru/gluk/dirpref/old/dirpref.html
Test Results
tar -xzf ports.tar.gz rm -rf ports
mode old dirpref new dirpref speedup old dirprefnew dirpref speedup
First system
normal 667 472 1.41 477 331 1.44
async 285 144 1.98 130 14 9.29
sync 768 616 1.25 477 334 1.43
softdep 413 252 1.64 241 38 6.34
Second system
normal 329 81 4.06 263.5 93.5 2.81
async 302 25.7 11.75 112 2.26 49.56
sync 281 57.0 4.93 263 90.5 2.9
softdep 341 40.6 8.4 284 4.76 59.66
"old dirpref" and "new dirpref" columns give a test time in seconds.
speedup - speed increasement in times, ie. old dirpref / new dirpref.
------
Algorithm description
The old dirpref algorithm is described in comments:
/*
* Find a cylinder to place a directory.
*
* The policy implemented by this algorithm is to select from
* among those cylinder groups with above the average number of
* free inodes, the one with the smallest number of directories.
*/
A new directory is allocated in a different cylinder groups than its
parent directory resulting in a directory tree that is spreaded across
all the cylinder groups. This spreading out results in a non-optimal
access to the directories and files. When we have a small filesystem
it is not a problem but when the filesystem is big then perfomance
degradation becomes very apparent.
What I mean by a big file system ?
1. A big filesystem is a filesystem which occupy 20-30 or more percent
of total drive space, i.e. first and last cylinder are physically
located relatively far from each other.
2. It has a relatively large number of cylinder groups, for example
more cylinder groups than 50% of the buffers in the buffer cache.
The first results in long access times, while the second results in
many buffers being used by metadata operations. Such operations use
cylinder group blocks and on-disk inode blocks. The cylinder group
block (fs->fs_cblkno) contains struct cg, inode and block bit maps.
It is 2k in size for the default filesystem parameters. If new and
parent directories are located in different cylinder groups then the
system performs more input/output operations and uses more buffers.
On filesystems with many cylinder groups, lots of cache buffers are
used for metadata operations.
My solution for this problem is very simple. I allocate many directories
in one cylinder group. I also do some things, so that the new allocation
method does not cause excessive fragmentation and all directory inodes
will not be located at a location far from its file's inodes and data.
The algorithm is:
/*
* Find a cylinder group to place a directory.
*
* The policy implemented by this algorithm is to allocate a
* directory inode in the same cylinder group as its parent
* directory, but also to reserve space for its files inodes
* and data. Restrict the number of directories which may be
* allocated one after another in the same cylinder group
* without intervening allocation of files.
*
* If we allocate a first level directory then force allocation
* in another cylinder group.
*/
My early versions of dirpref give me a good results for a wide range of
file operations and different filesystem capacities except one case:
those applications that create their entire directory structure first
and only later fill this structure with files.
My solution for such and similar cases is to limit a number of
directories which may be created one after another in the same cylinder
group without intervening file creations. For this purpose, I allocate
an array of counters at mount time. This array is linked to the superblock
fs->fs_contigdirs[cg]. Each time a directory is created the counter
increases and each time a file is created the counter decreases. A 60Gb
filesystem with 8mb/cg requires 10kb of memory for the counters array.
The maxcontigdirs is a maximum number of directories which may be created
without an intervening file creation. I found in my tests that the best
performance occurs when I restrict the number of directories in one cylinder
group such that all its files may be located in the same cylinder group.
There may be some deterioration in performance if all the file inodes
are in the same cylinder group as its containing directory, but their
data partially resides in a different cylinder group. The maxcontigdirs
value is calculated to try to prevent this condition. Since there is
no way to know how many files and directories will be allocated later
I added two optimization parameters in superblock/tunefs. They are:
int32_t fs_avgfilesize; /* expected average file size */
int32_t fs_avgfpdir; /* expected # of files per directory */
These parameters have reasonable defaults but may be tweeked for special
uses of a filesystem. They are only necessary in rare cases like better
tuning a filesystem being used to store a squid cache.
I have been using this algorithm for about 3 months. I have done
a lot of testing on filesystems with different capacities, average
filesize, average number of files per directory, and so on. I think
this algorithm has no negative impact on filesystem perfomance. It
works better than the default one in all cases. The new dirpref
will greatly improve untarring/removing/coping of big directories,
decrease load on cvs servers and much more. The new dirpref doesn't
speedup a compilation process, but also doesn't slow it down.
Obtained from: Grigoriy Orlov <gluk@ptci.ru>
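To make the mechanism above concrete, here is a minimal sketch of how maxcontigdirs
could be derived from the two tunables and how the fs_contigdirs[] counter gates
placement. This is an illustrative approximation written against the description
above, not the actual ffs_dirpref() code in ffs_alloc.c; the helper name and the
avgbfree parameter are assumptions made for the example.

/*
 * Illustrative sketch (not the real ffs_dirpref()): bound the number of
 * directories packed into one cylinder group so that their expected files
 * still fit.  "avgbfree" is the average number of free blocks per cylinder
 * group; the function name and this parameter are assumed for the example.
 */
static int
dirpref_maxcontigdirs(struct fs *fs, int64_t avgbfree)
{
	int64_t dirsize, maxcontigdirs;

	/* Expected space consumed by one directory's worth of files. */
	dirsize = (int64_t)fs->fs_avgfilesize * fs->fs_avgfpdir;
	if (dirsize <= 0)
		dirsize = 1;

	/* Stop adding directories once their files would no longer fit. */
	maxcontigdirs = (avgbfree * fs->fs_bsize) / dirsize;
	if (maxcontigdirs > 255)
		maxcontigdirs = 255;	/* fs_contigdirs[] entries are u_int8_t */
	if (maxcontigdirs < 1)
		maxcontigdirs = 1;

	/*
	 * The caller would then keep a new directory in its parent's
	 * cylinder group only while fs_contigdirs[cg] < maxcontigdirs;
	 * directory creation increments the counter, file creation
	 * decrements it.
	 */
	return ((int)maxcontigdirs);
}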
2001-04-10 08:38:59 +00:00
|
|
|
size += fs->fs_ncg * sizeof(u_int8_t);
|
2003-02-19 05:47:46 +00:00
|
|
|
space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
|
2001-01-15 18:30:40 +00:00
|
|
|
fs->fs_csp = space;
|
1994-05-24 10:09:53 +00:00
|
|
|
for (i = 0; i < blks; i += fs->fs_frag) {
|
|
|
|
size = fs->fs_bsize;
|
|
|
|
if (i + fs->fs_frag > blks)
|
|
|
|
size = (blks - i) * fs->fs_fsize;
|
1999-01-28 00:57:57 +00:00
|
|
|
if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
|
|
|
|
cred, &bp)) != 0) {
|
2001-01-15 18:30:40 +00:00
|
|
|
free(fs->fs_csp, M_UFSMNT);
|
1994-05-24 10:09:53 +00:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
bcopy(bp->b_data, space, (u_int)size);
|
2001-01-15 18:30:40 +00:00
|
|
|
space = (char *)space + size;
|
1994-05-24 10:09:53 +00:00
|
|
|
brelse(bp);
|
|
|
|
bp = NULL;
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
if (fs->fs_contigsumsize > 0) {
|
2001-01-15 18:30:40 +00:00
|
|
|
fs->fs_maxcluster = lp = space;
|
1997-02-10 02:22:35 +00:00
|
|
|
for (i = 0; i < fs->fs_ncg; i++)
|
|
|
|
*lp++ = fs->fs_contigsumsize;
|
2001-09-09 23:48:28 +00:00
|
|
|
space = lp;
|
1997-02-10 02:22:35 +00:00
|
|
|
}
|
2001-04-10 08:38:59 +00:00
|
|
|
size = fs->fs_ncg * sizeof(u_int8_t);
|
|
|
|
fs->fs_contigdirs = (u_int8_t *)space;
|
|
|
|
bzero(fs->fs_contigdirs, size);
|
2001-12-16 18:54:09 +00:00
|
|
|
fs->fs_active = NULL;
|
2007-10-16 10:54:55 +00:00
|
|
|
mp->mnt_data = ump;
|
1999-07-11 19:16:50 +00:00
|
|
|
mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
|
|
|
|
mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
|
2006-03-31 03:54:20 +00:00
|
|
|
nmp = NULL;
|
2010-09-17 09:14:40 +00:00
|
|
|
if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
|
2006-03-31 03:54:20 +00:00
|
|
|
(nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
|
|
|
|
if (nmp)
|
|
|
|
vfs_rel(nmp);
|
1999-07-11 19:16:50 +00:00
|
|
|
vfs_getnewfsid(mp);
|
2006-03-31 03:54:20 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
|
2006-09-26 04:12:49 +00:00
|
|
|
MNT_ILOCK(mp);
|
1997-03-18 19:50:12 +00:00
|
|
|
mp->mnt_flag |= MNT_LOCAL;
|
2006-09-26 04:12:49 +00:00
|
|
|
MNT_IUNLOCK(mp);
|
|
|
|
if ((fs->fs_flags & FS_MULTILABEL) != 0) {
|
2006-04-22 04:22:15 +00:00
|
|
|
#ifdef MAC
|
2006-09-26 04:12:49 +00:00
|
|
|
MNT_ILOCK(mp);
|
2002-10-15 20:00:06 +00:00
|
|
|
mp->mnt_flag |= MNT_MULTILABEL;
|
2006-09-26 04:12:49 +00:00
|
|
|
MNT_IUNLOCK(mp);
|
2006-04-22 04:22:15 +00:00
|
|
|
#else
|
2012-01-14 07:26:16 +00:00
|
|
|
printf("WARNING: %s: multilabel flag on fs but "
|
|
|
|
"no MAC support\n", mp->mnt_stat.f_mntonname);
|
2006-04-22 04:22:15 +00:00
|
|
|
#endif
|
2006-09-26 04:12:49 +00:00
|
|
|
}
|
|
|
|
if ((fs->fs_flags & FS_ACLS) != 0) {
|
2006-04-22 04:22:15 +00:00
|
|
|
#ifdef UFS_ACL
|
2006-09-26 04:12:49 +00:00
|
|
|
MNT_ILOCK(mp);
|
2009-12-21 19:39:10 +00:00
|
|
|
|
|
|
|
if (mp->mnt_flag & MNT_NFS4ACLS)
|
2012-01-14 07:26:16 +00:00
|
|
|
printf("WARNING: %s: ACLs flag on fs conflicts with "
|
|
|
|
"\"nfsv4acls\" mount option; option ignored\n",
|
|
|
|
mp->mnt_stat.f_mntonname);
|
2009-12-21 19:39:10 +00:00
|
|
|
mp->mnt_flag &= ~MNT_NFS4ACLS;
|
2002-10-15 20:00:06 +00:00
|
|
|
mp->mnt_flag |= MNT_ACLS;
|
2009-12-21 19:39:10 +00:00
|
|
|
|
2006-09-26 04:12:49 +00:00
|
|
|
MNT_IUNLOCK(mp);
|
2006-04-22 04:22:15 +00:00
|
|
|
#else
|
2010-09-17 09:14:40 +00:00
|
|
|
printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
|
2006-07-09 14:10:35 +00:00
|
|
|
mp->mnt_stat.f_mntonname);
|
2006-04-22 04:22:15 +00:00
|
|
|
#endif
|
2006-09-26 04:12:49 +00:00
|
|
|
}
|
2009-12-21 19:39:10 +00:00
|
|
|
if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
|
|
|
|
#ifdef UFS_ACL
|
|
|
|
MNT_ILOCK(mp);
|
|
|
|
|
|
|
|
if (mp->mnt_flag & MNT_ACLS)
|
2012-01-14 07:26:16 +00:00
|
|
|
printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
|
|
|
|
"with \"acls\" mount option; option ignored\n",
|
|
|
|
mp->mnt_stat.f_mntonname);
|
2009-12-21 19:39:10 +00:00
|
|
|
mp->mnt_flag &= ~MNT_ACLS;
|
|
|
|
mp->mnt_flag |= MNT_NFS4ACLS;
|
|
|
|
|
|
|
|
MNT_IUNLOCK(mp);
|
|
|
|
#else
|
2012-01-14 07:26:16 +00:00
|
|
|
printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
|
|
|
|
"ACLs support\n", mp->mnt_stat.f_mntonname);
|
2009-12-21 19:39:10 +00:00
|
|
|
#endif
|
|
|
|
}
|
2010-12-29 12:25:28 +00:00
|
|
|
if ((fs->fs_flags & FS_TRIM) != 0) {
|
|
|
|
size = sizeof(int);
|
|
|
|
if (g_io_getattr("GEOM::candelete", cp, &size,
|
|
|
|
&ump->um_candelete) == 0) {
|
|
|
|
if (!ump->um_candelete)
|
2012-01-14 07:26:16 +00:00
|
|
|
printf("WARNING: %s: TRIM flag on fs but disk "
|
|
|
|
"does not support TRIM\n",
|
2010-12-29 12:25:28 +00:00
|
|
|
mp->mnt_stat.f_mntonname);
|
|
|
|
} else {
|
2012-01-14 07:26:16 +00:00
|
|
|
printf("WARNING: %s: TRIM flag on fs but disk does "
|
|
|
|
"not confirm that it supports TRIM\n",
|
2010-12-29 12:25:28 +00:00
|
|
|
mp->mnt_stat.f_mntonname);
|
|
|
|
ump->um_candelete = 0;
|
|
|
|
}
|
|
|
|
}
|
2009-12-21 19:39:10 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
ump->um_mountp = mp;
|
|
|
|
ump->um_dev = dev;
|
|
|
|
ump->um_devvp = devvp;
|
|
|
|
ump->um_nindir = fs->fs_nindir;
|
|
|
|
ump->um_bptrtodb = fs->fs_fsbtodb;
|
|
|
|
ump->um_seqinc = fs->fs_frag;
|
|
|
|
for (i = 0; i < MAXQUOTAS; i++)
|
|
|
|
ump->um_quotas[i] = NULLVP;
|
2001-03-19 04:35:40 +00:00
|
|
|
#ifdef UFS_EXTATTR
|
Introduce extended attribute support for FFS, allowing arbitrary
(name, value) pairs to be associated with inodes. This support is
used for ACLs, MAC labels, and Capabilities in the TrustedBSD
security extensions, which are currently under development.
In this implementation, attributes are backed to data vnodes in the
style of the quota support in FFS. Support for FFS extended
attributes may be enabled using the FFS_EXTATTR kernel option
(disabled by default). Userland utilities and man pages will be
committed in the next batch. VFS interfaces and man pages have
been in the repo since 4.0-RELEASE and are unchanged.
o ufs/ufs/extattr.h: UFS-specific extattr defines
o ufs/ufs/ufs_extattr.c: bulk of support routines
o ufs/{ufs,ffs,mfs}/*.[ch]: hooks and extattr.h includes
o contrib/softupdates/ffs_softdep.c: extattr.h includes
o conf/options, conf/files, i386/conf/LINT: added FFS_EXTATTR
o coda/coda_vfsops.c: XXX required extattr.h due to ufsmount.h
(This should not be the case, and will be fixed in a future commit)
Currently attributes are not supported in MFS. This will be fixed.
Reviewed by: adrian, bp, freebsd-fs, other unthanked souls
Obtained from: TrustedBSD Project
2000-04-15 03:34:27 +00:00
|
|
|
ufs_extattr_uepm_init(&ump->um_extattr);
|
|
|
|
#endif
|
1995-08-28 09:19:25 +00:00
|
|
|
/*
|
|
|
|
* Set FS local "last mounted on" information (NULL pad)
|
|
|
|
*/
|
2005-08-21 22:06:41 +00:00
|
|
|
bzero(fs->fs_fsmnt, MAXMNTLEN);
|
|
|
|
strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
|
2010-04-24 07:05:35 +00:00
|
|
|
mp->mnt_stat.f_iosize = fs->fs_bsize;
|
1995-08-28 09:19:25 +00:00
|
|
|
|
2011-07-10 00:41:31 +00:00
|
|
|
if (mp->mnt_flag & MNT_ROOTFS) {
|
1995-08-28 09:19:25 +00:00
|
|
|
/*
|
|
|
|
* Root mount; update timestamp in mount structure.
|
|
|
|
	 * This will be used by the common root mount code
|
|
|
|
* to update the system clock.
|
|
|
|
*/
|
|
|
|
mp->mnt_time = fs->fs_time;
|
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
|
|
|
|
if (ronly == 0) {
|
2010-04-24 07:05:35 +00:00
|
|
|
fs->fs_mtime = time_second;
|
1998-03-08 09:59:44 +00:00
|
|
|
if ((fs->fs_flags & FS_DOSOFTDEP) &&
|
|
|
|
(error = softdep_mount(devvp, mp, fs, cred)) != 0) {
|
2001-01-15 18:30:40 +00:00
|
|
|
free(fs->fs_csp, M_UFSMNT);
|
2010-12-01 21:19:11 +00:00
|
|
|
ffs_flushfiles(mp, FORCECLOSE, td);
|
1998-03-08 09:59:44 +00:00
|
|
|
goto out;
|
|
|
|
}
|
2012-03-28 20:49:11 +00:00
|
|
|
if (devvp->v_type == VCHR && devvp->v_rdev != NULL)
|
|
|
|
devvp->v_rdev->si_mountpt = mp;
|
2000-07-11 22:07:57 +00:00
|
|
|
if (fs->fs_snapinum[0] != 0)
|
|
|
|
ffs_snapshot_mount(mp);
|
2000-01-10 00:24:24 +00:00
|
|
|
fs->fs_fmod = 1;
|
1997-02-10 02:22:35 +00:00
|
|
|
fs->fs_clean = 0;
|
2006-03-08 23:43:39 +00:00
|
|
|
(void) ffs_sbupdate(ump, MNT_WAIT, 0);
|
1997-02-10 02:22:35 +00:00
|
|
|
}
|
2004-07-14 14:19:32 +00:00
|
|
|
/*
|
|
|
|
* Initialize filesystem stat information in mount struct.
|
|
|
|
*/
|
2008-03-04 12:10:03 +00:00
|
|
|
MNT_ILOCK(mp);
|
2009-03-11 14:13:47 +00:00
|
|
|
mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
|
|
|
|
MNTK_EXTENDED_SHARED;
|
2008-03-04 12:10:03 +00:00
|
|
|
MNT_IUNLOCK(mp);
|
2001-03-19 04:35:40 +00:00
|
|
|
#ifdef UFS_EXTATTR
|
|
|
|
#ifdef UFS_EXTATTR_AUTOSTART
|
2000-10-04 04:44:51 +00:00
|
|
|
/*
|
|
|
|
*
|
o Implement "options FFS_EXTATTR_AUTOSTART", which depends on
"options FFS_EXTATTR". When extended attribute auto-starting
is enabled, FFS will scan the .attribute directory off of the
root of each file system, as it is mounted. If .attribute
exists, EA support will be started for the file system. If
there are files in the directory, FFS will attempt to start
them as attribute backing files for attributes bearing the same
name. All attributes are started before access to the file
system is permitted, so this permits race-free enabling of
attributes. For attributes backing support for security
features, such as ACLs, MAC, Capabilities, this is vital, as
it prevents the file system attributes from getting out of
sync as a result of file system operations between mount-time
and the enabling of the extended attribute. The userland
extattrctl tool will still function exactly as previously.
Files must be placed directly in .attribute, which must be
directly off of the file system root: symbolic links are
not permitted. FFS_EXTATTR will continue to be able
to function without FFS_EXTATTR_AUTOSTART for sites that do not
want/require auto-starting. If you're using the UFS_ACL code
available from www.TrustedBSD.org, using FFS_EXTATTR_AUTOSTART
is recommended.
o This support is implemented by adding an invocation of
ufs_extattr_autostart() to ffs_mountfs(). In addition,
several new supporting calls are introduced in
ufs_extattr.c:
ufs_extattr_autostart(): start EAs on the specified mount
ufs_extattr_lookup(): given a directory and filename,
return the vnode for the file.
ufs_extattr_enable_with_open(): invoke ufs_extattr_enable()
after doing the equivalent of vn_open()
on the passed file.
ufs_extattr_iterate_directory(): iterate over a directory,
invoking ufs_extattr_lookup() and
ufs_extattr_enable_with_open() on each
entry.
o This feature is not widely tested, and therefore may contain
bugs; caution is advised. Several changes are in the pipeline
for this feature, including breaking out of EA namespaces into
subdirectories of .attribute (this is waiting on the updated
EA API), as well as a per-filesystem flag indicating whether
or not EAs should be auto-started. This is required because
administrators may not want .attribute auto-started on all
file systems, especially if non-administrators have write access
to the root of a file system.
Obtained from: TrustedBSD Project
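As a rough illustration of the auto-start sequence described above, the sketch
below shows the shape of the mount-time scan. The helpers find_attr_dir() and
enable_all_backing_files() are hypothetical stand-ins for the real
ufs_extattr_lookup()/ufs_extattr_iterate_directory()/ufs_extattr_enable_with_open()
routines, whose exact signatures are not reproduced here; locking and directory
iteration details are elided.

/* Hypothetical helpers standing in for the ufs_extattr_*() routines above. */
static int find_attr_dir(struct mount *mp, struct vnode **vpp);
static int enable_all_backing_files(struct mount *mp, struct vnode *attrdir);

static int
extattr_autostart_sketch(struct mount *mp)
{
	struct vnode *attrdir;
	int error;

	/* 1. Look up ".attribute" directly under the file system root. */
	error = find_attr_dir(mp, &attrdir);
	if (error != 0)
		return (error);		/* no .attribute: nothing to start */

	/*
	 * 2. For each regular file in .attribute, open it and enable it as
	 *    the backing store for the attribute bearing the same name.
	 *    All of this happens before the mount is exposed, so it is
	 *    race-free with respect to ordinary file system activity.
	 */
	error = enable_all_backing_files(mp, attrdir);
	vput(attrdir);
	return (error);
}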
2001-03-14 05:32:31 +00:00
|
|
|
* Auto-starting does the following:
|
2000-10-04 04:44:51 +00:00
|
|
|
* - check for /.attribute in the fs, and extattr_start if so
|
|
|
|
* - for each file in .attribute, enable that file with
|
|
|
|
* an attribute of the same name.
|
|
|
|
* Not clear how to report errors -- probably eat them.
|
2002-05-16 21:28:32 +00:00
|
|
|
* This would all happen while the filesystem was busy/not
|
2000-10-04 04:44:51 +00:00
|
|
|
* available, so would effectively be "atomic".
|
|
|
|
*/
|
2001-09-12 08:38:13 +00:00
|
|
|
(void) ufs_extattr_autostart(mp, td);
|
2001-03-19 04:35:40 +00:00
|
|
|
#endif /* !UFS_EXTATTR_AUTOSTART */
|
|
|
|
#endif /* !UFS_EXTATTR */
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
out:
|
|
|
|
if (bp)
|
|
|
|
brelse(bp);
|
2004-10-29 10:15:56 +00:00
|
|
|
if (cp != NULL) {
|
|
|
|
DROP_GIANT();
|
|
|
|
g_topology_lock();
|
2008-10-10 21:23:50 +00:00
|
|
|
g_vfs_close(cp);
|
2004-10-29 10:15:56 +00:00
|
|
|
g_topology_unlock();
|
|
|
|
PICKUP_GIANT();
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
if (ump) {
|
2005-01-24 10:12:28 +00:00
|
|
|
mtx_destroy(UFS_MTX(ump));
|
2006-10-31 21:48:54 +00:00
|
|
|
if (mp->mnt_gjprovider != NULL) {
|
|
|
|
free(mp->mnt_gjprovider, M_UFSMNT);
|
|
|
|
mp->mnt_gjprovider = NULL;
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
free(ump->um_fs, M_UFSMNT);
|
|
|
|
free(ump, M_UFSMNT);
|
2007-10-16 10:54:55 +00:00
|
|
|
mp->mnt_data = NULL;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2009-01-29 16:47:15 +00:00
|
|
|
dev_rel(dev);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2002-06-21 06:18:05 +00:00
|
|
|
#include <sys/sysctl.h>
|
2005-02-10 12:20:08 +00:00
|
|
|
static int bigcgs = 0;
|
2002-06-21 06:18:05 +00:00
|
|
|
SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
2002-06-21 06:18:05 +00:00
|
|
|
* Sanity checks for loading old filesystem superblocks.
|
|
|
|
* See ffs_oldfscompat_write below for unwound actions.
|
1994-05-24 10:09:53 +00:00
|
|
|
*
|
2002-06-21 06:18:05 +00:00
|
|
|
* XXX - Parts get retired eventually.
|
|
|
|
* Unfortunately new bits get added.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2002-06-21 06:18:05 +00:00
|
|
|
static void
|
|
|
|
ffs_oldfscompat_read(fs, ump, sblockloc)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct fs *fs;
|
2002-06-21 06:18:05 +00:00
|
|
|
struct ufsmount *ump;
|
|
|
|
ufs2_daddr_t sblockloc;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2002-06-21 06:18:05 +00:00
|
|
|
off_t maxfilesize;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2002-11-27 02:18:58 +00:00
|
|
|
/*
|
|
|
|
* If not yet done, update fs_flags location and value of fs_sblockloc.
|
|
|
|
*/
|
|
|
|
if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
|
|
|
|
fs->fs_flags = fs->fs_old_flags;
|
|
|
|
fs->fs_old_flags |= FS_FLAGS_UPDATED;
|
|
|
|
fs->fs_sblockloc = sblockloc;
|
|
|
|
}
|
2002-06-21 06:18:05 +00:00
|
|
|
/*
|
|
|
|
* If not yet done, update UFS1 superblock with new wider fields.
|
|
|
|
*/
|
2003-02-25 23:21:08 +00:00
|
|
|
if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) {
|
2002-06-21 06:18:05 +00:00
|
|
|
fs->fs_maxbsize = fs->fs_bsize;
|
|
|
|
fs->fs_time = fs->fs_old_time;
|
|
|
|
fs->fs_size = fs->fs_old_size;
|
|
|
|
fs->fs_dsize = fs->fs_old_dsize;
|
|
|
|
fs->fs_csaddr = fs->fs_old_csaddr;
|
|
|
|
fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
|
|
|
|
fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
|
|
|
|
fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
|
|
|
|
fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
|
|
|
|
}
|
|
|
|
if (fs->fs_magic == FS_UFS1_MAGIC &&
|
|
|
|
fs->fs_old_inodefmt < FS_44INODEFMT) {
|
2005-10-21 01:54:00 +00:00
|
|
|
fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1;
|
2002-06-21 06:18:05 +00:00
|
|
|
fs->fs_qbmask = ~fs->fs_bmask;
|
|
|
|
fs->fs_qfmask = ~fs->fs_fmask;
|
|
|
|
}
|
2002-06-26 18:34:51 +00:00
|
|
|
if (fs->fs_magic == FS_UFS1_MAGIC) {
|
|
|
|
ump->um_savedmaxfilesize = fs->fs_maxfilesize;
|
2005-10-21 01:54:00 +00:00
|
|
|
maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1;
|
2002-06-26 18:34:51 +00:00
|
|
|
if (fs->fs_maxfilesize > maxfilesize)
|
|
|
|
fs->fs_maxfilesize = maxfilesize;
|
|
|
|
}
|
2002-06-21 06:18:05 +00:00
|
|
|
/* Compatibility for old filesystems */
|
|
|
|
if (fs->fs_avgfilesize <= 0)
|
|
|
|
fs->fs_avgfilesize = AVFILESIZ;
|
|
|
|
if (fs->fs_avgfpdir <= 0)
|
|
|
|
fs->fs_avgfpdir = AFPDIR;
|
|
|
|
if (bigcgs) {
|
|
|
|
fs->fs_save_cgsize = fs->fs_cgsize;
|
|
|
|
fs->fs_cgsize = fs->fs_bsize;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Unwinding superblock updates for old filesystems.
|
|
|
|
* See ffs_oldfscompat_read above for details.
|
|
|
|
*
|
|
|
|
* XXX - Parts get retired eventually.
|
|
|
|
* Unfortunately new bits get added.
|
|
|
|
*/
|
2010-04-24 07:05:35 +00:00
|
|
|
void
|
2002-06-21 06:18:05 +00:00
|
|
|
ffs_oldfscompat_write(fs, ump)
|
|
|
|
struct fs *fs;
|
|
|
|
struct ufsmount *ump;
|
|
|
|
{
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copy back UFS2 updated fields that UFS1 inspects.
|
|
|
|
*/
|
|
|
|
if (fs->fs_magic == FS_UFS1_MAGIC) {
|
|
|
|
fs->fs_old_time = fs->fs_time;
|
|
|
|
fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
|
|
|
|
fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
|
|
|
|
fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
|
|
|
|
fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
|
2002-06-26 18:34:51 +00:00
|
|
|
fs->fs_maxfilesize = ump->um_savedmaxfilesize;
|
2002-06-21 06:18:05 +00:00
|
|
|
}
|
|
|
|
if (bigcgs) {
|
|
|
|
fs->fs_cgsize = fs->fs_save_cgsize;
|
|
|
|
fs->fs_save_cgsize = 0;
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* unmount system call
|
|
|
|
*/
|
2005-02-10 12:20:08 +00:00
|
|
|
static int
|
2009-05-11 15:33:26 +00:00
|
|
|
ffs_unmount(mp, mntflags)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct mount *mp;
|
|
|
|
int mntflags;
|
|
|
|
{
|
2009-05-11 15:33:26 +00:00
|
|
|
struct thread *td;
|
2002-05-13 09:22:31 +00:00
|
|
|
struct ufsmount *ump = VFSTOUFS(mp);
|
|
|
|
struct fs *fs;
|
2008-09-16 11:55:53 +00:00
|
|
|
int error, flags, susp;
|
2009-01-08 12:48:27 +00:00
|
|
|
#ifdef UFS_EXTATTR
|
|
|
|
int e_restart;
|
|
|
|
#endif
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
flags = 0;
|
2009-05-11 15:33:26 +00:00
|
|
|
td = curthread;
|
2008-09-16 11:55:53 +00:00
|
|
|
fs = ump->um_fs;
|
2012-01-14 07:26:16 +00:00
|
|
|
susp = 0;
|
1994-05-24 10:09:53 +00:00
|
|
|
if (mntflags & MNT_FORCE) {
|
|
|
|
flags |= FORCECLOSE;
|
2008-09-16 11:55:53 +00:00
|
|
|
susp = fs->fs_ronly != 0;
|
2012-01-14 07:26:16 +00:00
|
|
|
}
|
2001-03-19 04:35:40 +00:00
|
|
|
#ifdef UFS_EXTATTR
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = ufs_extattr_stop(mp, td))) {
|
2000-06-04 04:50:36 +00:00
|
|
|
if (error != EOPNOTSUPP)
|
2012-01-14 07:26:16 +00:00
|
|
|
printf("WARNING: unmount %s: ufs_extattr_stop "
|
|
|
|
"returned errno %d\n", mp->mnt_stat.f_mntonname,
|
2000-06-04 04:50:36 +00:00
|
|
|
error);
|
2009-01-08 12:48:27 +00:00
|
|
|
e_restart = 0;
|
2001-09-01 20:11:05 +00:00
|
|
|
} else {
|
|
|
|
ufs_extattr_uepm_destroy(&ump->um_extattr);
|
2009-01-08 12:48:27 +00:00
|
|
|
e_restart = 1;
|
2001-09-01 20:11:05 +00:00
|
|
|
}
|
2000-04-15 03:34:27 +00:00
|
|
|
#endif
|
2008-09-16 11:55:53 +00:00
|
|
|
if (susp) {
|
|
|
|
/*
|
|
|
|
* dounmount already called vn_start_write().
|
|
|
|
*/
|
|
|
|
for (;;) {
|
|
|
|
vn_finished_write(mp);
|
|
|
|
if ((error = vfs_write_suspend(mp)) != 0)
|
|
|
|
return (error);
|
|
|
|
MNT_ILOCK(mp);
|
|
|
|
if (mp->mnt_kern_flag & MNTK_SUSPENDED) {
|
|
|
|
mp->mnt_kern_flag &= ~(MNTK_SUSPENDED |
|
|
|
|
MNTK_SUSPEND2);
|
|
|
|
wakeup(&mp->mnt_flag);
|
|
|
|
MNT_IUNLOCK(mp);
|
2009-05-11 15:33:26 +00:00
|
|
|
td->td_pflags |= TDP_IGNSUSP;
|
2008-09-16 11:55:53 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
MNT_IUNLOCK(mp);
|
|
|
|
vn_start_write(NULL, &mp, V_WAIT);
|
|
|
|
}
|
|
|
|
}
|
2011-07-30 00:43:18 +00:00
|
|
|
if (MOUNTEDSOFTDEP(mp))
|
2009-02-23 20:56:27 +00:00
|
|
|
error = softdep_flushfiles(mp, flags, td);
|
|
|
|
else
|
|
|
|
error = ffs_flushfiles(mp, flags, td);
|
2009-02-23 21:09:28 +00:00
|
|
|
if (error != 0 && error != ENXIO)
|
2009-02-23 20:56:27 +00:00
|
|
|
goto fail;
|
|
|
|
|
2005-01-24 10:12:28 +00:00
|
|
|
UFS_LOCK(ump);
|
2001-05-08 07:42:20 +00:00
|
|
|
if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
|
2012-01-14 07:26:16 +00:00
|
|
|
printf("WARNING: unmount %s: pending error: blocks %jd "
|
|
|
|
"files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
|
2002-06-21 06:18:05 +00:00
|
|
|
fs->fs_pendinginodes);
|
2001-05-08 07:42:20 +00:00
|
|
|
fs->fs_pendingblocks = 0;
|
|
|
|
fs->fs_pendinginodes = 0;
|
|
|
|
}
|
2005-01-24 10:12:28 +00:00
|
|
|
UFS_UNLOCK(ump);
|
2010-04-24 07:05:35 +00:00
|
|
|
softdep_unmount(mp);
|
2011-07-15 16:20:33 +00:00
|
|
|
if (fs->fs_ronly == 0 || ump->um_fsckpid > 0) {
|
2001-04-14 05:26:28 +00:00
|
|
|
fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
|
2006-03-08 23:43:39 +00:00
|
|
|
error = ffs_sbupdate(ump, MNT_WAIT, 0);
|
2009-02-23 21:09:28 +00:00
|
|
|
if (error && error != ENXIO) {
|
1997-02-10 02:22:35 +00:00
|
|
|
fs->fs_clean = 0;
|
2008-09-16 11:55:53 +00:00
|
|
|
goto fail;
|
1997-02-10 02:22:35 +00:00
|
|
|
}
|
1994-08-20 16:03:26 +00:00
|
|
|
}
|
2008-09-16 11:55:53 +00:00
|
|
|
if (susp) {
|
|
|
|
vfs_write_resume(mp);
|
|
|
|
vn_start_write(NULL, &mp, V_WAIT);
|
|
|
|
}
|
2004-10-29 10:15:56 +00:00
|
|
|
DROP_GIANT();
|
|
|
|
g_topology_lock();
|
2011-07-15 16:20:33 +00:00
|
|
|
if (ump->um_fsckpid > 0) {
|
|
|
|
/*
|
|
|
|
* Return to normal read-only mode.
|
|
|
|
*/
|
|
|
|
error = g_access(ump->um_cp, 0, -1, 0);
|
|
|
|
ump->um_fsckpid = 0;
|
|
|
|
}
|
2008-10-10 21:23:50 +00:00
|
|
|
g_vfs_close(ump->um_cp);
|
2004-10-29 10:15:56 +00:00
|
|
|
g_topology_unlock();
|
|
|
|
PICKUP_GIANT();
|
2012-03-28 20:49:11 +00:00
|
|
|
if (ump->um_devvp->v_type == VCHR && ump->um_devvp->v_rdev != NULL)
|
|
|
|
ump->um_devvp->v_rdev->si_mountpt = NULL;
|
1996-08-21 21:56:23 +00:00
|
|
|
vrele(ump->um_devvp);
|
2009-01-29 16:47:15 +00:00
|
|
|
dev_rel(ump->um_dev);
|
2005-01-24 10:12:28 +00:00
|
|
|
mtx_destroy(UFS_MTX(ump));
|
2006-10-31 21:48:54 +00:00
|
|
|
if (mp->mnt_gjprovider != NULL) {
|
|
|
|
free(mp->mnt_gjprovider, M_UFSMNT);
|
|
|
|
mp->mnt_gjprovider = NULL;
|
|
|
|
}
|
2001-01-15 18:30:40 +00:00
|
|
|
free(fs->fs_csp, M_UFSMNT);
|
1994-05-24 10:09:53 +00:00
|
|
|
free(fs, M_UFSMNT);
|
|
|
|
free(ump, M_UFSMNT);
|
2007-10-16 10:54:55 +00:00
|
|
|
mp->mnt_data = NULL;
|
2006-09-26 04:12:49 +00:00
|
|
|
MNT_ILOCK(mp);
|
1997-03-18 19:50:12 +00:00
|
|
|
mp->mnt_flag &= ~MNT_LOCAL;
|
2006-09-26 04:12:49 +00:00
|
|
|
MNT_IUNLOCK(mp);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
2008-09-16 11:55:53 +00:00
|
|
|
|
|
|
|
fail:
|
|
|
|
if (susp) {
|
|
|
|
vfs_write_resume(mp);
|
|
|
|
vn_start_write(NULL, &mp, V_WAIT);
|
|
|
|
}
|
2009-01-08 12:48:27 +00:00
|
|
|
#ifdef UFS_EXTATTR
|
|
|
|
if (e_restart) {
|
|
|
|
ufs_extattr_uepm_init(&ump->um_extattr);
|
|
|
|
#ifdef UFS_EXTATTR_AUTOSTART
|
|
|
|
(void) ufs_extattr_autostart(mp, td);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2008-09-16 11:55:53 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Flush out all the files in a filesystem.
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
ffs_flushfiles(mp, flags, td)
|
2002-05-13 09:22:31 +00:00
|
|
|
struct mount *mp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int flags;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2002-05-13 09:22:31 +00:00
|
|
|
struct ufsmount *ump;
|
1994-10-08 06:20:06 +00:00
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
ump = VFSTOUFS(mp);
|
|
|
|
#ifdef QUOTA
|
|
|
|
if (mp->mnt_flag & MNT_QUOTA) {
|
1994-10-10 01:04:55 +00:00
|
|
|
int i;
|
2004-07-12 08:14:09 +00:00
|
|
|
error = vflush(mp, 0, SKIPSYSTEM|flags, td);
|
1994-10-10 01:04:55 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
for (i = 0; i < MAXQUOTAS; i++) {
|
2001-09-12 08:38:13 +00:00
|
|
|
quotaoff(td, mp, i);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Here we fall through to vflush again to ensure
|
|
|
|
* that we have gotten rid of all the system vnodes.
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
#endif
|
2002-08-04 10:29:36 +00:00
|
|
|
ASSERT_VOP_LOCKED(ump->um_devvp, "ffs_flushfiles");
|
|
|
|
if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
|
2004-07-12 08:14:09 +00:00
|
|
|
if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
|
2000-07-11 22:07:57 +00:00
|
|
|
return (error);
|
|
|
|
ffs_snapshot_unmount(mp);
|
2006-03-19 21:09:19 +00:00
|
|
|
flags |= FORCECLOSE;
|
2000-07-11 22:07:57 +00:00
|
|
|
/*
|
|
|
|
* Here we fall through to vflush again to ensure
|
|
|
|
* that we have gotten rid of all the system vnodes.
|
|
|
|
*/
|
|
|
|
}
|
1998-03-08 09:59:44 +00:00
|
|
|
/*
|
|
|
|
* Flush all the files.
|
|
|
|
*/
|
2004-07-12 08:14:09 +00:00
|
|
|
if ((error = vflush(mp, 0, flags, td)) != 0)
|
1998-03-08 09:59:44 +00:00
|
|
|
return (error);
|
|
|
|
/*
|
|
|
|
* Flush filesystem metadata.
|
|
|
|
*/
|
2008-01-10 01:10:58 +00:00
|
|
|
vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
|
2005-01-11 07:36:22 +00:00
|
|
|
error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
|
2008-01-13 14:44:15 +00:00
|
|
|
VOP_UNLOCK(ump->um_devvp, 0);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2002-05-16 21:28:32 +00:00
|
|
|
* Get filesystem statistics.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2005-02-10 12:20:08 +00:00
|
|
|
static int
|
2009-05-11 15:33:26 +00:00
|
|
|
ffs_statfs(mp, sbp)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct mount *mp;
|
2002-05-13 09:22:31 +00:00
|
|
|
struct statfs *sbp;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2002-05-13 09:22:31 +00:00
|
|
|
struct ufsmount *ump;
|
|
|
|
struct fs *fs;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
ump = VFSTOUFS(mp);
|
|
|
|
fs = ump->um_fs;
|
2002-06-21 06:18:05 +00:00
|
|
|
if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
|
1994-05-24 10:09:53 +00:00
|
|
|
panic("ffs_statfs");
|
2003-11-12 08:01:40 +00:00
|
|
|
sbp->f_version = STATFS_VERSION;
|
1994-05-24 10:09:53 +00:00
|
|
|
sbp->f_bsize = fs->fs_fsize;
|
|
|
|
sbp->f_iosize = fs->fs_bsize;
|
|
|
|
sbp->f_blocks = fs->fs_dsize;
|
2005-01-24 10:12:28 +00:00
|
|
|
UFS_LOCK(ump);
|
1994-05-24 10:09:53 +00:00
|
|
|
sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
|
2001-05-08 07:42:20 +00:00
|
|
|
fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
|
|
|
|
sbp->f_bavail = freespace(fs, fs->fs_minfree) +
|
|
|
|
dbtofsb(fs, fs->fs_pendingblocks);
|
1994-05-24 10:09:53 +00:00
|
|
|
sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
|
2001-05-08 07:42:20 +00:00
|
|
|
sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
|
2005-01-24 10:12:28 +00:00
|
|
|
UFS_UNLOCK(ump);
|
2003-11-12 08:01:40 +00:00
|
|
|
sbp->f_namemax = NAME_MAX;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
2012-03-28 14:06:47 +00:00
|
|
|
/*
|
|
|
|
* For a lazy sync, we only care about access times, quotas and the
|
|
|
|
* superblock. Other filesystem changes are already converted to
|
|
|
|
* cylinder group blocks or inode blocks updates and are written to
|
|
|
|
* disk by syncer.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
ffs_sync_lazy(mp)
|
|
|
|
struct mount *mp;
|
|
|
|
{
|
|
|
|
struct vnode *mvp, *vp;
|
|
|
|
struct inode *ip;
|
|
|
|
struct thread *td;
|
|
|
|
int allerror, error;
|
|
|
|
|
|
|
|
allerror = 0;
|
|
|
|
td = curthread;
|
|
|
|
if ((mp->mnt_flag & MNT_NOATIME) != 0)
|
|
|
|
goto qupdate;
|
2012-04-20 07:00:28 +00:00
|
|
|
MNT_VNODE_FOREACH_ACTIVE(vp, mp, mvp) {
|
2012-04-17 16:28:22 +00:00
|
|
|
if (vp->v_type == VNON) {
|
2012-03-28 14:06:47 +00:00
|
|
|
VI_UNLOCK(vp);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
ip = VTOI(vp);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The IN_ACCESS flag is converted to IN_MODIFIED by
|
|
|
|
* ufs_close() and ufs_getattr() by the calls to
|
2012-03-28 14:16:15 +00:00
|
|
|
* ufs_itimes_locked(), without subsequent UFS_UPDATE().
|
|
|
|
* Test also all the other timestamp flags too, to pick up
|
|
|
|
* any other cases that could be missed.
|
2012-03-28 14:06:47 +00:00
|
|
|
*/
|
|
|
|
if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
|
|
|
|
IN_UPDATE)) == 0) {
|
|
|
|
VI_UNLOCK(vp);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if ((error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
|
2012-04-17 16:28:22 +00:00
|
|
|
td)) != 0)
|
2012-03-28 14:06:47 +00:00
|
|
|
continue;
|
|
|
|
error = ffs_update(vp, 0);
|
|
|
|
if (error != 0)
|
|
|
|
allerror = error;
|
|
|
|
vput(vp);
|
|
|
|
}
|
|
|
|
|
|
|
|
qupdate:
|
|
|
|
#ifdef QUOTA
|
|
|
|
qsync(mp);
|
|
|
|
#endif
|
|
|
|
|
|
|
|
if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
|
|
|
|
(error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
|
|
|
|
allerror = error;
|
|
|
|
return (allerror);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Go through the disk queues to initiate sandbagged IO;
|
|
|
|
* go through the inodes to write those that have been modified;
|
|
|
|
* initiate the writing of the super block if it has been modified.
|
|
|
|
*
|
2012-03-28 14:06:47 +00:00
|
|
|
* Note: we are always called with the filesystem marked busy using
|
|
|
|
* vfs_busy().
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2005-02-10 12:20:08 +00:00
|
|
|
static int
|
2009-05-11 15:33:26 +00:00
|
|
|
ffs_sync(mp, waitfor)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct mount *mp;
|
|
|
|
int waitfor;
|
|
|
|
{
|
2006-01-09 20:42:19 +00:00
|
|
|
struct vnode *mvp, *vp, *devvp;
|
2009-05-11 15:33:26 +00:00
|
|
|
struct thread *td;
|
1997-02-10 02:22:35 +00:00
|
|
|
struct inode *ip;
|
|
|
|
struct ufsmount *ump = VFSTOUFS(mp);
|
|
|
|
struct fs *fs;
|
2000-07-24 05:28:33 +00:00
|
|
|
int error, count, wait, lockreq, allerror = 0;
|
2006-03-08 23:43:39 +00:00
|
|
|
int suspend;
|
|
|
|
int suspended;
|
|
|
|
int secondary_writes;
|
|
|
|
int secondary_accwrites;
|
|
|
|
int softdep_deps;
|
|
|
|
int softdep_accdeps;
|
2004-10-25 09:14:03 +00:00
|
|
|
struct bufobj *bo;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
In the original days of BSD, a sync was issued on every filesystem
every 30 seconds. This spike in I/O caused the system to pause every
30 seconds, which was quite annoying. So, the way that sync worked
was changed so that when a vnode was first dirtied, it was put on
a 30-second cleaning queue (see the syncer_workitem_pending queues
in kern/vfs_subr.c). If the file has not been written or deleted
after 30 seconds, the syncer pushes it out. As the syncer runs once
per second, dirty files are trickled out slowly over the 30-second
period instead of all at once by a call to sync(2).
The one drawback to this is that it does not cover the filesystem
metadata. To handle the metadata, vfs_allocate_syncvnode() is called
to create a "filesystem syncer vnode" at mount time which cycles
around the cleaning queue being sync'ed every 30 seconds. In the
original design, the only things it would sync for UFS were the
filesystem metadata: inode blocks, cylinder group bitmaps, and the
superblock (e.g., by VOP_FSYNC'ing devvp, the device vnode from
which the filesystem is mounted).
Somewhere in its path to integration with FreeBSD the flushing of
the filesystem syncer vnode got changed to sync every vnode associated
with the filesystem. The result of this change was a return to the
old filesystem-wide flush-every-30-seconds behavior, which made the
whole 30-second delay per vnode useless.
This change goes back to the originally intended trickle out sync
behavior. Key to ensuring that all the intended semantics are
preserved (e.g., that all inode updates get flushed within a bounded
period of time) is that all inode modifications get pushed to their
corresponding inode blocks so that the metadata flush by the
filesystem syncer vnode gets them to the disk in a timely way.
Thanks to Konstantin Belousov (kib@) for doing the audit and commit
-r231122 which ensures that all of these updates are being made.
Reviewed by: kib
Tested by: scottl
MFC after: 2 weeks
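For readers unfamiliar with the syncer, the following is a much-simplified sketch
of the per-second trickle queue described above. The real implementation is the
syncer_workitem_pending machinery in kern/vfs_subr.c; the names, the wrapper
sync_item type, and the bucket count below are simplifications for illustration,
and initialization, allocation, vnode locking, and error handling are omitted.

#include <sys/queue.h>

#define SKETCH_MAXDELAY	32		/* ring of one-second buckets */

struct sync_item {			/* wrapper; the real code links vnodes directly */
	TAILQ_ENTRY(sync_item)	si_link;
	struct vnode		*si_vp;
};
TAILQ_HEAD(synclist, sync_item);

static struct synclist	sketch_pending[SKETCH_MAXDELAY];
static int		sketch_delayno;	/* bucket drained this second */

/* Schedule a newly dirtied vnode to be flushed "delay" seconds from now. */
static void
sketch_add_to_worklist(struct sync_item *si, int delay)
{
	int slot = (sketch_delayno + delay) & (SKETCH_MAXDELAY - 1);

	TAILQ_INSERT_TAIL(&sketch_pending[slot], si, si_link);
}

/* Run once per second: only vnodes whose delay has expired are pushed out. */
static void
sketch_syncer_tick(void)
{
	struct synclist *slp = &sketch_pending[sketch_delayno];
	struct sync_item *si;

	while ((si = TAILQ_FIRST(slp)) != NULL) {
		TAILQ_REMOVE(slp, si, si_link);
		(void)VOP_FSYNC(si->si_vp, MNT_LAZY, curthread);
	}
	sketch_delayno = (sketch_delayno + 1) & (SKETCH_MAXDELAY - 1);
}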
2012-02-07 20:43:28 +00:00
|
|
|
wait = 0;
|
|
|
|
suspend = 0;
|
|
|
|
suspended = 0;
|
2009-05-11 15:33:26 +00:00
|
|
|
td = curthread;
|
1994-05-24 10:09:53 +00:00
|
|
|
fs = ump->um_fs;
|
2012-01-14 07:26:16 +00:00
|
|
|
if (fs->fs_fmod != 0 && fs->fs_ronly != 0 && ump->um_fsckpid == 0)
|
|
|
|
panic("%s: ffs_sync: modification on read-only filesystem",
|
|
|
|
fs->fs_fsmnt);
|
2012-03-28 14:06:47 +00:00
|
|
|
if (waitfor == MNT_LAZY)
|
|
|
|
return (ffs_sync_lazy(mp));
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Write back each (modified) inode.
|
|
|
|
*/
|
2001-10-26 00:08:05 +00:00
|
|
|
lockreq = LK_EXCLUSIVE | LK_NOWAIT;
|
2006-03-08 23:43:39 +00:00
|
|
|
if (waitfor == MNT_SUSPEND) {
|
|
|
|
suspend = 1;
|
|
|
|
waitfor = MNT_WAIT;
|
|
|
|
}
|
2000-07-24 05:28:33 +00:00
|
|
|
if (waitfor == MNT_WAIT) {
|
|
|
|
wait = 1;
|
2001-10-26 00:08:05 +00:00
|
|
|
lockreq = LK_EXCLUSIVE;
|
2000-07-24 05:28:33 +00:00
|
|
|
}
|
2005-04-03 10:38:18 +00:00
|
|
|
lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
|
1994-05-24 10:09:53 +00:00
|
|
|
loop:
|
2006-03-08 23:43:39 +00:00
|
|
|
/* Grab snapshot of secondary write counts */
|
2012-04-17 16:28:22 +00:00
|
|
|
MNT_ILOCK(mp);
|
2006-03-08 23:43:39 +00:00
|
|
|
secondary_writes = mp->mnt_secondary_writes;
|
|
|
|
secondary_accwrites = mp->mnt_secondary_accwrites;
|
2012-04-17 16:28:22 +00:00
|
|
|
MNT_IUNLOCK(mp);
|
2006-03-08 23:43:39 +00:00
|
|
|
|
|
|
|
/* Grab snapshot of softdep dependency counts */
|
|
|
|
softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);
|
|
|
|
|
2012-04-17 16:28:22 +00:00
|
|
|
MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
|
2001-10-26 00:08:05 +00:00
|
|
|
/*
|
2012-03-28 13:47:07 +00:00
|
|
|
* Depend on the vnode interlock to keep things stable enough
|
2001-10-26 00:08:05 +00:00
|
|
|
* for a quick test. Since there might be hundreds of
|
|
|
|
* thousands of vnodes, we cannot afford even a subroutine
|
|
|
|
* call unless there's a good chance that we have work to do.
|
|
|
|
*/
|
2012-04-17 16:28:22 +00:00
|
|
|
if (vp->v_type == VNON) {
|
2003-10-05 07:16:45 +00:00
|
|
|
VI_UNLOCK(vp);
|
|
|
|
continue;
|
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
ip = VTOI(vp);
|
2012-04-17 16:28:22 +00:00
|
|
|
if ((ip->i_flag &
|
2001-10-26 00:08:05 +00:00
|
|
|
(IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
|
2012-04-17 16:28:22 +00:00
|
|
|
vp->v_bufobj.bo_dirty.bv_cnt == 0) {
|
2003-10-05 07:16:45 +00:00
|
|
|
VI_UNLOCK(vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
continue;
|
1997-02-10 02:22:35 +00:00
|
|
|
}
|
2003-10-05 09:42:24 +00:00
|
|
|
if ((error = vget(vp, lockreq, td)) != 0) {
|
2006-01-09 20:42:19 +00:00
|
|
|
if (error == ENOENT || error == ENOLCK) {
|
2012-04-17 16:28:22 +00:00
|
|
|
MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
|
2003-10-05 09:42:24 +00:00
|
|
|
goto loop;
|
2006-01-09 20:42:19 +00:00
|
|
|
}
|
2003-10-05 09:42:24 +00:00
|
|
|
continue;
|
|
|
|
}
|
2012-03-25 00:02:37 +00:00
|
|
|
if ((error = ffs_syncvnode(vp, waitfor, 0)) != 0)
|
2003-10-05 22:56:33 +00:00
|
|
|
allerror = error;
|
2005-04-03 10:38:18 +00:00
|
|
|
vput(vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
/*
|
2002-05-16 21:28:32 +00:00
|
|
|
* Force stale filesystem control information to be flushed.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2000-07-24 05:28:33 +00:00
|
|
|
if (waitfor == MNT_WAIT) {
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
|
2000-07-24 05:28:33 +00:00
|
|
|
allerror = error;
|
|
|
|
/* Flushed work items may create new vnodes to clean */
|
2012-04-17 16:28:22 +00:00
|
|
|
if (allerror == 0 && count)
|
2000-07-24 05:28:33 +00:00
|
|
|
goto loop;
|
|
|
|
}
|
2001-03-07 07:09:55 +00:00
|
|
|
#ifdef QUOTA
|
|
|
|
qsync(mp);
|
|
|
|
#endif
|
2012-02-07 20:43:28 +00:00
|
|
|
|
2001-04-25 08:11:18 +00:00
|
|
|
devvp = ump->um_devvp;
|
2004-10-25 09:14:03 +00:00
|
|
|
bo = &devvp->v_bufobj;
|
2008-03-22 09:15:16 +00:00
|
|
|
BO_LOCK(bo);
|
2012-02-07 20:43:28 +00:00
|
|
|
if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
|
2008-03-22 09:15:16 +00:00
|
|
|
BO_UNLOCK(bo);
|
|
|
|
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
|
2005-01-11 07:36:22 +00:00
|
|
|
if ((error = VOP_FSYNC(devvp, waitfor, td)) != 0)
|
1998-03-08 09:59:44 +00:00
|
|
|
allerror = error;
|
2008-01-13 14:44:15 +00:00
|
|
|
VOP_UNLOCK(devvp, 0);
|
2012-04-17 16:28:22 +00:00
|
|
|
if (allerror == 0 && waitfor == MNT_WAIT)
|
2001-04-25 08:11:18 +00:00
|
|
|
goto loop;
|
2006-03-08 23:43:39 +00:00
|
|
|
} else if (suspend != 0) {
|
|
|
|
if (softdep_check_suspend(mp,
|
|
|
|
devvp,
|
|
|
|
softdep_deps,
|
|
|
|
softdep_accdeps,
|
|
|
|
secondary_writes,
|
2012-04-17 16:28:22 +00:00
|
|
|
secondary_accwrites) != 0) {
|
|
|
|
MNT_IUNLOCK(mp);
|
2006-03-08 23:43:39 +00:00
|
|
|
goto loop; /* More work needed */
|
2012-04-17 16:28:22 +00:00
|
|
|
}
|
2006-03-08 23:43:39 +00:00
|
|
|
mtx_assert(MNT_MTX(mp), MA_OWNED);
|
2006-03-11 01:08:37 +00:00
|
|
|
mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
|
2006-03-08 23:43:39 +00:00
|
|
|
MNT_IUNLOCK(mp);
|
|
|
|
suspended = 1;
|
2001-04-25 08:11:18 +00:00
|
|
|
} else
|
2008-03-22 09:15:16 +00:00
|
|
|
BO_UNLOCK(bo);
|
1997-02-10 02:22:35 +00:00
|
|
|
/*
|
|
|
|
* Write back modified superblock.
|
|
|
|
*/
|
2006-03-08 23:43:39 +00:00
|
|
|
if (fs->fs_fmod != 0 &&
|
|
|
|
(error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
|
1998-03-08 09:59:44 +00:00
|
|
|
allerror = error;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (allerror);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2002-03-17 01:25:47 +00:00
|
|
|
ffs_vget(mp, ino, flags, vpp)
|
1994-05-24 10:09:53 +00:00
|
|
|
struct mount *mp;
|
|
|
|
ino_t ino;
|
2002-03-17 01:25:47 +00:00
|
|
|
int flags;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct vnode **vpp;
|
2008-08-28 09:18:20 +00:00
|
|
|
{
|
|
|
|
return (ffs_vgetf(mp, ino, flags, vpp, 0));
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
|
|
|
ffs_vgetf(mp, ino, flags, vpp, ffs_flags)
|
|
|
|
struct mount *mp;
|
|
|
|
ino_t ino;
|
|
|
|
int flags;
|
|
|
|
struct vnode **vpp;
|
|
|
|
int ffs_flags;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1997-02-10 02:22:35 +00:00
|
|
|
struct fs *fs;
|
|
|
|
struct inode *ip;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct ufsmount *ump;
|
|
|
|
struct buf *bp;
|
|
|
|
struct vnode *vp;
|
2004-06-16 09:47:26 +00:00
|
|
|
struct cdev *dev;
|
2002-05-30 22:04:17 +00:00
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2005-03-16 11:20:51 +00:00
|
|
|
error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
|
2005-03-15 08:07:07 +00:00
|
|
|
if (error || *vpp != NULL)
|
2005-03-14 10:21:16 +00:00
|
|
|
return (error);
|
2002-05-30 22:04:17 +00:00
|
|
|
|
2005-03-29 10:10:51 +00:00
|
|
|
/*
|
|
|
|
* We must promote to an exclusive lock for vnode creation. This
|
|
|
|
* can happen if lookup is passed LOCKSHARED.
|
|
|
|
*/
|
|
|
|
if ((flags & LK_TYPE_MASK) == LK_SHARED) {
|
|
|
|
flags &= ~LK_TYPE_MASK;
|
|
|
|
flags |= LK_EXCLUSIVE;
|
|
|
|
}
|
|
|
|
|
2002-05-30 22:04:17 +00:00
|
|
|
/*
|
2002-06-06 20:43:03 +00:00
|
|
|
* We do not lock vnode creation as it is believed to be too
|
2002-05-30 22:04:17 +00:00
|
|
|
	 * expensive for such a rare case as simultaneous creation of a vnode
|
|
|
|
	 * for the same ino by different processes. We just allow them to race
|
|
|
|
* and check later to decide who wins. Let the race begin!
|
|
|
|
*/
|
2005-03-14 10:21:16 +00:00
|
|
|
|
|
|
|
ump = VFSTOUFS(mp);
|
|
|
|
dev = ump->um_dev;
|
|
|
|
fs = ump->um_fs;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1996-06-12 03:37:57 +00:00
|
|
|
/*
|
2008-10-23 15:53:51 +00:00
|
|
|
* If this malloc() is performed after the getnewvnode()
|
1996-06-12 03:37:57 +00:00
|
|
|
* it might block, leaving a vnode with a NULL v_data to be
|
|
|
|
* found by ffs_sync() if a sync happens to fire right then,
|
|
|
|
* which will cause a panic because ffs_sync() blindly
|
|
|
|
* dereferences vp->v_data (as well it should).
|
|
|
|
*/
|
2005-03-14 10:21:16 +00:00
|
|
|
ip = uma_zalloc(uma_inode, M_WAITOK | M_ZERO);
|
1996-06-12 03:37:57 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/* Allocate a new vnode/inode. */
|
2005-02-08 21:03:52 +00:00
|
|
|
if (fs->fs_magic == FS_UFS1_MAGIC)
|
|
|
|
error = getnewvnode("ufs", mp, &ffs_vnodeops1, &vp);
|
|
|
|
else
|
|
|
|
error = getnewvnode("ufs", mp, &ffs_vnodeops2, &vp);
|
1994-10-08 06:20:06 +00:00
|
|
|
if (error) {
|
1994-05-24 10:09:53 +00:00
|
|
|
*vpp = NULL;
|
2002-12-27 11:05:05 +00:00
|
|
|
uma_zfree(uma_inode, ip);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
2000-09-25 15:24:04 +00:00
|
|
|
/*
|
2009-03-11 14:13:47 +00:00
|
|
|
* FFS supports recursive locking.
|
2000-09-25 15:24:04 +00:00
|
|
|
*/
|
2010-08-20 19:46:50 +00:00
|
|
|
lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
|
2008-02-24 16:38:58 +00:00
|
|
|
VN_LOCK_AREC(vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
vp->v_data = ip;
|
2004-10-26 07:39:12 +00:00
|
|
|
vp->v_bufobj.bo_bsize = fs->fs_bsize;
|
1994-05-24 10:09:53 +00:00
|
|
|
ip->i_vnode = vp;
|
2002-06-21 06:18:05 +00:00
|
|
|
ip->i_ump = ump;
|
2004-07-07 20:04:06 +00:00
|
|
|
ip->i_fs = fs;
|
1994-05-24 10:09:53 +00:00
|
|
|
ip->i_dev = dev;
|
|
|
|
ip->i_number = ino;
|
2009-03-12 12:43:56 +00:00
|
|
|
ip->i_ea_refs = 0;
|
1994-05-24 10:09:53 +00:00
|
|
|
#ifdef QUOTA
|
1994-10-10 01:04:55 +00:00
|
|
|
{
|
1995-07-21 03:52:40 +00:00
|
|
|
int i;
|
|
|
|
for (i = 0; i < MAXQUOTAS; i++)
|
|
|
|
ip->i_dquot[i] = NODQUOT;
|
1994-10-10 01:04:55 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
#endif
|
|
|
|
|
2008-08-28 09:18:20 +00:00
|
|
|
if (ffs_flags & FFSV_FORCEINSMQ)
|
|
|
|
vp->v_vflag |= VV_FORCEINSMQ;
|
2007-03-13 01:50:27 +00:00
|
|
|
error = insmntque(vp, mp);
|
|
|
|
if (error != 0) {
|
2009-09-07 11:55:34 +00:00
|
|
|
uma_zfree(uma_inode, ip);
|
2007-03-13 01:50:27 +00:00
|
|
|
*vpp = NULL;
|
|
|
|
return (error);
|
|
|
|
}
|
2008-08-28 09:18:20 +00:00
|
|
|
vp->v_vflag &= ~VV_FORCEINSMQ;
|
2008-07-19 22:29:44 +00:00
|
|
|
error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
|
2005-03-15 20:00:03 +00:00
|
|
|
if (error || *vpp != NULL)
|
2002-05-30 22:04:17 +00:00
|
|
|
return (error);
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/* Read in the disk contents for the inode, copy into the inode. */
|
1994-10-08 06:20:06 +00:00
|
|
|
error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
|
|
|
|
(int)fs->fs_bsize, NOCRED, &bp);
|
|
|
|
if (error) {
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* The inode does not contain anything useful, so it would
|
|
|
|
* be misleading to leave it on its hash chain. With mode
|
|
|
|
* still zero, it will be unlinked and returned to the free
|
|
|
|
* list by vput().
|
|
|
|
*/
|
|
|
|
brelse(bp);
|
1996-01-19 04:00:31 +00:00
|
|
|
vput(vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
*vpp = NULL;
|
|
|
|
return (error);
|
|
|
|
}
|
2002-12-27 10:23:03 +00:00
|
|
|
if (ip->i_ump->um_fstype == UFS1)
|
2003-02-19 05:47:46 +00:00
|
|
|
ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
|
2002-12-27 10:23:03 +00:00
|
|
|
else
|
2003-02-19 05:47:46 +00:00
|
|
|
ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
|
2002-12-27 10:23:03 +00:00
|
|
|
ffs_load_inode(bp, ip, fs, ino);
|
1998-03-08 09:59:44 +00:00
|
|
|
if (DOINGSOFTDEP(vp))
|
|
|
|
softdep_load_inodeblock(ip);
|
|
|
|
else
|
|
|
|
ip->i_effnlink = ip->i_nlink;
|
1996-01-19 04:00:31 +00:00
|
|
|
bqrelse(bp);
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize the vnode from the inode, check for aliases.
|
|
|
|
* Note that the underlying vnode may have changed.
|
|
|
|
*/
|
2005-02-08 21:03:52 +00:00
|
|
|
if (ip->i_ump->um_fstype == UFS1)
|
|
|
|
error = ufs_vinit(mp, &ffs_fifoops1, &vp);
|
|
|
|
else
|
|
|
|
error = ufs_vinit(mp, &ffs_fifoops2, &vp);
|
1994-10-08 06:20:06 +00:00
|
|
|
if (error) {
|
1994-05-24 10:09:53 +00:00
|
|
|
vput(vp);
|
|
|
|
*vpp = NULL;
|
|
|
|
return (error);
|
|
|
|
}
|
2005-03-15 20:50:58 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
2003-08-15 20:03:19 +00:00
|
|
|
* Finish inode initialization.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2009-03-11 14:13:47 +00:00
|
|
|
if (vp->v_type != VFIFO) {
|
|
|
|
/* FFS supports shared locking for all files except fifos. */
|
|
|
|
VN_LOCK_ASHARE(vp);
|
|
|
|
}
|
2005-03-15 20:50:58 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Set up a generation number for this inode if it does not
|
|
|
|
* already have one. This should only happen on old filesystems.
|
|
|
|
*/
|
|
|
|
if (ip->i_gen == 0) {
|
2003-02-14 21:31:58 +00:00
|
|
|
ip->i_gen = arc4random() / 2 + 1;
|
2002-06-21 06:18:05 +00:00
|
|
|
if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
|
1994-05-24 10:09:53 +00:00
|
|
|
ip->i_flag |= IN_MODIFIED;
|
2004-07-28 06:41:27 +00:00
|
|
|
DIP_SET(ip, i_gen, ip->i_gen);
|
2002-06-21 06:18:05 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
Slightly change the semantics of vnode labels for MAC: rather than
"refreshing" the label on the vnode before use, just get the label
right from inception. For single-label file systems, set the label
in the generic VFS getnewvnode() code; for multi-label file systems,
leave the labeling up to the file system. With UFS1/2, this means
reading the extended attribute during vfs_vget() as the inode is
pulled off disk, rather than hitting the extended attributes
frequently during operations later, improving performance. This
also corrects semantics for shared vnode locks, which were not
previously present in the system. This changes the cache
coherency properties WRT out-of-band access to label data, but in
an acceptable form. With UFS1, there is a small race condition
during automatic extended attribute start -- this is not present
with UFS2, and occurs because EAs aren't available at vnode
inception. We'll introduce a work around for this shortly.
Approved by: re
Obtained from: TrustedBSD Project
Sponsored by: DARPA, Network Associates Laboratories
2002-10-26 14:38:24 +00:00
#ifdef MAC
        if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
                /*
                 * If this vnode is already allocated, and we're running
                 * multi-label, attempt to perform a label association
                 * from the extended attributes on the inode.
                 */
                error = mac_vnode_associate_extattr(mp, vp);
                if (error) {
                        /* ufs_inactive will release ip->i_devvp ref. */
                        vput(vp);
                        *vpp = NULL;
                        return (error);
                }
        }
#endif

        *vpp = vp;
        return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
 */
static int
ffs_fhtovp(mp, fhp, flags, vpp)
        struct mount *mp;
        struct fid *fhp;
        int flags;
        struct vnode **vpp;
{
        struct ufid *ufhp;
        struct fs *fs;

        ufhp = (struct ufid *)fhp;
        fs = VFSTOUFS(mp)->um_fs;
        if (ufhp->ufid_ino < ROOTINO ||
            ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
                return (ESTALE);
        return (ufs_fhtovp(mp, ufhp, flags, vpp));
}

/*
 * Initialize the filesystem.
 */
static int
ffs_init(vfsp)
        struct vfsconf *vfsp;
{

        softdep_initialize();
        return (ufs_init(vfsp));
}

/*
 * Undo the work of ffs_init().
 */
static int
ffs_uninit(vfsp)
        struct vfsconf *vfsp;
{
        int ret;

        ret = ufs_uninit(vfsp);
        softdep_uninitialize();
        return (ret);
}

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(ump, waitfor, suspended)
        struct ufsmount *ump;
        int waitfor;
        int suspended;
{
        struct fs *fs = ump->um_fs;
        struct buf *sbbp;
        struct buf *bp;
        int blks;
        void *space;
        int i, size, error, allerror = 0;

        if (fs->fs_ronly == 1 &&
            (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
            (MNT_RDONLY | MNT_UPDATE) && ump->um_fsckpid == 0)
                panic("ffs_sbupdate: write read-only filesystem");
        /*
         * We use the superblock's buf to serialize calls to ffs_sbupdate().
         */
        sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
            (int)fs->fs_sbsize, 0, 0, 0);
        /*
         * First write back the summary information.
         */
        blks = howmany(fs->fs_cssize, fs->fs_fsize);
        space = fs->fs_csp;
        for (i = 0; i < blks; i += fs->fs_frag) {
                size = fs->fs_bsize;
                if (i + fs->fs_frag > blks)
                        size = (blks - i) * fs->fs_fsize;
                bp = getblk(ump->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
                    size, 0, 0, 0);
                bcopy(space, bp->b_data, (u_int)size);
                space = (char *)space + size;
                if (suspended)
                        bp->b_flags |= B_VALIDSUSPWRT;
                if (waitfor != MNT_WAIT)
                        bawrite(bp);
                else if ((error = bwrite(bp)) != 0)
                        allerror = error;
        }
        /*
         * Now write back the superblock itself. If any errors occurred
         * up to this point, then fail so that the superblock avoids
         * being written out as clean.
         */
        if (allerror) {
                brelse(sbbp);
                return (allerror);
        }
        bp = sbbp;
        if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
            (fs->fs_flags & FS_FLAGS_UPDATED) == 0) {
                printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
                    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
                fs->fs_sblockloc = SBLOCK_UFS1;
        }
        if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
            (fs->fs_flags & FS_FLAGS_UPDATED) == 0) {
                printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
                    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
                fs->fs_sblockloc = SBLOCK_UFS2;
        }
        fs->fs_fmod = 0;
        fs->fs_time = time_second;
        if (fs->fs_flags & FS_DOSOFTDEP)
                softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
        bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
        ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
        if (suspended)
                bp->b_flags |= B_VALIDSUSPWRT;
        if (waitfor != MNT_WAIT)
                bawrite(bp);
        else if ((error = bwrite(bp)) != 0)
                allerror = error;
        return (allerror);
}
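
/*
 * Extended attribute control: dispatch to the UFS implementation when the
 * kernel is built with UFS_EXTATTR, otherwise fall back to the generic
 * vfs_stdextattrctl() handler.
 */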
static int
ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{

#ifdef UFS_EXTATTR
        return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
            attrname));
#else
        return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
            attrname));
#endif
}
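
/*
 * Release an in-core inode: return the UFS1 or UFS2 dinode, if one was
 * attached, and then the inode itself to their UMA zones.
 */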
static void
ffs_ifree(struct ufsmount *ump, struct inode *ip)
{

        if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
                uma_zfree(uma_ufs1, ip->i_din1);
        else if (ip->i_din2 != NULL)
                uma_zfree(uma_ufs2, ip->i_din2);
        uma_zfree(uma_inode, ip);
}
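
/*
 * Background writes: a buffer marked BX_BKGRDWRITE (typically a cylinder
 * group block) that is written asynchronously is copied by ffs_bufwrite()
 * below and the copy is written instead, leaving the original available
 * for further modification.  The optimization can be disabled at run time
 * with the debug.dobkgrdwrite sysctl, e.g. "sysctl debug.dobkgrdwrite=0".
 */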
static int dobkgrdwrite = 1;
SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
    "Do background writes (honoring the BV_BKGRDWRITE flag)?");

/*
 * Complete a background write started from bwrite.
 */
static void
ffs_backgroundwritedone(struct buf *bp)
{
        struct bufobj *bufobj;
        struct buf *origbp;

        /*
         * Find the original buffer that we are writing.
         */
        bufobj = bp->b_bufobj;
        BO_LOCK(bufobj);
        if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
                panic("backgroundwritedone: lost buffer");
        /* Grab an extra reference to be dropped by the bufdone() below. */
        bufobj_wrefl(bufobj);
        BO_UNLOCK(bufobj);
        /*
         * Process dependencies then return any unfinished ones.
         */
        if (!LIST_EMPTY(&bp->b_dep))
                buf_complete(bp);
#ifdef SOFTUPDATES
        if (!LIST_EMPTY(&bp->b_dep))
                softdep_move_dependencies(bp, origbp);
#endif
        /*
         * This buffer is marked B_NOCACHE so when it is released
         * by biodone it will be tossed.
         */
        bp->b_flags |= B_NOCACHE;
        bp->b_flags &= ~B_CACHE;
        bufdone(bp);
        BO_LOCK(bufobj);
        /*
         * Clear the BV_BKGRDINPROG flag in the original buffer
         * and awaken it if it is waiting for the write to complete.
         * If BV_BKGRDINPROG is not set in the original buffer it must
         * have been released and re-instantiated - which is not legal.
         */
        KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
            ("backgroundwritedone: lost buffer2"));
        origbp->b_vflags &= ~BV_BKGRDINPROG;
        if (origbp->b_vflags & BV_BKGRDWAIT) {
                origbp->b_vflags &= ~BV_BKGRDWAIT;
                wakeup(&origbp->b_xflags);
        }
        BO_UNLOCK(bufobj);
}

/*
 * Write, release buffer on completion. (Done by iodone
 * if async). Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that buffer is
 * fully valid and thus cacheable. This is true even of NFS
 * now so we set it generally. This could be set either here
 * or in biodone() since the I/O is synchronous. We put it
 * here.
 */
static int
ffs_bufwrite(struct buf *bp)
{
        int oldflags, s;
        struct buf *newbp;

        CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
        if (bp->b_flags & B_INVAL) {
                brelse(bp);
                return (0);
        }

        oldflags = bp->b_flags;

        if (!BUF_ISLOCKED(bp))
                panic("bufwrite: buffer is not busy???");
        s = splbio();
        /*
         * If a background write is already in progress, delay
         * writing this block if it is asynchronous. Otherwise
         * wait for the background write to complete.
         */
        BO_LOCK(bp->b_bufobj);
        if (bp->b_vflags & BV_BKGRDINPROG) {
                if (bp->b_flags & B_ASYNC) {
                        BO_UNLOCK(bp->b_bufobj);
                        splx(s);
                        bdwrite(bp);
                        return (0);
                }
                bp->b_vflags |= BV_BKGRDWAIT;
                msleep(&bp->b_xflags, BO_MTX(bp->b_bufobj), PRIBIO, "bwrbg", 0);
                if (bp->b_vflags & BV_BKGRDINPROG)
                        panic("bufwrite: still writing");
        }
        BO_UNLOCK(bp->b_bufobj);

        /*
         * If this buffer is marked for background writing and we
         * do not have to wait for it, make a copy and write the
         * copy so as to leave this buffer ready for further use.
         *
         * This optimization eats a lot of memory. If we have a page
         * or buffer shortfall we can't do it.
         */
        if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
            (bp->b_flags & B_ASYNC) &&
            !vm_page_count_severe() &&
            !buf_dirty_count_severe()) {
                KASSERT(bp->b_iodone == NULL,
                    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));

                /* get a new block */
                newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
                if (newbp == NULL)
                        goto normal_write;

                /*
                 * set it to be identical to the old block. We have to
                 * set b_lblkno and BKGRDMARKER before calling bgetvp()
                 * to avoid confusing the splay tree and gbincore().
                 */
                memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
                newbp->b_lblkno = bp->b_lblkno;
                newbp->b_xflags |= BX_BKGRDMARKER;
                BO_LOCK(bp->b_bufobj);
                bp->b_vflags |= BV_BKGRDINPROG;
                bgetvp(bp->b_vp, newbp);
                BO_UNLOCK(bp->b_bufobj);
                newbp->b_bufobj = &bp->b_vp->v_bufobj;
                newbp->b_blkno = bp->b_blkno;
                newbp->b_offset = bp->b_offset;
                newbp->b_iodone = ffs_backgroundwritedone;
                newbp->b_flags |= B_ASYNC;
                newbp->b_flags &= ~B_INVAL;

#ifdef SOFTUPDATES
                /*
                 * Move over the dependencies. If there are rollbacks,
                 * leave the parent buffer dirtied as it will need to
                 * be written again.
                 */
                if (LIST_EMPTY(&bp->b_dep) ||
                    softdep_move_dependencies(bp, newbp) == 0)
                        bundirty(bp);
#else
                bundirty(bp);
#endif

                /*
                 * Initiate write on the copy, release the original to
                 * the B_LOCKED queue so that it cannot go away until
                 * the background write completes. If not locked it could go
                 * away and then be reconstituted while it was being written.
                 * If the reconstituted buffer were written, we could end up
                 * with two background copies being written at the same time.
                 */
                bqrelse(bp);
                bp = newbp;
        } else
                /* Mark the buffer clean */
                bundirty(bp);

        /* Let the normal bufwrite do the rest for us */
normal_write:
        return (bufwrite(bp));
}
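
/*
 * Strategy routine for FFS device buffers: for writes, perform snapshot
 * copy-on-write processing and start any soft updates dependencies before
 * handing the buffer to GEOM via g_vfs_strategy().
 */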
static void
ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
{
        struct vnode *vp;
        int error;
        struct buf *tbp;
        int nocopy;

        vp = bo->__bo_vnode;
        if (bp->b_iocmd == BIO_WRITE) {
                if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
                    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
                    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
                        panic("ffs_geom_strategy: bad I/O");
                nocopy = bp->b_flags & B_NOCOPY;
                bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
                if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
                    vp->v_rdev->si_snapdata != NULL) {
                        if ((bp->b_flags & B_CLUSTER) != 0) {
                                runningbufwakeup(bp);
                                TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
                                    b_cluster.cluster_entry) {
                                        error = ffs_copyonwrite(vp, tbp);
                                        if (error != 0 &&
                                            error != EOPNOTSUPP) {
                                                bp->b_error = error;
                                                bp->b_ioflags |= BIO_ERROR;
                                                bufdone(bp);
                                                return;
                                        }
                                }
                                bp->b_runningbufspace = bp->b_bufsize;
                                atomic_add_long(&runningbufspace,
                                    bp->b_runningbufspace);
                        } else {
                                error = ffs_copyonwrite(vp, bp);
                                if (error != 0 && error != EOPNOTSUPP) {
                                        bp->b_error = error;
                                        bp->b_ioflags |= BIO_ERROR;
                                        bufdone(bp);
                                        return;
                                }
                        }
                }
#ifdef SOFTUPDATES
                if ((bp->b_flags & B_CLUSTER) != 0) {
                        TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
                            b_cluster.cluster_entry) {
                                if (!LIST_EMPTY(&tbp->b_dep))
                                        buf_start(tbp);
                        }
                } else {
                        if (!LIST_EMPTY(&bp->b_dep))
                                buf_start(bp);
                }
#endif
        }
        g_vfs_strategy(bo, bp);
}
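
/*
 * DDB support: "show ffs <addr>" prints a summary of the given struct mount,
 * and plain "show ffs" walks the mount list and prints every UFS mount,
 * including its soft updates work-list counters.
 */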
#ifdef DDB

static void
db_print_ffs(struct ufsmount *ump)
{
        db_printf("mp %p %s devvp %p fs %p su_wl %d su_deps %d su_req %d\n",
            ump->um_mountp, ump->um_mountp->mnt_stat.f_mntonname,
            ump->um_devvp, ump->um_fs, ump->softdep_on_worklist,
            ump->softdep_deps, ump->softdep_req);
}

DB_SHOW_COMMAND(ffs, db_show_ffs)
{
        struct mount *mp;
        struct ufsmount *ump;

        if (have_addr) {
                ump = VFSTOUFS((struct mount *)addr);
                db_print_ffs(ump);
                return;
        }

        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
                        db_print_ffs(VFSTOUFS(mp));
        }
}

#endif /* DDB */