/*
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 * $FreeBSD$
 */
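
/* Option headers generated by config(8) from the kernel configuration. */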
#include "opt_quota.h"
#include "opt_ufs.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/disklabel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>

#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

static MALLOC_DEFINE(M_FFSNODE, "FFS node", "FFS vnode private part");

static int	ffs_sbupdate __P((struct ufsmount *, int));
int	ffs_reload __P((struct mount *, struct ucred *, struct thread *));
static int	ffs_oldfscompat __P((struct fs *));
static int	ffs_init __P((struct vfsconf *));
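
/*
 * Entries appear in struct vfsops declaration order (see sys/mount.h);
 * the vfs_std*() entries are the kernel's generic default implementations.
 */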
static struct vfsops ufs_vfsops = {
	ffs_mount,
	ufs_start,
	ffs_unmount,
	ufs_root,
	ufs_quotactl,
	ffs_statfs,
	ffs_sync,
	ffs_vget,
	ffs_fhtovp,
	vfs_stdcheckexp,
	ffs_vptofh,
	ffs_init,
	vfs_stduninit,
#ifdef UFS_EXTATTR
	ufs_extattrctl,
#else
	vfs_stdextattrctl,
#endif
};
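
/* Register these operations as the "ufs" filesystem type (no VFCF_* flags). */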
VFS_SET(ufs_vfsops, ufs, 0);

/*
 * ffs_mount
 *
 * Called when mounting local physical media
 *
 * PARAMETERS:
 *		mountroot
 *			mp	mount point structure
 *			path	NULL (flag for root mount!!!)
 *			data	<unused>
 *			ndp	<unused>
 *			td	thread (user credentials check [statfs])
 *
 *		mount
 *			mp	mount point structure
 *			path	path to mount point
 *			data	pointer to argument struct in user space
 *			ndp	mount point namei() return (used for
 *				credentials on reload), reused to look
 *				up block device.
 *			td	thread (user credentials check)
 *
 * RETURNS:	0	Success
 *		!0	error number (errno.h)
 *
 * LOCK STATE:
 *
 *		ENTRY
 *			mount point is locked
 *		EXIT
 *			mount point is locked
 *
 * NOTES:
 *		A NULL path can be used for a flag since the mount
 *		system call will fail with EFAULT in copyinstr in
 *		namei() if it is a genuine NULL from the user.
 */
int
ffs_mount(mp, path, data, ndp, td)
	struct mount *mp;	/* mount struct pointer*/
	char *path;		/* path to mount point*/
	caddr_t data;		/* arguments to FS specific mount*/
	struct nameidata *ndp;	/* mount point credentials*/
	struct thread *td;	/* process requesting mount*/
{
	size_t size;
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump = 0;
	register struct fs *fs;
	int error, flags;
	mode_t accessmode;

	/*
	 * Use NULL path to indicate we are mounting the root file system.
	 */
	if (path == NULL) {
		if ((error = bdevvp(rootdev, &rootvp))) {
			printf("ffs_mountroot: can't find rootvp\n");
			return (error);
		}

		if ((error = ffs_mountfs(rootvp, mp, td, M_FFSNODE)) != 0)
			return (error);

		(void)VFS_STATFS(mp, &mp->mnt_stat, td);
		return (0);
	}

	/*
	 * Mounting non-root file system or updating a file system
	 */
	if ((error = copyin(data, (caddr_t)&args, sizeof(struct ufs_args))) != 0)
		return (error);

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		devvp = ump->um_devvp;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			/*
			 * Flush any dirty data.
			 */
			VFS_SYNC(mp, MNT_WAIT, td->td_proc->p_ucred, td);
			/*
			 * Check for and optionally get rid of files open
			 * for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mp->mnt_flag & MNT_SOFTDEP) {
				error = softdep_flushfiles(mp, flags, td);
			} else {
				error = ffs_flushfiles(mp, flags, td);
			}
			if (error) {
				vn_finished_write(mp);
				return (error);
			}
			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("%s: update error: blocks %d files %d\n",
				    fs->fs_fsmnt, fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			fs->fs_ronly = 1;
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				vn_finished_write(mp);
				return (error);
			}
			vn_finished_write(mp);
		}
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, ndp->ni_cnd.cn_cred, td)) != 0)
			return (error);
		if (fs->fs_ronly && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
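			/* (suser_td() returns 0 for the superuser, so root skips this check.) */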
			if (suser_td(td)) {
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
				if ((error = VOP_ACCESS(devvp, VREAD | VWRITE,
				    td->td_ucred, td)) != 0) {
					VOP_UNLOCK(devvp, 0, td);
					return (error);
				}
				VOP_UNLOCK(devvp, 0, td);
			}
			fs->fs_flags &= ~FS_UNCLEAN;
			if (fs->fs_clean == 0) {
				fs->fs_flags |= FS_UNCLEAN;
				if ((mp->mnt_flag & MNT_FORCE) ||
				    ((fs->fs_flags & FS_NEEDSFSCK) == 0 &&
				     (fs->fs_flags & FS_DOSOFTDEP))) {
					printf("WARNING: %s was not %s\n",
					    fs->fs_fsmnt, "properly dismounted");
				} else {
					printf(
"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
					    fs->fs_fsmnt);
					return (EPERM);
				}
			}
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			fs->fs_ronly = 0;
			fs->fs_clean = 0;
			if ((error = ffs_sbupdate(ump, MNT_WAIT)) != 0) {
				vn_finished_write(mp);
				return (error);
			}
			/* check to see if we need to start softdep */
			if ((fs->fs_flags & FS_DOSOFTDEP) &&
			    (error = softdep_mount(devvp, mp, fs, td->td_ucred))) {
				vn_finished_write(mp);
				return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			vn_finished_write(mp);
		}
		/*
		 * Soft updates is incompatible with "async",
		 * so if we are doing softupdates stop the user
		 * from setting the async flag in an update.
		 * Softdep_mount() clears it in an initial mount
		 * or ro->rw remount.
		 */
		if (mp->mnt_flag & MNT_SOFTDEP)
			mp->mnt_flag &= ~MNT_ASYNC;
		/*
		 * If not updating name, process export requests.
		 */
		if (args.fspec == 0)
			return (vfs_export(mp, &args.export));
		/*
		 * If this is a snapshot request, take the snapshot.
		 */
		if (mp->mnt_flag & MNT_SNAPSHOT)
			return (ffs_snapshot(mp, args.fspec));
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, td);
	if ((error = namei(ndp)) != 0)
		return (error);
	NDFREE(ndp, NDF_ONLY_PNBUF);
	devvp = ndp->ni_vp;
	if (!vn_isdisk(devvp, &error)) {
		vrele(devvp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (suser_td(td)) {
		accessmode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
		if ((error = VOP_ACCESS(devvp, accessmode, td->td_ucred, td)) != 0) {
			vput(devvp);
			return (error);
		}
		VOP_UNLOCK(devvp, 0, td);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update only
		 *
		 * If it's not the same vnode, or at least the same device
		 * then it's not correct.
		 */
		if (devvp != ump->um_devvp &&
		    devvp->v_rdev != ump->um_devvp->v_rdev)
			error = EINVAL;	/* needs translation */
		vrele(devvp);
		if (error)
			return (error);
	} else {
		/*
		 * New mount
		 *
		 * We need the name for the mount point (also used for
		 * "last mounted on") copied in. If an error occurs,
		 * the mount point is discarded by the upper level code.
		 * Note that vfs_mount() populates f_mntonname for us.
		 */
		if ((error = ffs_mountfs(devvp, mp, td, M_FFSNODE)) != 0) {
			vrele(devvp);
			return (error);
		}
	}
	/*
	 * Save "mounted from" device name info for mount point (NULL pad).
	 */
	copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, &size);
	bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
	/*
	 * Initialize filesystem stat information in mount struct.
	 */
	(void)VFS_STATFS(mp, &mp->mnt_stat, td);
	return (0);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
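/* Reached from ffs_mount() when both MNT_UPDATE and MNT_RELOAD are set. */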
int
ffs_reload(mp, cred, td)
	register struct mount *mp;
	struct ucred *cred;
	struct thread *td;
{
	register struct vnode *vp, *nvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct partinfo dpart;
	dev_t dev;
	int i, blks, size, error;
	int32_t *lp;

	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
	error = vinvalbuf(devvp, 0, cred, td, 0, 0);
	VOP_UNLOCK(devvp, 0, td);
	if (error)
		panic("ffs_reload: dirty1");

	dev = devvp->v_rdev;

	/*
	 * Only VMIO the backing device if the backing device is a real
	 * block device.
	 */
	if (vn_isdisk(devvp, NULL)) {
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
		vfs_object_create(devvp, td, td->td_ucred);
		mtx_lock(&devvp->v_interlock);
		VOP_UNLOCK(devvp, LK_INTERLOCK, td);
	}

	/*
	 * Step 2: re-read superblock from disk.
	 */
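	/* DIOCGPART gives the media sector size; fall back to DEV_BSIZE (512). */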
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, td) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
	if ((error = bread(devvp, (ufs_daddr_t)(SBOFF/size), SBSIZE, NOCRED, &bp)) != 0)
		return (error);
	newfs = (struct fs *)bp->b_data;
	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		return (EIO);		/* XXX needs translation */
	}
	fs = VFSTOUFS(mp)->um_fs;
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	newfs->fs_csp = fs->fs_csp;
	newfs->fs_maxcluster = fs->fs_maxcluster;
	newfs->fs_contigdirs = fs->fs_contigdirs;
	newfs->fs_active = fs->fs_active;
	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL | B_NOCACHE;
	brelse(bp);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat(fs);
	/* An old fsck may have zeroed these fields, so recheck them. */
	if (fs->fs_avgfilesize <= 0)		/* XXX */
		fs->fs_avgfilesize = AVFILESIZ;	/* XXX */
	if (fs->fs_avgfpdir <= 0)		/* XXX */
		fs->fs_avgfpdir = AFPDIR;	/* XXX */
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("%s: reload pending error: blocks %d files %d\n",
		    fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}

	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = fs->fs_csp;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

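	/*
	 * Steps 4-6 walk the per-mount vnode list; the scan restarts from
	 * the top whenever the list changes while mntvnode_mtx is dropped.
	 */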
loop:
	mtx_lock(&mntvnode_mtx);
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
		if (vp->v_mount != mp) {
			mtx_unlock(&mntvnode_mtx);
			goto loop;
		}
		nvp = TAILQ_NEXT(vp, v_nmntvnodes);
		mtx_unlock(&mntvnode_mtx);
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vrecycle(vp, NULL, td))
			goto loop;
		/*
		 * Step 5: invalidate all cached file data.
		 */
		mtx_lock(&vp->v_interlock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			goto loop;
		}
		if (vinvalbuf(vp, 0, cred, td, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			vput(vp);
			return (error);
		}
		ip->i_din = *((struct dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number));
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		vput(vp);
		mtx_lock(&mntvnode_mtx);
	}
	mtx_unlock(&mntvnode_mtx);
	return (0);
}
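
/* Run-time debugging knob, exported as the debug.bigcgs sysctl. */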
#include <sys/sysctl.h>
int bigcgs = 0;
SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");

/*
 * Common code for mount and mountroot
 */
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
ffs_mountfs(devvp, mp, td, malloctype)
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct vnode *devvp;
|
|
|
|
struct mount *mp;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td;
|
1997-10-10 18:17:00 +00:00
|
|
|
struct malloc_type *malloctype;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
register struct ufsmount *ump;
|
|
|
|
struct buf *bp;
|
|
|
|
register struct fs *fs;
|
1997-02-10 02:22:35 +00:00
|
|
|
dev_t dev;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct partinfo dpart;
|
2001-01-15 18:30:40 +00:00
|
|
|
void *space;
|
1998-10-25 17:44:59 +00:00
|
|
|
int error, i, blks, size, ronly;
|
1997-02-10 02:22:35 +00:00
|
|
|
int32_t *lp;
|
|
|
|
struct ucred *cred;
|
|
|
|
u_int64_t maxfilesize; /* XXX */
|
1998-06-04 17:21:39 +00:00
|
|
|
size_t strsize;
|
1996-08-21 21:56:23 +00:00
|
|
|
int ncount;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
dev = devvp->v_rdev;
|
2002-02-27 18:32:23 +00:00
|
|
|
cred = td ? td->td_ucred : NOCRED;
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Disallow multiple mounts of the same device.
|
|
|
|
* Disallow mounting of a device that is currently in use
|
|
|
|
* (except for root, which might share swap device for miniroot).
|
|
|
|
* Flush out any old buffers remaining from a previous use.
|
|
|
|
*/
|
1994-10-08 06:20:06 +00:00
|
|
|
error = vfs_mountedon(devvp);
|
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
1996-08-21 21:56:23 +00:00
|
|
|
ncount = vcount(devvp);
|
This mega-commit is meant to fix numerous interrelated problems. There
has been some bitrot and incorrect assumptions in the vfs_bio code. These
problems have manifest themselves worse on NFS type filesystems, but can
still affect local filesystems under certain circumstances. Most of
the problems have involved mmap consistancy, and as a side-effect broke
the vfs.ioopt code. This code might have been committed seperately, but
almost everything is interrelated.
1) Allow (pmap_object_init_pt) prefaulting of buffer-busy pages that
are fully valid.
2) Rather than deactivating erroneously read initial (header) pages in
kern_exec, we now free them.
3) Fix the rundown of non-VMIO buffers that are in an inconsistent
(missing vp) state.
4) Fix the disassociation of pages from buffers in brelse. The previous
code had rotted and was faulty in a couple of important circumstances.
5) Remove a gratuitious buffer wakeup in vfs_vmio_release.
6) Remove a crufty and currently unused cluster mechanism for VBLK
files in vfs_bio_awrite. When the code is functional, I'll add back
a cleaner version.
7) The page busy count wakeups assocated with the buffer cache usage were
incorrectly cleaned up in a previous commit by me. Revert to the
original, correct version, but with a cleaner implementation.
8) The cluster read code now tries to keep data associated with buffers
more aggressively (without breaking the heuristics) when it is presumed
that the read data (buffers) will be soon needed.
9) Change to filesystem lockmgr locks so that they use LK_NOPAUSE. The
delay loop waiting is not useful for filesystem locks, due to the
length of the time intervals.
10) Correct and clean-up spec_getpages.
11) Implement a fully functional nfs_getpages, nfs_putpages.
12) Fix nfs_write so that modifications are coherent with the NFS data on
the server disk (at least as well as NFS seems to allow.)
13) Properly support MS_INVALIDATE on NFS.
14) Properly pass down MS_INVALIDATE to lower levels of the VM code from
vm_map_clean.
15) Better support the notion of pages being busy but valid, so that
fewer in-transit waits occur. (use p->busy more for pageouts instead
of PG_BUSY.) Since the page is fully valid, it is still usable for
reads.
16) It is possible (in error) for cached pages to be busy. Make the
page allocation code handle that case correctly. (It should probably
be a printf or panic, but I want the system to handle coding errors
robustly. I'll probably add a printf.)
17) Correct the design and usage of vm_page_sleep. It didn't handle
consistancy problems very well, so make the design a little less
lofty. After vm_page_sleep, if it ever blocked, it is still important
to relookup the page (if the object generation count changed), and
verify it's status (always.)
18) In vm_pageout.c, vm_pageout_clean had rotted, so clean that up.
19) Push the page busy for writes and VM_PROT_READ into vm_pageout_flush.
20) Fix vm_pager_put_pages and it's descendents to support an int flag
instead of a boolean, so that we can pass down the invalidate bit.
1998-03-07 21:37:31 +00:00
|
|
|
|
1996-08-21 21:56:23 +00:00
|
|
|
if (ncount > 1 && devvp != rootvp)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EBUSY);
|
2001-09-12 08:38:13 +00:00
|
|
|
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
|
|
|
|
error = vinvalbuf(devvp, V_SAVE, cred, td, 0, 0);
|
|
|
|
VOP_UNLOCK(devvp, 0, td);
|
1998-03-08 09:59:44 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
|
Make our v_usecount vnode reference count work identically to the
original BSD code. The association between the vnode and the vm_object
no longer includes reference counts. The major difference is that
vm_object's are no longer freed gratuitiously from the vnode, and so
once an object is created for the vnode, it will last as long as the
vnode does.
When a vnode object reference count is incremented, then the underlying
vnode reference count is incremented also. The two "objects" are now
more intimately related, and so the interactions are now much less
complex.
When vnodes are now normally placed onto the free queue with an object still
attached. The rundown of the object happens at vnode rundown time, and
happens with exactly the same filesystem semantics of the original VFS
code. There is absolutely no need for vnode_pager_uncache and other
travesties like that anymore.
A side-effect of these changes is that SMP locking should be much simpler,
the I/O copyin/copyout optimizations work, NFS should be more ponderable,
and further work on layered filesystems should be less frustrating, because
of the totally coherent management of the vnode objects and vnodes.
Please be careful with your system while running this code, but I would
greatly appreciate feedback as soon a reasonably possible.
1998-01-06 05:26:17 +00:00
|
|
|
/*
|
|
|
|
* Only VMIO the backing device if the backing device is a real
|
2001-05-29 21:21:53 +00:00
|
|
|
* block device.
|
         * Note that it is optional that the backing device be VMIOed.  This
         * increases the opportunity for metadata caching.
         */
        if (vn_isdisk(devvp, NULL)) {
                vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
                vfs_object_create(devvp, td, cred);
Change and clean the mutex lock interface.
mtx_enter(lock, type) becomes:
mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks)
mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized)
Similarly, for releasing a lock, we now have:
mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN.
We change the caller interface for the two different types of locks
because the semantics are entirely different for each case, and this
makes it explicitly clear and, at the same time, it rids us of the
extra `type' argument.
The enter->lock and exit->unlock change has been made with the idea
that we're "locking data" and not "entering locked code" in mind.
Further, remove all additional "flags" previously passed to the
lock acquire/release routines with the exception of two:
MTX_QUIET and MTX_NOSWITCH
The functionality of these flags is preserved and they can be passed
to the lock/unlock routines by calling the corresponding wrappers:
mtx_{lock, unlock}_flags(lock, flag(s)) and
mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN
locks, respectively.
Re-inline some lock acq/rel code; in the sleep lock case, we only
inline the _obtain_lock()s in order to ensure that the inlined code
fits into a cache line.  In the spin lock case, we inline recursion and
actually only perform a function call if we need to spin.  This change
has been made with the idea that we generally tend to avoid spin locks
and that also the spin locks that we do have and are heavily used
(i.e. sched_lock) do recurse, and therefore in an effort to reduce
function call overhead for some architectures (such as alpha), we
inline recursion for this case.
Create a new malloc type for the witness code and retire from using
the M_DEV type.  The new type is called M_WITNESS and is only declared
if WITNESS is enabled.
Begin cleaning up some machdep/mutex.h code - specifically updated the
"optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN
and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently
need those.
Finally, caught up to the interface changes in all sys code.
Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
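As a quick illustration of the renamed interface, a minimal usage sketch
(the locks here are hypothetical and assumed to have been set up elsewhere
with mtx_init(), MTX_DEF for `m' and MTX_SPIN for `s'):

static struct mtx m;            /* a sleep (MTX_DEF) lock */
static struct mtx s;            /* a spin (MTX_SPIN) lock */

static void
mtx_example(void)
{
        mtx_lock(&m);           /* formerly mtx_enter(&m, MTX_DEF) */
        /* ... touch data protected by m ... */
        mtx_unlock(&m);         /* formerly mtx_exit(&m, MTX_DEF) */

        mtx_lock_spin(&s);      /* formerly mtx_enter(&s, MTX_SPIN) */
        mtx_unlock_spin(&s);

        /* MTX_QUIET/MTX_NOSWITCH now go through the _flags wrappers: */
        mtx_lock_flags(&m, MTX_QUIET);
        mtx_unlock_flags(&m, MTX_QUIET);
}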
                mtx_lock(&devvp->v_interlock);
                VOP_UNLOCK(devvp, LK_INTERLOCK, td);
        }

        ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
        vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
        error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, td);
        VOP_UNLOCK(devvp, 0, td);
        if (error)
                return (error);
        if (devvp->v_rdev->si_iosize_max > mp->mnt_iosize_max)
                mp->mnt_iosize_max = devvp->v_rdev->si_iosize_max;
        if (mp->mnt_iosize_max > MAXPHYS)
                mp->mnt_iosize_max = MAXPHYS;
        if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, td) != 0)
                size = DEV_BSIZE;
        else
                size = dpart.disklab->d_secsize;

        bp = NULL;
        ump = NULL;
        if ((error = bread(devvp, SBLOCK, SBSIZE, cred, &bp)) != 0)
                goto out;
        fs = (struct fs *)bp->b_data;
        if (fs->fs_magic != FS_MAGIC || fs->fs_bsize > MAXBSIZE ||
            fs->fs_bsize < sizeof(struct fs)) {
                error = EINVAL;         /* XXX needs translation */
                goto out;
        }
        fs->fs_fmod = 0;
        fs->fs_flags &= ~FS_UNCLEAN;
        if (fs->fs_clean == 0) {
                fs->fs_flags |= FS_UNCLEAN;
                if (ronly || (mp->mnt_flag & MNT_FORCE) ||
                    ((fs->fs_flags & FS_NEEDSFSCK) == 0 &&
                     (fs->fs_flags & FS_DOSOFTDEP))) {
                        printf(
"WARNING: %s was not properly dismounted\n",
                            fs->fs_fsmnt);
                } else {
                        printf(
"WARNING: R/W mount of %s denied.  Filesystem is not clean - run fsck\n",
                            fs->fs_fsmnt);
                        error = EPERM;
                        goto out;
                }
                if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
                        printf("%s: lost blocks %d files %d\n", fs->fs_fsmnt,
                            fs->fs_pendingblocks, fs->fs_pendinginodes);
                        fs->fs_pendingblocks = 0;
                        fs->fs_pendinginodes = 0;
                }
        }
        if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
                printf("%s: mount pending error: blocks %d files %d\n",
                    fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
                fs->fs_pendingblocks = 0;
                fs->fs_pendinginodes = 0;
        }
        /* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
        if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
                error = EROFS;          /* needs translation */
                goto out;
        }
        ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
        ump->um_malloctype = malloctype;
        ump->um_i_effnlink_valid = 1;
        ump->um_fs = malloc((u_long)fs->fs_sbsize, M_UFSMNT,
            M_WAITOK);
VFS mega cleanup commit (x/N)
1.  Add new file "sys/kern/vfs_default.c" where default actions for
    VOPs go.  Implement proper defaults for ABORTOP, BWRITE, LEASE,
    POLL, REVOKE and STRATEGY.  Various stuff spread over the entire
    tree belongs here.
2.  Change VOP_BLKATOFF to a normal function in cd9660.
3.  Kill VOP_BLKATOFF, VOP_TRUNCATE, VOP_VFREE, VOP_VALLOC.  These
    are private interface functions between UFS and the underlying
    storage manager layer (FFS/LFS/MFS/EXT2FS).  The functions now
    live in struct ufsmount instead.
4.  Remove a kludge of VOP_ functions in all filesystems, that did
    nothing but obscure the simplicity and break the expandability.
    If a filesystem doesn't implement VOP_FOO, it shouldn't have an
    entry for it in its vnops table.  The system will try to DTRT
    if it is not implemented.  There is still some cruft left, but
    the bulk of it is done.
5.  Fix another VCALL in vfs_cache.c (thanks Bruce!)
1997-10-16 10:50:27 +00:00
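For illustration, the indirection described in item 3 amounts to a
dispatch macro along these lines (a paraphrased sketch, not necessarily
the exact macro in the UFS headers), routing through the function
pointers installed at mount time just below:

#define UFS_UPDATE(vp, waitfor) \
        (VFSTOUFS((vp)->v_mount)->um_update((vp), (waitfor)))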
        ump->um_blkatoff = ffs_blkatoff;
        ump->um_truncate = ffs_truncate;
        ump->um_update = ffs_update;
        ump->um_valloc = ffs_valloc;
        ump->um_vfree = ffs_vfree;
        ump->um_balloc = ffs_balloc;
        bcopy(bp->b_data, ump->um_fs, (u_int)fs->fs_sbsize);
        if (fs->fs_sbsize < SBSIZE)
                bp->b_flags |= B_INVAL | B_NOCACHE;
        brelse(bp);
        bp = NULL;
        fs = ump->um_fs;
        fs->fs_ronly = ronly;
        size = fs->fs_cssize;
        blks = howmany(size, fs->fs_fsize);
        if (fs->fs_contigsumsize > 0)
                size += fs->fs_ncg * sizeof(int32_t);
Directory layout preference improvements from Grigoriy Orlov <gluk@ptci.ru>.
His description of the problem and solution follows.  My own tests show
speedups on typical filesystem-intensive workloads of 5% to 12%, which
is very impressive considering the small amount of code change involved.
------
One day I noticed that some file operations run much faster on
small file systems than on big ones.  I looked at the ffs
algorithms, thought about them, and redesigned the dirpref algorithm.
First I want to describe the results of my tests.  These results are old
and I have improved the algorithm after these tests were done.  Nevertheless
they show how big the performance speedup may be.  I have done two
file/directory-intensive tests on two OpenBSD systems with the old and new
dirpref algorithm.  The first test is "tar -xzf ports.tar.gz", the second
is "rm -rf ports".  The ports.tar.gz file is the ports collection from the
OpenBSD 2.8 release.  It contains 6596 directories and 13868 files.  The
test systems are:
1. Celeron-450, 128Mb, two IDE drives, the system at wd0, file system for
   test is at wd1.  Size of test file system is 8 Gb, number of cg=991,
   size of cg is 8m, block size = 8k, fragment size = 1k, OpenBSD-current
   from Dec 2000 with BUFCACHEPERCENT=35
2. PIII-600, 128Mb, two IBM DTLA-307045 IDE drives at i815e, the system
   at wd0, file system for test is at wd1.  Size of test file system is
   40 Gb, number of cg=5324, size of cg is 8m, block size = 8k, fragment
   size = 1k, OpenBSD-current from Dec 2000 with BUFCACHEPERCENT=50
You can get more info about the test systems and methods at:
http://www.ptci.ru/gluk/dirpref/old/dirpref.html
Test results ("old dirpref" and "new dirpref" columns give the test time
in seconds; speedup = old dirpref / new dirpref):

                tar -xzf ports.tar.gz               rm -rf ports
mode         old dirpref new dirpref speedup  old dirpref new dirpref speedup
First system
normal           667        472        1.41      477         331        1.44
async            285        144        1.98      130          14        9.29
sync             768        616        1.25      477         334        1.43
softdep          413        252        1.64      241          38        6.34
Second system
normal           329         81        4.06      263.5        93.5      2.81
async            302         25.7     11.75      112           2.26    49.56
sync             281         57.0      4.93      263          90.5      2.9
softdep          341         40.6      8.4       284           4.76    59.66
------
Algorithm description
The old dirpref algorithm is described in comments:
/*
 * Find a cylinder to place a directory.
 *
 * The policy implemented by this algorithm is to select from
 * among those cylinder groups with above the average number of
 * free inodes, the one with the smallest number of directories.
 */
A new directory is allocated in a different cylinder group than its
parent directory, resulting in a directory tree that is spread across
all the cylinder groups.  This spreading out results in non-optimal
access to the directories and files.  When we have a small filesystem
it is not a problem, but when the filesystem is big the performance
degradation becomes very apparent.
What do I mean by a big file system?
1. A big filesystem is a filesystem which occupies 20-30 or more percent
   of total drive space, i.e. the first and last cylinders are physically
   located relatively far from each other.
2. It has a relatively large number of cylinder groups, for example
   more cylinder groups than 50% of the buffers in the buffer cache.
The first results in long access times, while the second results in
many buffers being used by metadata operations.  Such operations use
cylinder group blocks and on-disk inode blocks.  The cylinder group
block (fs->fs_cblkno) contains struct cg, inode and block bit maps.
It is 2k in size for the default filesystem parameters.  If new and
parent directories are located in different cylinder groups then the
system performs more input/output operations and uses more buffers.
On filesystems with many cylinder groups, lots of cache buffers are
used for metadata operations.
My solution for this problem is very simple.  I allocate many directories
in one cylinder group.  I also do some things so that the new allocation
method does not cause excessive fragmentation and all directory inodes
will not be located far from their files' inodes and data.
The algorithm is:
/*
 * Find a cylinder group to place a directory.
 *
 * The policy implemented by this algorithm is to allocate a
 * directory inode in the same cylinder group as its parent
 * directory, but also to reserve space for its files inodes
 * and data.  Restrict the number of directories which may be
 * allocated one after another in the same cylinder group
 * without intervening allocation of files.
 *
 * If we allocate a first level directory then force allocation
 * in another cylinder group.
 */
My early versions of dirpref gave me good results for a wide range of
file operations and different filesystem capacities except one case:
those applications that create their entire directory structure first
and only later fill this structure with files.
My solution for such and similar cases is to limit the number of
directories which may be created one after another in the same cylinder
group without intervening file creations.  For this purpose, I allocate
an array of counters at mount time.  This array is linked to the
superblock, fs->fs_contigdirs[cg].  Each time a directory is created the
counter increases and each time a file is created the counter decreases.
A 60Gb filesystem with 8mb/cg requires 10kb of memory for the counters
array.
The maxcontigdirs is the maximum number of directories which may be created
without an intervening file creation.  I found in my tests that the best
performance occurs when I restrict the number of directories in one cylinder
group such that all its files may be located in the same cylinder group.
There may be some deterioration in performance if all the file inodes
are in the same cylinder group as their containing directory, but their
data partially resides in a different cylinder group.  The maxcontigdirs
value is calculated to try to prevent this condition.  Since there is
no way to know how many files and directories will be allocated later,
I added two optimization parameters in superblock/tunefs.  They are:
int32_t fs_avgfilesize;    /* expected average file size */
int32_t fs_avgfpdir;       /* expected # of files per directory */
These parameters have reasonable defaults but may be tweaked for special
uses of a filesystem.  They are only necessary in rare cases, like better
tuning a filesystem being used to store a squid cache.
I have been using this algorithm for about 3 months.  I have done
a lot of testing on filesystems with different capacities, average
file size, average number of files per directory, and so on.  I think
this algorithm has no negative impact on filesystem performance.  It
works better than the default one in all cases.  The new dirpref
will greatly improve untarring/removing/copying of big directories,
decrease load on cvs servers and much more.  The new dirpref doesn't
speed up the compilation process, but also doesn't slow it down.
Obtained from:  Grigoriy Orlov <gluk@ptci.ru>
2001-04-10 08:38:59 +00:00
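A minimal sketch of the per-cylinder-group bookkeeping described above
(the helper name is hypothetical; the committed logic lives in the FFS
allocator, and the counters are the u_int8_t fs_contigdirs[] array
allocated at mount time below):

static void
sketch_contigdirs_account(struct fs *fs, int cg, int is_dir)
{
        if (is_dir) {
                if (fs->fs_contigdirs[cg] < 255)        /* u_int8_t counter */
                        fs->fs_contigdirs[cg]++;        /* one more dir here */
        } else if (fs->fs_contigdirs[cg] > 0)
                fs->fs_contigdirs[cg]--;        /* a file "pays back" a slot */
}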
        size += fs->fs_ncg * sizeof(u_int8_t);
        space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
        fs->fs_csp = space;
        for (i = 0; i < blks; i += fs->fs_frag) {
                size = fs->fs_bsize;
                if (i + fs->fs_frag > blks)
                        size = (blks - i) * fs->fs_fsize;
                if ((error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
                    cred, &bp)) != 0) {
                        free(fs->fs_csp, M_UFSMNT);
                        goto out;
                }
                bcopy(bp->b_data, space, (u_int)size);
                space = (char *)space + size;
                brelse(bp);
                bp = NULL;
        }
        if (fs->fs_contigsumsize > 0) {
                fs->fs_maxcluster = lp = space;
                for (i = 0; i < fs->fs_ncg; i++)
                        *lp++ = fs->fs_contigsumsize;
                space = lp;
        }
        size = fs->fs_ncg * sizeof(u_int8_t);
        fs->fs_contigdirs = (u_int8_t *)space;
        bzero(fs->fs_contigdirs, size);
        fs->fs_active = NULL;
        /* Compatibility for old filesystems                    XXX */
        if (fs->fs_avgfilesize <= 0)                            /* XXX */
                fs->fs_avgfilesize = AVFILESIZ;                 /* XXX */
        if (fs->fs_avgfpdir <= 0)                               /* XXX */
                fs->fs_avgfpdir = AFPDIR;                       /* XXX */
        mp->mnt_data = (qaddr_t)ump;
        mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
        mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
        if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
            vfs_getvfs(&mp->mnt_stat.f_fsid))
                vfs_getnewfsid(mp);
        mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
        mp->mnt_flag |= MNT_LOCAL;
        ump->um_mountp = mp;
        ump->um_dev = dev;
        ump->um_devvp = devvp;
        ump->um_nindir = fs->fs_nindir;
        ump->um_bptrtodb = fs->fs_fsbtodb;
        ump->um_seqinc = fs->fs_frag;
        for (i = 0; i < MAXQUOTAS; i++)
                ump->um_quotas[i] = NULLVP;
#ifdef UFS_EXTATTR
        ufs_extattr_uepm_init(&ump->um_extattr);
#endif
        devvp->v_rdev->si_mountpoint = mp;
        ffs_oldfscompat(fs);

        /*
         * Set FS local "last mounted on" information (NULL pad)
         */
        copystr(mp->mnt_stat.f_mntonname,       /* mount point */
            fs->fs_fsmnt,                       /* copy area */
            sizeof(fs->fs_fsmnt) - 1,           /* max size */
            &strsize);                          /* real size */
        bzero(fs->fs_fsmnt + strsize, sizeof(fs->fs_fsmnt) - strsize);

        if (mp->mnt_flag & MNT_ROOTFS) {
                /*
                 * Root mount; update timestamp in mount structure.
                 * this will be used by the common root mount code
                 * to update the system clock.
                 */
                mp->mnt_time = fs->fs_time;
        }

        ump->um_savedmaxfilesize = fs->fs_maxfilesize;          /* XXX */
        maxfilesize = (u_int64_t)0x40000000 * fs->fs_bsize - 1; /* XXX */
        if (fs->fs_maxfilesize > maxfilesize)                   /* XXX */
                fs->fs_maxfilesize = maxfilesize;               /* XXX */
        if (bigcgs) {
                if (fs->fs_sparecon[0] <= 0)
                        fs->fs_sparecon[0] = fs->fs_cgsize;
                fs->fs_cgsize = fs->fs_bsize;
        }
        if (ronly == 0) {
                if ((fs->fs_flags & FS_DOSOFTDEP) &&
                    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
                        free(fs->fs_csp, M_UFSMNT);
                        goto out;
                }
                if (fs->fs_snapinum[0] != 0)
                        ffs_snapshot_mount(mp);
                fs->fs_fmod = 1;
                fs->fs_clean = 0;
                (void) ffs_sbupdate(ump, MNT_WAIT);
        }
#ifdef UFS_EXTATTR
#ifdef UFS_EXTATTR_AUTOSTART
o Implement "options FFS_EXTATTR_AUTOSTART", which depends on
"options FFS_EXTATTR". When extended attribute auto-starting
is enabled, FFS will scan the .attribute directory off of the
root of each file system, as it is mounted. If .attribute
exists, EA support will be started for the file system. If
there are files in the directory, FFS will attempt to start
them as attribute backing files for attributes baring the same
name. All attributes are started before access to the file
system is permitted, so this permits race-free enabling of
attributes. For attributes backing support for security
features, such as ACLs, MAC, Capabilities, this is vital, as
it prevents the file system attributes from getting out of
sync as a result of file system operations between mount-time
and the enabling of the extended attribute. The userland
extattrctl tool will still function exactly as previously.
Files must be placed directly in .attribute, which must be
directly off of the file system root: symbolic links are
not permitted. FFS_EXTATTR will continue to be able
to function without FFS_EXTATTR_AUTOSTART for sites that do not
want/require auto-starting. If you're using the UFS_ACL code
available from www.TrustedBSD.org, using FFS_EXTATTR_AUTOSTART
is recommended.
o This support is implemented by adding an invocation of
ufs_extattr_autostart() to ffs_mountfs(). In addition,
several new supporting calls are introduced in
ufs_extattr.c:
ufs_extattr_autostart(): start EAs on the specified mount
ufs_extattr_lookup(): given a directory and filename,
return the vnode for the file.
ufs_extattr_enable_with_open(): invoke ufs_extattr_enable()
after doing the equililent of vn_open()
on the passed file.
ufs_extattr_iterate_directory(): iterate over a directory,
invoking ufs_extattr_lookup() and
ufs_extattr_enable_with_open() on each
entry.
o This feature is not widely tested, and therefore may contain
bugs, caution is advised. Several changes are in the pipeline
for this feature, including breaking out of EA namespaces into
subdirectories of .attribute (this is waiting on the updated
EA API), as well as a per-filesystem flag indicating whether
or not EAs should be auto-started. This is required because
administrators may not want .attribute auto-started on all
file systems, especially if non-administrators have write access
to the root of a file system.
Obtained from: TrustedBSD Project
2001-03-14 05:32:31 +00:00
|
|
|
        /*
         * Auto-starting does the following:
         *      - check for /.attribute in the fs, and extattr_start if so
         *      - for each file in .attribute, enable that file with
         *        an attribute of the same name.
         * Not clear how to report errors -- probably eat them.
         * This would all happen while the file system was busy/not
         * available, so would effectively be "atomic".
         */
        (void) ufs_extattr_autostart(mp, td);
#endif /* !UFS_EXTATTR_AUTOSTART */
#endif /* !UFS_EXTATTR */
        return (0);
out:
        devvp->v_rdev->si_mountpoint = NULL;
        if (bp)
                brelse(bp);
        (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, td);
        if (ump) {
                free(ump->um_fs, M_UFSMNT);
                free(ump, M_UFSMNT);
                mp->mnt_data = (qaddr_t)0;
        }
        return (error);
}
/*
 * Sanity checks for old file systems.
 *
 * XXX - goes away some day.
 */
static int
ffs_oldfscompat(fs)
        struct fs *fs;
{

        fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);       /* XXX */
        fs->fs_interleave = max(fs->fs_interleave, 1);          /* XXX */
        if (fs->fs_postblformat == FS_42POSTBLFMT)              /* XXX */
                fs->fs_nrpos = 8;                               /* XXX */
        if (fs->fs_inodefmt < FS_44INODEFMT) {                  /* XXX */
#if 0
                int i;                                          /* XXX */
                u_int64_t sizepb = fs->fs_bsize;                /* XXX */
                                                                /* XXX */
                fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1; /* XXX */
                for (i = 0; i < NIADDR; i++) {                  /* XXX */
                        sizepb *= NINDIR(fs);                   /* XXX */
                        fs->fs_maxfilesize += sizepb;           /* XXX */
                }                                               /* XXX */
#endif
                fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
                fs->fs_qbmask = ~fs->fs_bmask;                  /* XXX */
                fs->fs_qfmask = ~fs->fs_fmask;                  /* XXX */
        }                                                       /* XXX */
        return (0);
}
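For concreteness, a worked instance of the disabled (#if 0) computation
above, with assumed parameters fs_bsize = 8192, NDADDR = 12, NIADDR = 3
and 4-byte block pointers, so that NINDIR(fs) = 8192 / 4 = 2048:

/*
 *      maxfilesize = 12 * 8192 - 1             (direct blocks)
 *                  + 2048   * 8192             (single indirect)
 *                  + 2048^2 * 8192             (double indirect)
 *                  + 2048^3 * 8192             (triple indirect)
 *                  ~ 2^46 bytes,
 *
 * whereas the live code above simply installs the smaller fixed limit
 * of (u_quad_t)1LL << 39.
 */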
/*
 * unmount system call
 */
int
ffs_unmount(mp, mntflags, td)
        struct mount *mp;
        int mntflags;
        struct thread *td;
{
        register struct ufsmount *ump = VFSTOUFS(mp);
        register struct fs *fs;
        int error, flags;

        flags = 0;
        if (mntflags & MNT_FORCE) {
                flags |= FORCECLOSE;
        }
#ifdef UFS_EXTATTR
        if ((error = ufs_extattr_stop(mp, td))) {
                if (error != EOPNOTSUPP)
                        printf("ffs_unmount: ufs_extattr_stop returned %d\n",
                            error);
        } else {
                ufs_extattr_uepm_destroy(&ump->um_extattr);
        }
#endif
        if (mp->mnt_flag & MNT_SOFTDEP) {
                if ((error = softdep_flushfiles(mp, flags, td)) != 0)
                        return (error);
        } else {
                if ((error = ffs_flushfiles(mp, flags, td)) != 0)
                        return (error);
        }
        fs = ump->um_fs;
        if (bigcgs) {
                fs->fs_cgsize = fs->fs_sparecon[0];
                fs->fs_sparecon[0] = 0;
        }
        if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
                printf("%s: unmount pending error: blocks %d files %d\n",
                    fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
                fs->fs_pendingblocks = 0;
                fs->fs_pendinginodes = 0;
        }
        if (fs->fs_ronly == 0) {
                fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
                error = ffs_sbupdate(ump, MNT_WAIT);
                if (error) {
                        fs->fs_clean = 0;
                        return (error);
                }
        }
        ump->um_devvp->v_rdev->si_mountpoint = NULL;

        vinvalbuf(ump->um_devvp, V_SAVE, NOCRED, td, 0, 0);
        error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
            NOCRED, td);

        vrele(ump->um_devvp);

        free(fs->fs_csp, M_UFSMNT);
        free(fs, M_UFSMNT);
        free(ump, M_UFSMNT);
        mp->mnt_data = (qaddr_t)0;
        mp->mnt_flag &= ~MNT_LOCAL;
        return (error);
}
/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, td)
        register struct mount *mp;
        int flags;
        struct thread *td;
{
        register struct ufsmount *ump;
        int error;

        ump = VFSTOUFS(mp);
#ifdef QUOTA
        if (mp->mnt_flag & MNT_QUOTA) {
                int i;
                error = vflush(mp, 0, SKIPSYSTEM|flags);
                if (error)
                        return (error);
                for (i = 0; i < MAXQUOTAS; i++) {
                        if (ump->um_quotas[i] == NULLVP)
                                continue;
                        quotaoff(td, mp, i);
                }
                /*
                 * Here we fall through to vflush again to ensure
                 * that we have gotten rid of all the system vnodes.
                 */
        }
#endif
        if (ump->um_devvp->v_flag & VCOPYONWRITE) {
                if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
                        return (error);
                ffs_snapshot_unmount(mp);
                /*
                 * Here we fall through to vflush again to ensure
                 * that we have gotten rid of all the system vnodes.
                 */
        }
        /*
         * Flush all the files.
         */
        if ((error = vflush(mp, 0, flags)) != 0)
                return (error);
        /*
         * Flush filesystem metadata.
         */
        vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, td);
        error = VOP_FSYNC(ump->um_devvp, td->td_ucred, MNT_WAIT, td);
        VOP_UNLOCK(ump->um_devvp, 0, td);
        return (error);
}
/*
 * Get file system statistics.
 */
int
ffs_statfs(mp, sbp, td)
        struct mount *mp;
        register struct statfs *sbp;
        struct thread *td;
{
        register struct ufsmount *ump;
        register struct fs *fs;

        ump = VFSTOUFS(mp);
        fs = ump->um_fs;
        if (fs->fs_magic != FS_MAGIC)
                panic("ffs_statfs");
        sbp->f_bsize = fs->fs_fsize;
        sbp->f_iosize = fs->fs_bsize;
        sbp->f_blocks = fs->fs_dsize;
        sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
            fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
        sbp->f_bavail = freespace(fs, fs->fs_minfree) +
            dbtofsb(fs, fs->fs_pendingblocks);
        sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
        sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
        if (sbp != &mp->mnt_stat) {
                sbp->f_type = mp->mnt_vfc->vfc_typenum;
                bcopy((caddr_t)mp->mnt_stat.f_mntonname,
                    (caddr_t)&sbp->f_mntonname[0], MNAMELEN);
                bcopy((caddr_t)mp->mnt_stat.f_mntfromname,
                    (caddr_t)&sbp->f_mntfromname[0], MNAMELEN);
        }
        return (0);
}
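A note on units, as a small worked example with assumed numbers: f_bsize
above is set to fs_fsize, so the free-space counts are fragment-denominated.

/*
 * With fs_frag = 8, cs_nbfree = 100 free full blocks, cs_nffree = 37
 * loose fragments, and no pending blocks:
 *
 *      f_bfree = 100 * 8 + 37 + 0 = 837 fragments.
 */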
/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ffs_sync(mp, waitfor, cred, td)
        struct mount *mp;
        int waitfor;
        struct ucred *cred;
        struct thread *td;
{
        struct vnode *nvp, *vp, *devvp;
        struct inode *ip;
        struct ufsmount *ump = VFSTOUFS(mp);
        struct fs *fs;
        int error, count, wait, lockreq, allerror = 0;

        fs = ump->um_fs;
        if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {            /* XXX */
                printf("fs = %s\n", fs->fs_fsmnt);
                panic("ffs_sync: rofs mod");
        }
        /*
         * Write back each (modified) inode.
         */
        wait = 0;
        lockreq = LK_EXCLUSIVE | LK_NOWAIT;
        if (waitfor == MNT_WAIT) {
                wait = 1;
                lockreq = LK_EXCLUSIVE;
        }
        mtx_lock(&mntvnode_mtx);
loop:
        for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
                /*
                 * If the vnode that we are about to sync is no longer
                 * associated with this mount point, start over.
                 */
                if (vp->v_mount != mp)
                        goto loop;

                /*
                 * Depend on the mntvnode_slock to keep things stable enough
                 * for a quick test.  Since there might be hundreds of
                 * thousands of vnodes, we cannot afford even a subroutine
                 * call unless there's a good chance that we have work to do.
                 */
                nvp = TAILQ_NEXT(vp, v_nmntvnodes);
                ip = VTOI(vp);
                if (vp->v_type == VNON || ((ip->i_flag &
                    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
                    TAILQ_EMPTY(&vp->v_dirtyblkhd))) {
                        continue;
                }
                if (vp->v_type != VCHR) {
                        mtx_unlock(&mntvnode_mtx);
                        if ((error = vget(vp, lockreq, td)) != 0) {
mtx_lock(&mntvnode_mtx);
|
1997-02-10 02:22:35 +00:00
|
|
|
if (error == ENOENT)
|
|
|
|
goto loop;
|
2001-10-26 00:08:05 +00:00
|
|
|
} else {
|
|
|
|
if ((error = VOP_FSYNC(vp, cred, waitfor, td)) != 0)
|
|
|
|
allerror = error;
|
|
|
|
VOP_UNLOCK(vp, 0, td);
|
|
|
|
vrele(vp);
|
|
|
|
mtx_lock(&mntvnode_mtx);
|
1997-02-10 02:22:35 +00:00
|
|
|
}
|
1995-04-11 04:23:47 +00:00
|
|
|
} else {
|
2001-10-26 00:08:05 +00:00
|
|
|
mtx_unlock(&mntvnode_mtx);
|
2000-07-24 05:28:33 +00:00
|
|
|
UFS_UPDATE(vp, wait);
|
2001-10-26 00:08:05 +00:00
|
|
|
mtx_lock(&mntvnode_mtx);
|
1995-04-11 04:23:47 +00:00
|
|
|
}
|
2001-10-26 00:08:05 +00:00
|
|
|
if (TAILQ_NEXT(vp, v_nmntvnodes) != nvp)
|
|
|
|
goto loop;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
Change and clean the mutex lock interface.
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&mntvnode_mtx);
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Force stale file system control information to be flushed.
|
|
|
|
*/
|
2000-07-24 05:28:33 +00:00
|
|
|
if (waitfor == MNT_WAIT) {
|
2001-09-12 08:38:13 +00:00
|
|
|
if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
|
2000-07-24 05:28:33 +00:00
|
|
|
allerror = error;
|
|
|
|
/* Flushed work items may create new vnodes to clean */
|
|
|
|
if (count) {
|
Change and clean the mutex lock interface.
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&mntvnode_mtx);
|
2000-07-24 05:28:33 +00:00
|
|
|
goto loop;
|
|
|
|
}
|
|
|
|
}
|
2001-03-07 07:09:55 +00:00
|
|
|
#ifdef QUOTA
|
|
|
|
qsync(mp);
|
|
|
|
#endif
|
2001-04-25 08:11:18 +00:00
|
|
|
devvp = ump->um_devvp;
|
|
|
|
mtx_lock(&devvp->v_interlock);
|
|
|
|
if (waitfor != MNT_LAZY &&
|
|
|
|
(devvp->v_numoutput > 0 || TAILQ_FIRST(&devvp->v_dirtyblkhd))) {
|
|
|
|
mtx_unlock(&devvp->v_interlock);
|
2001-09-12 08:38:13 +00:00
|
|
|
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
|
|
|
|
if ((error = VOP_FSYNC(devvp, cred, waitfor, td)) != 0)
|
1998-03-08 09:59:44 +00:00
|
|
|
allerror = error;
|
2001-09-12 08:38:13 +00:00
|
|
|
VOP_UNLOCK(devvp, 0, td);
|
2001-04-25 08:11:18 +00:00
|
|
|
if (waitfor == MNT_WAIT) {
|
|
|
|
mtx_lock(&mntvnode_mtx);
|
|
|
|
goto loop;
|
|
|
|
}
|
|
|
|
} else
|
|
|
|
mtx_unlock(&devvp->v_interlock);
|
1997-02-10 02:22:35 +00:00
|
|
|
/*
|
|
|
|
* Write back modified superblock.
|
|
|
|
*/
|
1998-03-08 09:59:44 +00:00
|
|
|
if (fs->fs_fmod != 0 && (error = ffs_sbupdate(ump, waitfor)) != 0)
|
|
|
|
allerror = error;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (allerror);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Look up a FFS dinode number to find its incore vnode, otherwise read it
|
|
|
|
* in from disk. If it is in core, wait for the lock bit to clear, then
|
|
|
|
* return the inode locked. Detection and handling of mount points must be
|
|
|
|
* done by the calling routine.
|
|
|
|
*/
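/*
 * Caller-side sketch (an illustration, not code from this file): for
 * FFS, VFS_VGET() resolves to ffs_vget(); error handling is elided.
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = VFS_VGET(mp, ino, &vp);
 *	if (error == 0) {
 *		... use the locked vnode ...
 *		vput(vp);		(unlock and release)
 *	}
 */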
|
1995-12-17 21:14:36 +00:00
|
|
|
static int ffs_inode_hash_lock;
|
2000-12-13 10:04:01 +00:00
|
|
|
/*
|
|
|
|
* ffs_inode_hash_lock is a variable to manage mutual exclusion
|
|
|
|
 * of vnode allocation and insertion into the hash, especially to
|
|
|
|
 * avoid holding more than one vnode for the same inode in the
|
|
|
|
* hash table. ffs_inode_hash_lock must hence be tested-and-set
|
|
|
|
* or cleared atomically, accomplished by ffs_inode_hash_mtx.
|
|
|
|
*
|
|
|
|
* As vnode allocation may block during MALLOC() and zone
|
|
|
|
 * allocation, we also msleep() to yield the CPU
|
|
|
|
* if anyone else is allocating a vnode. lockmgr is not suitable
|
|
|
|
 * here because someone else may insert the vnode we are trying
|
|
|
|
 * to allocate into the hash table during our sleep, in which
|
|
|
|
* case the hash table needs to be examined once again after
|
|
|
|
* waking up.
|
|
|
|
*/
|
|
|
|
static struct mtx ffs_inode_hash_mtx;
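/*
 * The protocol in outline, distilled from the code below (a sketch,
 * not a second implementation):
 *
 *	mtx_lock(&ffs_inode_hash_mtx);
 *	while (ffs_inode_hash_lock) {
 *		ffs_inode_hash_lock = -1;	(record that a waiter exists)
 *		msleep(&ffs_inode_hash_lock, &ffs_inode_hash_mtx,
 *		    PVM, "ffsvgt", 0);
 *	}
 *	ffs_inode_hash_lock = 1;		(we own the allocation path)
 *	mtx_unlock(&ffs_inode_hash_mtx);
 *	... allocate the inode and vnode, possibly blocking ...
 *	mtx_lock(&ffs_inode_hash_mtx);
 *	want_wakeup = ffs_inode_hash_lock < 0;
 *	ffs_inode_hash_lock = 0;
 *	mtx_unlock(&ffs_inode_hash_mtx);
 *	if (want_wakeup)
 *		wakeup(&ffs_inode_hash_lock);
 */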
|
1995-07-21 03:52:40 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
int
|
|
|
|
ffs_vget(mp, ino, vpp)
|
|
|
|
struct mount *mp;
|
|
|
|
ino_t ino;
|
|
|
|
struct vnode **vpp;
|
|
|
|
{
|
1997-02-10 02:22:35 +00:00
|
|
|
struct fs *fs;
|
|
|
|
struct inode *ip;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct ufsmount *ump;
|
|
|
|
struct buf *bp;
|
|
|
|
struct vnode *vp;
|
|
|
|
dev_t dev;
|
2000-12-13 10:04:01 +00:00
|
|
|
int error, want_wakeup;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
ump = VFSTOUFS(mp);
|
|
|
|
dev = ump->um_dev;
|
1995-07-21 16:20:20 +00:00
|
|
|
restart:
|
This mega-commit is meant to fix numerous interrelated problems. There
has been some bitrot and incorrect assumptions in the vfs_bio code. These
problems have manifested themselves worse on NFS-type filesystems, but can
still affect local filesystems under certain circumstances. Most of
the problems have involved mmap consistency, and as a side effect broke
the vfs.ioopt code. This code might have been committed separately, but
almost everything is interrelated.
1) Allow (pmap_object_init_pt) prefaulting of buffer-busy pages that
are fully valid.
2) Rather than deactivating erroneously read initial (header) pages in
kern_exec, we now free them.
3) Fix the rundown of non-VMIO buffers that are in an inconsistent
(missing vp) state.
4) Fix the disassociation of pages from buffers in brelse. The previous
code had rotted and was faulty in a couple of important circumstances.
5) Remove a gratuitous buffer wakeup in vfs_vmio_release.
6) Remove a crufty and currently unused cluster mechanism for VBLK
files in vfs_bio_awrite. When the code is functional, I'll add back
a cleaner version.
7) The page busy count wakeups associated with the buffer cache usage were
incorrectly cleaned up in a previous commit by me. Revert to the
original, correct version, but with a cleaner implementation.
8) The cluster read code now tries to keep data associated with buffers
more aggressively (without breaking the heuristics) when it is presumed
that the read data (buffers) will be soon needed.
9) Change to filesystem lockmgr locks so that they use LK_NOPAUSE. The
delay loop waiting is not useful for filesystem locks, due to the
length of the time intervals.
10) Correct and clean-up spec_getpages.
11) Implement a fully functional nfs_getpages, nfs_putpages.
12) Fix nfs_write so that modifications are coherent with the NFS data on
the server disk (at least as well as NFS seems to allow.)
13) Properly support MS_INVALIDATE on NFS.
14) Properly pass down MS_INVALIDATE to lower levels of the VM code from
vm_map_clean.
15) Better support the notion of pages being busy but valid, so that
fewer in-transit waits occur. (use p->busy more for pageouts instead
of PG_BUSY.) Since the page is fully valid, it is still usable for
reads.
16) It is possible (in error) for cached pages to be busy. Make the
page allocation code handle that case correctly. (It should probably
be a printf or panic, but I want the system to handle coding errors
robustly. I'll probably add a printf.)
17) Correct the design and usage of vm_page_sleep. It didn't handle
consistency problems very well, so make the design a little less
lofty. After vm_page_sleep, if it ever blocked, it is still important
to relookup the page (if the object generation count changed), and
verify its status (always).
18) In vm_pageout.c, vm_pageout_clean had rotted, so clean that up.
19) Push the page busy for writes and VM_PROT_READ into vm_pageout_flush.
20) Fix vm_pager_put_pages and it's descendents to support an int flag
instead of a boolean, so that we can pass down the invalidate bit.
1998-03-07 21:37:31 +00:00
|
|
|
if ((*vpp = ufs_ihashget(dev, ino)) != NULL) {
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
This mega-commit is meant to fix numerous interrelated problems.
1998-03-07 21:37:31 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1995-07-21 03:52:40 +00:00
|
|
|
/*
|
1995-07-21 16:20:20 +00:00
|
|
|
* Lock out the creation of new entries in the FFS hash table in
|
|
|
|
* case getnewvnode() or MALLOC() blocks, otherwise a duplicate
|
1995-07-21 03:52:40 +00:00
|
|
|
 * vnode for the same inode may be created.
|
|
|
|
*/
|
Change and clean the mutex lock interface.
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&ffs_inode_hash_mtx);
|
1995-07-21 03:52:40 +00:00
|
|
|
if (ffs_inode_hash_lock) {
|
|
|
|
while (ffs_inode_hash_lock) {
|
|
|
|
ffs_inode_hash_lock = -1;
|
2000-12-13 10:04:01 +00:00
|
|
|
msleep(&ffs_inode_hash_lock, &ffs_inode_hash_mtx, PVM, "ffsvgt", 0);
|
1995-07-21 03:52:40 +00:00
|
|
|
}
|
Change and clean the mutex lock interface.
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ffs_inode_hash_mtx);
|
1995-07-21 16:20:20 +00:00
|
|
|
goto restart;
|
1995-07-21 03:52:40 +00:00
|
|
|
}
|
|
|
|
ffs_inode_hash_lock = 1;
|
Change and clean the mutex lock interface.
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ffs_inode_hash_mtx);
|
1995-07-21 03:52:40 +00:00
|
|
|
|
1996-06-12 03:37:57 +00:00
|
|
|
/*
|
|
|
|
* If this MALLOC() is performed after the getnewvnode()
|
|
|
|
* it might block, leaving a vnode with a NULL v_data to be
|
|
|
|
* found by ffs_sync() if a sync happens to fire right then,
|
|
|
|
* which will cause a panic because ffs_sync() blindly
|
|
|
|
* dereferences vp->v_data (as well it should).
|
|
|
|
*/
|
1997-10-10 18:17:00 +00:00
|
|
|
MALLOC(ip, struct inode *, sizeof(struct inode),
|
|
|
|
ump->um_malloctype, M_WAITOK);
|
1996-06-12 03:37:57 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/* Allocate a new vnode/inode. */
|
1994-10-08 06:20:06 +00:00
|
|
|
error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp);
|
|
|
|
if (error) {
|
2000-12-13 10:04:01 +00:00
|
|
|
/*
|
|
|
|
* Do not wake up processes while holding the mutex,
|
|
|
|
 * otherwise the awakened processes would immediately
|
|
|
|
 * block on the mutex again.
|
|
|
|
*/
|
Change and clean the mutex lock interface.
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&ffs_inode_hash_mtx);
|
2000-12-13 10:04:01 +00:00
|
|
|
want_wakeup = ffs_inode_hash_lock < 0;
|
1995-07-21 03:52:40 +00:00
|
|
|
ffs_inode_hash_lock = 0;
|
Change and clean the mutex lock interface.
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ffs_inode_hash_mtx);
|
2000-12-13 10:04:01 +00:00
|
|
|
if (want_wakeup)
|
|
|
|
wakeup(&ffs_inode_hash_lock);
|
1994-05-24 10:09:53 +00:00
|
|
|
*vpp = NULL;
|
1997-10-10 18:17:00 +00:00
|
|
|
FREE(ip, ump->um_malloctype);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
bzero((caddr_t)ip, sizeof(struct inode));
|
2000-09-25 15:24:04 +00:00
|
|
|
/*
|
|
|
|
* FFS supports lock sharing in the stack of vnodes
|
|
|
|
*/
|
|
|
|
vp->v_vnlock = &vp->v_lock;
|
2001-12-20 22:42:27 +00:00
|
|
|
lockinit(vp->v_vnlock, PINOD, "inode", VLKTIMEOUT, LK_CANRECURSE);
|
1994-05-24 10:09:53 +00:00
|
|
|
vp->v_data = ip;
|
|
|
|
ip->i_vnode = vp;
|
|
|
|
ip->i_fs = fs = ump->um_fs;
|
|
|
|
ip->i_dev = dev;
|
|
|
|
ip->i_number = ino;
|
|
|
|
#ifdef QUOTA
|
1994-10-10 01:04:55 +00:00
|
|
|
{
|
1995-07-21 03:52:40 +00:00
|
|
|
int i;
|
|
|
|
for (i = 0; i < MAXQUOTAS; i++)
|
|
|
|
ip->i_dquot[i] = NODQUOT;
|
1994-10-10 01:04:55 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
#endif
|
|
|
|
/*
|
|
|
|
* Put it onto its hash chain and lock it so that other requests for
|
|
|
|
* this inode will block if they arrive while we are sleeping waiting
|
|
|
|
* for old data structures to be purged or for the contents of the
|
|
|
|
* disk portion of this inode to be read.
|
|
|
|
*/
|
|
|
|
ufs_ihashins(ip);
|
|
|
|
|
2000-12-13 10:04:01 +00:00
|
|
|
/*
|
|
|
|
* Do not wake up processes while holding the mutex,
|
|
|
|
 * otherwise the awakened processes would immediately
|
|
|
|
 * block on the mutex again.
|
|
|
|
*/
|
Change and clean the mutex lock interface.
2001-02-09 06:11:45 +00:00
|
|
|
mtx_lock(&ffs_inode_hash_mtx);
|
2000-12-13 10:04:01 +00:00
|
|
|
want_wakeup = ffs_inode_hash_lock < 0;
|
1995-07-21 03:52:40 +00:00
|
|
|
ffs_inode_hash_lock = 0;
|
Change and clean the mutex lock interface.
2001-02-09 06:11:45 +00:00
|
|
|
mtx_unlock(&ffs_inode_hash_mtx);
|
2000-12-13 10:04:01 +00:00
|
|
|
if (want_wakeup)
|
|
|
|
wakeup(&ffs_inode_hash_lock);
|
1995-07-21 03:52:40 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/* Read in the disk contents for the inode, copy into the inode. */
|
1994-10-08 06:20:06 +00:00
|
|
|
error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
|
|
|
|
(int)fs->fs_bsize, NOCRED, &bp);
|
|
|
|
if (error) {
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* The inode does not contain anything useful, so it would
|
|
|
|
* be misleading to leave it on its hash chain. With mode
|
|
|
|
* still zero, it will be unlinked and returned to the free
|
|
|
|
* list by vput().
|
|
|
|
*/
|
|
|
|
brelse(bp);
|
1996-01-19 04:00:31 +00:00
|
|
|
vput(vp);
|
1994-05-24 10:09:53 +00:00
|
|
|
*vpp = NULL;
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
ip->i_din = *((struct dinode *)bp->b_data + ino_to_fsbo(fs, ino));
|
1998-03-08 09:59:44 +00:00
|
|
|
if (DOINGSOFTDEP(vp))
|
|
|
|
softdep_load_inodeblock(ip);
|
|
|
|
else
|
|
|
|
ip->i_effnlink = ip->i_nlink;
|
1996-01-19 04:00:31 +00:00
|
|
|
bqrelse(bp);
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize the vnode from the inode, check for aliases.
|
|
|
|
* Note that the underlying vnode may have changed.
|
|
|
|
*/
|
1996-02-25 20:12:36 +00:00
|
|
|
error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
|
1994-10-08 06:20:06 +00:00
|
|
|
if (error) {
|
1994-05-24 10:09:53 +00:00
|
|
|
vput(vp);
|
|
|
|
*vpp = NULL;
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Finish inode initialization now that aliasing has been resolved.
|
|
|
|
*/
|
|
|
|
ip->i_devvp = ump->um_devvp;
|
|
|
|
VREF(ip->i_devvp);
|
|
|
|
/*
|
|
|
|
* Set up a generation number for this inode if it does not
|
|
|
|
* already have one. This should only happen on old filesystems.
|
|
|
|
*/
|
|
|
|
if (ip->i_gen == 0) {
|
1997-03-23 20:08:22 +00:00
|
|
|
ip->i_gen = random() / 2 + 1;
|
1994-05-24 10:09:53 +00:00
|
|
|
if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
|
|
|
|
ip->i_flag |= IN_MODIFIED;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* Ensure that uid and gid are correct. This is a temporary
|
|
|
|
* fix until fsck has been changed to do the update.
|
|
|
|
*/
|
|
|
|
if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
|
|
|
|
ip->i_uid = ip->i_din.di_ouid; /* XXX */
|
|
|
|
ip->i_gid = ip->i_din.di_ogid; /* XXX */
|
|
|
|
} /* XXX */
|
|
|
|
|
|
|
|
*vpp = vp;
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* File handle to vnode
|
|
|
|
*
|
|
|
|
* Have to be really careful about stale file handles:
|
|
|
|
* - check that the inode number is valid
|
|
|
|
* - call ffs_vget() to get the locked inode
|
|
|
|
* - check for an unallocated inode (i_mode == 0)
|
|
|
|
* - check that the given client host has export rights and return
|
|
|
|
 * those rights via exflagsp and credanonp
|
|
|
|
*/
|
|
|
|
int
|
1999-09-11 00:46:08 +00:00
|
|
|
ffs_fhtovp(mp, fhp, vpp)
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct mount *mp;
|
|
|
|
struct fid *fhp;
|
|
|
|
struct vnode **vpp;
|
|
|
|
{
|
|
|
|
register struct ufid *ufhp;
|
|
|
|
struct fs *fs;
|
|
|
|
|
|
|
|
ufhp = (struct ufid *)fhp;
|
|
|
|
fs = VFSTOUFS(mp)->um_fs;
|
|
|
|
if (ufhp->ufid_ino < ROOTINO ||
|
|
|
|
ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
|
|
|
|
return (ESTALE);
|
1999-09-11 00:46:08 +00:00
|
|
|
return (ufs_fhtovp(mp, ufhp, vpp));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Vnode pointer to File handle
|
|
|
|
*/
|
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
1994-05-24 10:09:53 +00:00
|
|
|
ffs_vptofh(vp, fhp)
|
|
|
|
struct vnode *vp;
|
|
|
|
struct fid *fhp;
|
|
|
|
{
|
|
|
|
register struct inode *ip;
|
|
|
|
register struct ufid *ufhp;
|
|
|
|
|
|
|
|
ip = VTOI(vp);
|
|
|
|
ufhp = (struct ufid *)fhp;
|
|
|
|
ufhp->ufid_len = sizeof(struct ufid);
|
|
|
|
ufhp->ufid_ino = ip->i_number;
|
|
|
|
ufhp->ufid_gen = ip->i_gen;
|
|
|
|
return (0);
|
|
|
|
}
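/*
 * Round-trip sketch (an illustration; the NFS export code is the real
 * caller, and error handling is elided):
 *
 *	struct fid fh;
 *	struct vnode *nvp;
 *	int error;
 *
 *	error = VFS_VPTOFH(vp, &fh);		(encode the file handle)
 *	...
 *	error = VFS_FHTOVP(mp, &fh, &nvp);	(decode; returns locked vnode)
 *	if (error == 0)
 *		vput(nvp);
 */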
|
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
/*
|
|
|
|
* Initialize the filesystem; just use ufs_init.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
ffs_init(vfsp)
|
|
|
|
struct vfsconf *vfsp;
|
|
|
|
{
|
|
|
|
|
1998-03-08 09:59:44 +00:00
|
|
|
softdep_initialize();
|
2000-12-13 10:04:01 +00:00
|
|
|
mtx_init(&ffs_inode_hash_mtx, "ifsvgt", MTX_DEF);
|
1997-02-10 02:22:35 +00:00
|
|
|
return (ufs_init(vfsp));
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Write a superblock and associated information back to disk.
|
|
|
|
*/
|
1995-12-17 21:14:36 +00:00
|
|
|
static int
|
1994-05-24 10:09:53 +00:00
|
|
|
ffs_sbupdate(mp, waitfor)
|
|
|
|
struct ufsmount *mp;
|
|
|
|
int waitfor;
|
|
|
|
{
|
1997-02-10 02:22:35 +00:00
|
|
|
register struct fs *dfs, *fs = mp->um_fs;
|
1994-05-24 10:09:53 +00:00
|
|
|
register struct buf *bp;
|
|
|
|
int blks;
|
2001-01-15 18:30:40 +00:00
|
|
|
void *space;
|
1997-02-10 02:22:35 +00:00
|
|
|
int i, size, error, allerror = 0;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1997-02-10 02:22:35 +00:00
|
|
|
/*
|
|
|
|
* First write back the summary information.
|
|
|
|
*/
|
1994-05-24 10:09:53 +00:00
|
|
|
blks = howmany(fs->fs_cssize, fs->fs_fsize);
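	/*
	 * Worked example with hypothetical geometry: for fs_fsize = 1024,
	 * fs_bsize = 8192 (fs_frag = 8) and fs_cssize = 12288, blks =
	 * howmany(12288, 1024) = 12, so the loop below issues one full
	 * 8192-byte write and one trailing (12 - 8) * 1024 = 4096-byte
	 * write.
	 */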
|
2001-01-15 18:30:40 +00:00
|
|
|
space = fs->fs_csp;
|
1994-05-24 10:09:53 +00:00
|
|
|
for (i = 0; i < blks; i += fs->fs_frag) {
|
|
|
|
size = fs->fs_bsize;
|
|
|
|
if (i + fs->fs_frag > blks)
|
|
|
|
size = (blks - i) * fs->fs_fsize;
|
|
|
|
bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
|
|
|
|
size, 0, 0);
|
|
|
|
bcopy(space, bp->b_data, (u_int)size);
|
2001-01-15 18:30:40 +00:00
|
|
|
space = (char *)space + size;
|
1997-02-10 02:22:35 +00:00
|
|
|
if (waitfor != MNT_WAIT)
|
1994-05-24 10:09:53 +00:00
|
|
|
bawrite(bp);
|
1999-01-28 00:57:57 +00:00
|
|
|
else if ((error = bwrite(bp)) != 0)
|
1997-02-10 02:22:35 +00:00
|
|
|
allerror = error;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
1997-02-10 02:22:35 +00:00
|
|
|
/*
|
|
|
|
* Now write back the superblock itself. If any errors occurred
|
|
|
|
 * up to this point, fail now so that the superblock is not
|
|
|
|
 * written out as clean.
|
|
|
|
*/
|
|
|
|
if (allerror)
|
|
|
|
return (allerror);
|
|
|
|
bp = getblk(mp->um_devvp, SBLOCK, (int)fs->fs_sbsize, 0, 0);
|
1998-03-08 09:59:44 +00:00
|
|
|
fs->fs_fmod = 0;
|
1998-03-30 09:56:58 +00:00
|
|
|
fs->fs_time = time_second;
|
1997-02-10 02:22:35 +00:00
|
|
|
bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
|
|
|
|
/* Restore compatibility to old file systems. XXX */
|
|
|
|
dfs = (struct fs *)bp->b_data; /* XXX */
|
|
|
|
if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
|
|
|
|
dfs->fs_nrpos = -1; /* XXX */
|
|
|
|
if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
|
|
|
|
int32_t *lp, tmp; /* XXX */
|
|
|
|
/* XXX */
|
|
|
|
lp = (int32_t *)&dfs->fs_qbmask; /* XXX */
|
|
|
|
tmp = lp[4]; /* XXX */
|
|
|
|
for (i = 4; i > 0; i--) /* XXX */
|
|
|
|
lp[i] = lp[i-1]; /* XXX */
|
|
|
|
lp[0] = tmp; /* XXX */
|
|
|
|
} /* XXX */
|
|
|
|
dfs->fs_maxfilesize = mp->um_savedmaxfilesize; /* XXX */
|
|
|
|
if (waitfor != MNT_WAIT)
|
|
|
|
bawrite(bp);
|
1999-01-28 00:57:57 +00:00
|
|
|
else if ((error = bwrite(bp)) != 0)
|
1997-02-10 02:22:35 +00:00
|
|
|
allerror = error;
|
|
|
|
return (allerror);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|