freebsd-skq/sys/msdosfs/msdosfs_vfsops.c

/* $FreeBSD$ */
/* $NetBSD: msdosfs_vfsops.c,v 1.51 1997/11/17 15:36:58 ws Exp $ */
/*-
* Copyright (C) 1994, 1995, 1997 Wolfgang Solfrank.
* Copyright (C) 1994, 1995, 1997 TooLs GmbH.
* All rights reserved.
* Original code by Paul Popelka (paulp@uts.amdahl.com) (see below).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by TooLs GmbH.
* 4. The name of TooLs GmbH may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* Written by Paul Popelka (paulp@uts.amdahl.com)
*
* You can do anything you want with this software, just don't say you wrote
* it, and don't remove this notice.
*
* This software is provided "as is".
*
* The author supplies this software to be publicly redistributed on the
* understanding that the author is not responsible for the correct
* functioning of this software in any circumstances and is not liable for
* any damages caused by this software.
*
* October 1992
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/stat.h> /* defines ALLPERMS */
#include <sys/mutex.h>
#include <msdosfs/bpb.h>
#include <msdosfs/bootsect.h>
#include <msdosfs/direntry.h>
#include <msdosfs/denode.h>
#include <msdosfs/msdosfsmount.h>
#include <msdosfs/fat.h>
#define MSDOSFS_DFLTBSIZE 4096
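/*
 * MSDOSFS_DFLTBSIZE is only the default buffer size used for caching
 * the FAT on FAT16/FAT32 volumes (see the pm_fatblocksize setup
 * below); FAT12 volumes use 3 * bytes-per-sector instead, so that no
 * 12-bit FAT entry ever straddles two buffers.
 */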
#if 1 /*def PC98*/
/*
* XXX - The boot signature written by NEC PC-98 DOS looks like
* garbage or a random value :-{
* If you want to use such broken-signature media, define the
* following symbol even on a PC/AT.
* (e.g. mount a PC-98 DOS formatted FD on a PC/AT)
*/
#define MSDOSFS_NOCHECKSIG
#endif
MALLOC_DEFINE(M_MSDOSFSMNT, "MSDOSFS mount", "MSDOSFS mount structure");
static MALLOC_DEFINE(M_MSDOSFSFAT, "MSDOSFS FAT", "MSDOSFS file allocation table");
static int update_mp __P((struct mount *mp, struct msdosfs_args *argp));
static int mountmsdosfs __P((struct vnode *devvp, struct mount *mp,
struct proc *p, struct msdosfs_args *argp));
static int msdosfs_fhtovp __P((struct mount *, struct fid *,
struct vnode **));
static int msdosfs_checkexp __P((struct mount *, struct sockaddr *,
int *, struct ucred **));
static int msdosfs_mount __P((struct mount *, char *, caddr_t,
struct nameidata *, struct proc *));
static int msdosfs_root __P((struct mount *, struct vnode **));
static int msdosfs_statfs __P((struct mount *, struct statfs *,
struct proc *));
static int msdosfs_sync __P((struct mount *, int, struct ucred *,
struct proc *));
static int msdosfs_unmount __P((struct mount *, int, struct proc *));
static int msdosfs_vptofh __P((struct vnode *, struct fid *));
static int
update_mp(mp, argp)
struct mount *mp;
struct msdosfs_args *argp;
{
struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
int error;
pmp->pm_gid = argp->gid;
pmp->pm_uid = argp->uid;
pmp->pm_mask = argp->mask & ALLPERMS;
pmp->pm_flags |= argp->flags & MSDOSFSMNT_MNTOPT;
if (pmp->pm_flags & MSDOSFSMNT_U2WTABLE) {
bcopy(argp->u2w, pmp->pm_u2w, sizeof(pmp->pm_u2w));
bcopy(argp->d2u, pmp->pm_d2u, sizeof(pmp->pm_d2u));
bcopy(argp->u2d, pmp->pm_u2d, sizeof(pmp->pm_u2d));
}
if (pmp->pm_flags & MSDOSFSMNT_ULTABLE) {
bcopy(argp->ul, pmp->pm_ul, sizeof(pmp->pm_ul));
bcopy(argp->lu, pmp->pm_lu, sizeof(pmp->pm_lu));
}
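/*
 * The tables copied above come from userland (mount_msdosfs(8)):
 * u2w/d2u/u2d translate between the local character set, the DOS
 * character set used in short names and the Unicode used in Win'95
 * long name entries, while ul/lu supply upper-/lower-case mappings.
 * They are copied verbatim and not validated here.
 */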
#ifndef __FreeBSD__
/*
* GEMDOS knows nothing (yet) about win95
*/
if (pmp->pm_flags & MSDOSFSMNT_GEMDOSFS)
pmp->pm_flags |= MSDOSFSMNT_NOWIN95;
#endif
if (pmp->pm_flags & MSDOSFSMNT_NOWIN95)
pmp->pm_flags |= MSDOSFSMNT_SHORTNAME;
else if (!(pmp->pm_flags &
(MSDOSFSMNT_SHORTNAME | MSDOSFSMNT_LONGNAME))) {
struct vnode *rootvp;
/*
* Try to divine whether to support Win'95 long filenames
*/
if (FAT32(pmp))
pmp->pm_flags |= MSDOSFSMNT_LONGNAME;
else {
if ((error = msdosfs_root(mp, &rootvp)) != 0)
return error;
pmp->pm_flags |= findwin95(VTODE(rootvp))
? MSDOSFSMNT_LONGNAME
: MSDOSFSMNT_SHORTNAME;
vput(rootvp);
}
}
return 0;
}
#ifndef __FreeBSD__
int
msdosfs_mountroot()
{
register struct mount *mp;
struct proc *p = curproc; /* XXX */
size_t size;
int error;
struct msdosfs_args args;
if (root_device->dv_class != DV_DISK)
return (ENODEV);
/*
* Get vnodes for swapdev and rootdev.
*/
if (bdevvp(rootdev, &rootvp))
panic("msdosfs_mountroot: can't setup rootvp");
mp = malloc((u_long)sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
mp->mnt_op = &msdosfs_vfsops;
mp->mnt_flag = 0;
LIST_INIT(&mp->mnt_vnodelist);
args.flags = 0;
args.uid = 0;
args.gid = 0;
args.mask = 0777;
if ((error = mountmsdosfs(rootvp, mp, p, &args)) != 0) {
free(mp, M_MOUNT);
return (error);
}
if ((error = update_mp(mp, &args)) != 0) {
(void)msdosfs_unmount(mp, 0, p);
free(mp, M_MOUNT);
return (error);
}
if ((error = vfs_lock(mp)) != 0) {
(void)msdosfs_unmount(mp, 0, p);
free(mp, M_MOUNT);
return (error);
}
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
mp->mnt_vnodecovered = NULLVP;
(void) copystr("/", mp->mnt_stat.f_mntonname, MNAMELEN - 1,
&size);
bzero(mp->mnt_stat.f_mntonname + size, MNAMELEN - size);
(void) copystr(ROOTNAME, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
&size);
bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
(void)msdosfs_statfs(mp, &mp->mnt_stat, p);
vfs_unlock(mp);
return (0);
}
#endif
/*
* mp - path - addr in user space of mount point (ie /usr or whatever)
* data - addr in user space of mount params including the name of the block
* special file to treat as a filesystem.
*/
static int
msdosfs_mount(mp, path, data, ndp, p)
struct mount *mp;
char *path;
caddr_t data;
struct nameidata *ndp;
struct proc *p;
{
struct vnode *devvp; /* vnode for blk device to mount */
struct msdosfs_args args; /* will hold data from mount request */
/* msdosfs specific mount control block */
struct msdosfsmount *pmp = NULL;
size_t size;
int error, flags;
mode_t accessmode;
error = copyin(data, (caddr_t)&args, sizeof(struct msdosfs_args));
if (error)
return (error);
if (args.magic != MSDOSFS_ARGSMAGIC)
args.flags = 0;
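/*
 * The magic check above is presumably for compatibility with older
 * mount binaries whose msdosfs_args carry no valid flags word; when
 * the magic is missing the flags are simply ignored.
 */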
/*
* If updating, check whether changing from read-only to
* read/write; if there is no device name, that's all we do.
*/
if (mp->mnt_flag & MNT_UPDATE) {
pmp = VFSTOMSDOSFS(mp);
error = 0;
if (!(pmp->pm_flags & MSDOSFSMNT_RONLY) && (mp->mnt_flag & MNT_RDONLY)) {
flags = WRITECLOSE;
if (mp->mnt_flag & MNT_FORCE)
flags |= FORCECLOSE;
error = vflush(mp, NULLVP, flags);
}
if (!error && (mp->mnt_flag & MNT_RELOAD))
/* not yet implemented */
error = EOPNOTSUPP;
if (error)
return (error);
if ((pmp->pm_flags & MSDOSFSMNT_RONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
/*
* If upgrade to read-write by non-root, then verify
* that user has necessary permissions on the device.
*/
if (p->p_ucred->cr_uid != 0) {
devvp = pmp->pm_devvp;
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
error = VOP_ACCESS(devvp, VREAD | VWRITE,
p->p_ucred, p);
if (error) {
VOP_UNLOCK(devvp, 0, p);
return (error);
}
VOP_UNLOCK(devvp, 0, p);
}
pmp->pm_flags &= ~MSDOSFSMNT_RONLY;
}
if (args.fspec == 0) {
#ifdef __notyet__ /* doesn't work correctly with current mountd XXX */
if (args.flags & MSDOSFSMNT_MNTOPT) {
pmp->pm_flags &= ~MSDOSFSMNT_MNTOPT;
pmp->pm_flags |= args.flags & MSDOSFSMNT_MNTOPT;
if (pmp->pm_flags & MSDOSFSMNT_NOWIN95)
pmp->pm_flags |= MSDOSFSMNT_SHORTNAME;
}
#endif
/*
* Process export requests.
*/
return (vfs_export(mp, &pmp->pm_export, &args.export));
}
}
/*
* Not an update, or updating the name: look up the name
* and verify that it refers to a sensible block device.
*/
NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
error = namei(ndp);
if (error)
return (error);
devvp = ndp->ni_vp;
NDFREE(ndp, NDF_ONLY_PNBUF);
if (!vn_isdisk(devvp, &error)) {
vrele(devvp);
return (error);
}
/*
* If mount by non-root, then verify that user has necessary
* permissions on the device.
*/
if (p->p_ucred->cr_uid != 0) {
accessmode = VREAD;
if ((mp->mnt_flag & MNT_RDONLY) == 0)
accessmode |= VWRITE;
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
if (error) {
vput(devvp);
return (error);
}
VOP_UNLOCK(devvp, 0, p);
}
if ((mp->mnt_flag & MNT_UPDATE) == 0) {
error = mountmsdosfs(devvp, mp, p, &args);
#ifdef MSDOSFS_DEBUG /* only needed for the printf below */
pmp = VFSTOMSDOSFS(mp);
#endif
} else {
if (devvp != pmp->pm_devvp)
error = EINVAL; /* XXX needs translation */
else
vrele(devvp);
}
if (error) {
vrele(devvp);
return (error);
}
error = update_mp(mp, &args);
if (error) {
msdosfs_unmount(mp, MNT_FORCE, p);
return error;
}
(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
&size);
bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
(void) msdosfs_statfs(mp, &mp->mnt_stat, p);
#ifdef MSDOSFS_DEBUG
printf("msdosfs_mount(): mp %p, pmp %p, inusemap %p\n", mp, pmp, pmp->pm_inusemap);
#endif
return (0);
}
static int
mountmsdosfs(devvp, mp, p, argp)
struct vnode *devvp;
struct mount *mp;
struct proc *p;
struct msdosfs_args *argp;
{
struct msdosfsmount *pmp;
struct buf *bp;
dev_t dev = devvp->v_rdev;
#ifndef __FreeBSD__
struct partinfo dpart;
int bsize = 0, dtype = 0, tmp;
#endif
union bootsector *bsp;
struct byte_bpb33 *b33;
struct byte_bpb50 *b50;
struct byte_bpb710 *b710;
u_int8_t SecPerClust;
u_long clusters;
int ronly, error;
/*
* Disallow multiple mounts of the same device.
* Disallow mounting of a device that is currently in use
* (except for root, which might share swap device for miniroot).
* Flush out any old buffers remaining from a previous use.
*/
error = vfs_mountedon(devvp);
if (error)
return (error);
if (vcount(devvp) > 1 && devvp != rootvp)
return (EBUSY);
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
error = vinvalbuf(devvp, V_SAVE, p->p_ucred, p, 0, 0);
VOP_UNLOCK(devvp, 0, p);
if (error)
return (error);
ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p);
error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
VOP_UNLOCK(devvp, 0, p);
if (error)
return (error);
bp = NULL; /* both used in error_exit */
pmp = NULL;
#ifndef __FreeBSD__
if (argp->flags & MSDOSFSMNT_GEMDOSFS) {
/*
* We need the disklabel to calculate the size of a FAT entry
* later on. Also make sure the partition contains a filesystem
* of type FS_MSDOS. This doesn't work for floppies, so we have
* to check for them too.
*
* At least some parts of the msdos fs driver seem to assume
* that the size of a disk block will always be 512 bytes.
* Let's check it...
*/
error = VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart,
FREAD, NOCRED, p);
if (error)
goto error_exit;
tmp = dpart.part->p_fstype;
dtype = dpart.disklab->d_type;
bsize = dpart.disklab->d_secsize;
if (bsize != 512 || (dtype!=DTYPE_FLOPPY && tmp!=FS_MSDOS)) {
error = EINVAL;
goto error_exit;
}
}
#endif
/*
* Read the boot sector of the filesystem, and then check the
* boot signature. If not a dos boot sector then error out.
*
* NOTE: 2048 is the maximum sector size supported in -current.
*/
error = bread(devvp, 0, 2048, NOCRED, &bp);
if (error)
goto error_exit;
bp->b_flags |= B_AGE;
bsp = (union bootsector *)bp->b_data;
b33 = (struct byte_bpb33 *)bsp->bs33.bsBPB;
b50 = (struct byte_bpb50 *)bsp->bs50.bsBPB;
b710 = (struct byte_bpb710 *)bsp->bs710.bsPBP;
#ifndef __FreeBSD__
if (!(argp->flags & MSDOSFSMNT_GEMDOSFS)) {
#endif
#ifndef MSDOSFS_NOCHECKSIG
if (bsp->bs50.bsBootSectSig0 != BOOTSIG0
|| bsp->bs50.bsBootSectSig1 != BOOTSIG1) {
error = EINVAL;
goto error_exit;
}
#endif
#ifndef __FreeBSD__
}
#endif
pmp = malloc(sizeof *pmp, M_MSDOSFSMNT, M_WAITOK | M_ZERO);
pmp->pm_mountp = mp;
/*
* Compute several useful quantities from the bpb in the
* bootsector. Copy in the dos 5 variant of the bpb then fix up
* the fields that are different between dos 5 and dos 3.3.
*/
SecPerClust = b50->bpbSecPerClust;
pmp->pm_BytesPerSec = getushort(b50->bpbBytesPerSec);
pmp->pm_ResSectors = getushort(b50->bpbResSectors);
pmp->pm_FATs = b50->bpbFATs;
pmp->pm_RootDirEnts = getushort(b50->bpbRootDirEnts);
pmp->pm_Sectors = getushort(b50->bpbSectors);
pmp->pm_FATsecs = getushort(b50->bpbFATsecs);
pmp->pm_SecPerTrack = getushort(b50->bpbSecPerTrack);
pmp->pm_Heads = getushort(b50->bpbHeads);
pmp->pm_Media = b50->bpbMedia;
/* calculate the ratio of sector size to DEV_BSIZE */
pmp->pm_BlkPerSec = pmp->pm_BytesPerSec / DEV_BSIZE;
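/*
 * Example: with DEV_BSIZE == 512, an ordinary 512-bytes-per-sector
 * volume yields pm_BlkPerSec == 1 and a 2048-byte-sector medium
 * yields 4.  The sector counts taken from the BPB are scaled by this
 * factor further down so the rest of the driver can work entirely in
 * DEV_BSIZE blocks.
 */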
#ifndef __FreeBSD__
if (!(argp->flags & MSDOSFSMNT_GEMDOSFS)) {
#endif
/* XXX - We should probably check more values here */
if (!pmp->pm_BytesPerSec || !SecPerClust
|| !pmp->pm_Heads || pmp->pm_Heads > 255
#ifdef PC98
|| !pmp->pm_SecPerTrack || pmp->pm_SecPerTrack > 255) {
#else
|| !pmp->pm_SecPerTrack || pmp->pm_SecPerTrack > 63) {
#endif
error = EINVAL;
goto error_exit;
}
#ifndef __FreeBSD__
}
#endif
if (pmp->pm_Sectors == 0) {
pmp->pm_HiddenSects = getulong(b50->bpbHiddenSecs);
pmp->pm_HugeSectors = getulong(b50->bpbHugeSectors);
} else {
pmp->pm_HiddenSects = getushort(b33->bpbHiddenSecs);
pmp->pm_HugeSectors = pmp->pm_Sectors;
}
if (pmp->pm_HugeSectors > 0xffffffff /
(pmp->pm_BytesPerSec / sizeof(struct direntry)) + 1) {
/*
* We cannot deal currently with this size of disk
* due to fileid limitations (see msdosfs_getattr and
* msdosfs_readdir)
*/
error = EINVAL;
printf("mountmsdosfs(): disk too big, sorry\n");
goto error_exit;
}
if (pmp->pm_RootDirEnts == 0) {
if (bsp->bs710.bsBootSectSig2 != BOOTSIG2
|| bsp->bs710.bsBootSectSig3 != BOOTSIG3
|| pmp->pm_Sectors
|| pmp->pm_FATsecs
|| getushort(b710->bpbFSVers)) {
error = EINVAL;
printf("mountmsdosfs(): bad FAT32 filesystem\n");
goto error_exit;
}
pmp->pm_fatmask = FAT32_MASK;
pmp->pm_fatmult = 4;
pmp->pm_fatdiv = 1;
pmp->pm_FATsecs = getulong(b710->bpbBigFATsecs);
if (getushort(b710->bpbExtFlags) & FATMIRROR)
pmp->pm_curfat = getushort(b710->bpbExtFlags) & FATNUM;
else
pmp->pm_flags |= MSDOSFS_FATMIRROR;
} else
pmp->pm_flags |= MSDOSFS_FATMIRROR;
/*
* Check a few values (could do some more):
* - logical sector size: power of 2, >= block size
* - sectors per cluster: power of 2, >= 1
* - number of sectors: >= 1, <= size of partition
*/
if ( (SecPerClust == 0)
|| (SecPerClust & (SecPerClust - 1))
|| (pmp->pm_BytesPerSec < DEV_BSIZE)
|| (pmp->pm_BytesPerSec & (pmp->pm_BytesPerSec - 1))
|| (pmp->pm_HugeSectors == 0)
) {
error = EINVAL;
goto error_exit;
}
pmp->pm_HugeSectors *= pmp->pm_BlkPerSec;
pmp->pm_HiddenSects *= pmp->pm_BlkPerSec; /* XXX not used? */
pmp->pm_FATsecs *= pmp->pm_BlkPerSec;
SecPerClust *= pmp->pm_BlkPerSec;
pmp->pm_fatblk = pmp->pm_ResSectors * pmp->pm_BlkPerSec;
if (FAT32(pmp)) {
pmp->pm_rootdirblk = getulong(b710->bpbRootClust);
pmp->pm_firstcluster = pmp->pm_fatblk
+ (pmp->pm_FATs * pmp->pm_FATsecs);
pmp->pm_fsinfo = getushort(b710->bpbFSInfo) * pmp->pm_BlkPerSec;
} else {
pmp->pm_rootdirblk = pmp->pm_fatblk +
(pmp->pm_FATs * pmp->pm_FATsecs);
pmp->pm_rootdirsize = (pmp->pm_RootDirEnts * sizeof(struct direntry)
+ DEV_BSIZE - 1)
/ DEV_BSIZE; /* in blocks */
pmp->pm_firstcluster = pmp->pm_rootdirblk + pmp->pm_rootdirsize;
}
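/*
 * Note the asymmetry: for FAT32, pm_rootdirblk holds the starting
 * *cluster* of the root directory (an ordinary cluster chain), while
 * for FAT12/FAT16 it holds the DEV_BSIZE block number of the
 * fixed-size root directory area between the FATs and the first data
 * cluster.
 */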
pmp->pm_maxcluster = (pmp->pm_HugeSectors - pmp->pm_firstcluster) /
SecPerClust + 1;
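/*
 * Worked example for a standard 1.44M floppy (512-byte sectors,
 * 1 sector per cluster, 1 reserved sector, two 9-sector FATs,
 * 224 root entries, 2880 sectors): pm_fatblk = 1, pm_rootdirblk = 19,
 * pm_rootdirsize = 14, pm_firstcluster = 33 and pm_maxcluster =
 * (2880 - 33) / 1 + 1 = 2848, i.e. 2847 data clusters numbered
 * 2 through 2848.
 */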
pmp->pm_fatsize = pmp->pm_FATsecs * DEV_BSIZE; /* XXX not used? */
#ifndef __FreeBSD__
if (argp->flags & MSDOSFSMNT_GEMDOSFS) {
if ((pmp->pm_maxcluster <= (0xff0 - 2))
&& ((dtype == DTYPE_FLOPPY) || ((dtype == DTYPE_VNODE)
&& ((pmp->pm_Heads == 1) || (pmp->pm_Heads == 2))))
) {
pmp->pm_fatmask = FAT12_MASK;
pmp->pm_fatmult = 3;
pmp->pm_fatdiv = 2;
} else {
pmp->pm_fatmask = FAT16_MASK;
pmp->pm_fatmult = 2;
pmp->pm_fatdiv = 1;
}
} else
#endif
if (pmp->pm_fatmask == 0) {
if (pmp->pm_maxcluster
<= ((CLUST_RSRVD - CLUST_FIRST) & FAT12_MASK)) {
/*
* This will usually be a floppy disk. This size makes
* sure that one fat entry will not be split across
* multiple blocks.
*/
pmp->pm_fatmask = FAT12_MASK;
pmp->pm_fatmult = 3;
pmp->pm_fatdiv = 2;
} else {
pmp->pm_fatmask = FAT16_MASK;
pmp->pm_fatmult = 2;
pmp->pm_fatdiv = 1;
}
}
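/*
 * pm_fatmult / pm_fatdiv express the size of one FAT entry in bytes
 * as a fraction: 3/2 (1.5 bytes) for FAT12, 2/1 for FAT16 and 4/1
 * for FAT32.  The FAT entry for cluster cn therefore starts at byte
 * offset (cn * pm_fatmult) / pm_fatdiv within the FAT.
 */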
clusters = (pmp->pm_fatsize / pmp->pm_fatmult) * pmp->pm_fatdiv;
if (pmp->pm_maxcluster >= clusters) {
printf("Warning: number of clusters (%ld) exceeds FAT "
"capacity (%ld)\n", pmp->pm_maxcluster + 1, clusters);
pmp->pm_maxcluster = clusters - 1;
}
if (FAT12(pmp))
pmp->pm_fatblocksize = 3 * pmp->pm_BytesPerSec;
else
pmp->pm_fatblocksize = MSDOSFS_DFLTBSIZE;
pmp->pm_fatblocksec = pmp->pm_fatblocksize / DEV_BSIZE;
pmp->pm_bnshift = ffs(DEV_BSIZE) - 1;
/*
* Compute mask and shift value for isolating cluster relative byte
* offsets and cluster numbers from a file offset.
*/
pmp->pm_bpcluster = SecPerClust * DEV_BSIZE;
pmp->pm_crbomask = pmp->pm_bpcluster - 1;
pmp->pm_cnshift = ffs(pmp->pm_bpcluster) - 1;
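/*
 * Example: 512-byte blocks and 4 blocks per cluster give
 * pm_bpcluster = 2048, pm_crbomask = 0x7ff and pm_cnshift = 11, so a
 * file offset off maps to cluster index (off >> pm_cnshift) with
 * in-cluster byte offset (off & pm_crbomask).
 */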
/*
* Check for a valid cluster size: it must be a power of 2.
* (pm_cnshift is the index of the lowest set bit in pm_bpcluster, so
* the XOR below is nonzero unless pm_bpcluster == 1 << pm_cnshift.)
*/
if (pmp->pm_bpcluster ^ (1 << pmp->pm_cnshift)) {
error = EINVAL;
goto error_exit;
}
/*
* Release the bootsector buffer.
*/
brelse(bp);
bp = NULL;
/*
* Check FSInfo.
*/
if (pmp->pm_fsinfo) {
struct fsinfo *fp;
if ((error = bread(devvp, pmp->pm_fsinfo, fsi_size(pmp),
NOCRED, &bp)) != 0)
goto error_exit;
fp = (struct fsinfo *)bp->b_data;
if (!bcmp(fp->fsisig1, "RRaA", 4)
&& !bcmp(fp->fsisig2, "rrAa", 4)
&& !bcmp(fp->fsisig3, "\0\0\125\252", 4)
&& !bcmp(fp->fsisig4, "\0\0\125\252", 4))
pmp->pm_nxtfree = getulong(fp->fsinxtfree);
else
pmp->pm_fsinfo = 0;
brelse(bp);
bp = NULL;
}
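/*
 * At this point pm_nxtfree caches the "next free cluster" hint from
 * the FAT32 FSInfo sector; if any of the four signatures failed to
 * match, pm_fsinfo was zeroed above so the hint is never relied on.
 */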
/*
* Check and validate (or perhaps invalidate?) the fsinfo structure? XXX
*/
/*
* Allocate memory for the bitmap of allocated clusters, and then
* fill it in.
*/
pmp->pm_inusemap = malloc(((pmp->pm_maxcluster + N_INUSEBITS - 1)
/ N_INUSEBITS)
* sizeof(*pmp->pm_inusemap),
M_MSDOSFSFAT, M_WAITOK);
/*
* fillinusemap() needs pm_devvp.
*/
pmp->pm_dev = dev;
pmp->pm_devvp = devvp;
/*
* Have the inuse map filled in.
*/
if ((error = fillinusemap(pmp)) != 0)
goto error_exit;
/*
* If they want fat updates to be synchronous then let them suffer
* the performance degradation in exchange for the on disk copy of
* the fat being correct just about all the time. I suppose this
* would be a good thing to turn on if the kernel is still flakey.
*/
if (mp->mnt_flag & MNT_SYNCHRONOUS)
pmp->pm_flags |= MSDOSFSMNT_WAITONFAT;
/*
* Finish up.
*/
if (ronly)
pmp->pm_flags |= MSDOSFSMNT_RONLY;
else
pmp->pm_fmod = 1;
mp->mnt_data = (qaddr_t) pmp;
mp->mnt_stat.f_fsid.val[0] = dev2udev(dev);
mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
mp->mnt_flag |= MNT_LOCAL;
devvp->v_rdev->si_mountpoint = mp;
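/*
 * Recording the mount in si_mountpoint is what makes the
 * vfs_mountedon() check at the top of this function reject any later
 * attempt to mount the same device again.
 */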
return 0;
error_exit:
if (bp)
brelse(bp);
(void) VOP_CLOSE(devvp, ronly ? FREAD : FREAD | FWRITE, NOCRED, p);
if (pmp) {
if (pmp->pm_inusemap)
free(pmp->pm_inusemap, M_MSDOSFSFAT);
free(pmp, M_MSDOSFSMNT);
mp->mnt_data = (qaddr_t)0;
}
return (error);
}
/*
* Unmount the filesystem described by mp.
*/
static int
msdosfs_unmount(mp, mntflags, p)
struct mount *mp;
int mntflags;
struct proc *p;
{
struct msdosfsmount *pmp;
int error, flags;
flags = 0;
if (mntflags & MNT_FORCE)
flags |= FORCECLOSE;
error = vflush(mp, NULLVP, flags);
if (error)
return error;
pmp = VFSTOMSDOSFS(mp);
pmp->pm_devvp->v_rdev->si_mountpoint = NULL;
#ifdef MSDOSFS_DEBUG
{
struct vnode *vp = pmp->pm_devvp;
printf("msdosfs_umount(): just before calling VOP_CLOSE()\n");
printf("flag %08lx, usecount %d, writecount %d, holdcnt %ld\n",
vp->v_flag, vp->v_usecount, vp->v_writecount, vp->v_holdcnt);
printf("id %lu, mount %p, op %p\n",
vp->v_id, vp->v_mount, vp->v_op);
printf("freef %p, freeb %p, mount %p\n",
TAILQ_NEXT(vp, v_freelist), vp->v_freelist.tqe_prev,
vp->v_mount);
printf("cleanblkhd %p, dirtyblkhd %p, numoutput %ld, type %d\n",
TAILQ_FIRST(&vp->v_cleanblkhd),
TAILQ_FIRST(&vp->v_dirtyblkhd),
vp->v_numoutput, vp->v_type);
printf("union %p, tag %d, data[0] %08x, data[1] %08x\n",
vp->v_socket, vp->v_tag,
((u_int *)vp->v_data)[0],
((u_int *)vp->v_data)[1]);
}
#endif
error = VOP_CLOSE(pmp->pm_devvp,
(pmp->pm_flags&MSDOSFSMNT_RONLY) ? FREAD : FREAD | FWRITE,
NOCRED, p);
vrele(pmp->pm_devvp);
free(pmp->pm_inusemap, M_MSDOSFSFAT);
free(pmp, M_MSDOSFSMNT);
mp->mnt_data = (qaddr_t)0;
mp->mnt_flag &= ~MNT_LOCAL;
return (error);
}
static int
msdosfs_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
{
struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
struct denode *ndep;
int error;
#ifdef MSDOSFS_DEBUG
printf("msdosfs_root(); mp %p, pmp %p\n", mp, pmp);
#endif
error = deget(pmp, MSDOSFSROOT, MSDOSFSROOT_OFS, &ndep);
if (error)
return (error);
*vpp = DETOV(ndep);
return (0);
}
static int
msdosfs_statfs(mp, sbp, p)
struct mount *mp;
struct statfs *sbp;
struct proc *p;
{
struct msdosfsmount *pmp;
pmp = VFSTOMSDOSFS(mp);
sbp->f_bsize = pmp->pm_bpcluster;
sbp->f_iosize = pmp->pm_bpcluster;
sbp->f_blocks = pmp->pm_maxcluster + 1;
sbp->f_bfree = pmp->pm_freeclustercount;
sbp->f_bavail = pmp->pm_freeclustercount;
sbp->f_files = pmp->pm_RootDirEnts; /* XXX */
sbp->f_ffree = 0; /* what to put in here? */
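/*
 * FAT has no inode table, so there is no natural file count to
 * report: f_files is (arbitrarily) the number of root directory
 * slots and f_ffree is left at zero.
 */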
if (sbp != &mp->mnt_stat) {
sbp->f_type = mp->mnt_vfc->vfc_typenum;
bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
}
strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);
return (0);
}
static int
msdosfs_sync(mp, waitfor, cred, p)
struct mount *mp;
int waitfor;
struct ucred *cred;
struct proc *p;
{
struct vnode *vp, *nvp;
struct denode *dep;
struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
int error, allerror = 0;
/*
* If we ever switch to not updating all of the fats all the time,
* this would be the place to update them from the first one.
*/
if (pmp->pm_fmod != 0) {
if (pmp->pm_flags & MSDOSFSMNT_RONLY)
panic("msdosfs_sync: rofs mod");
else {
/* update fats here */
}
}
/*
* Write back each (modified) denode.
*/
mtx_lock(&mntvnode_mtx);
loop:
for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
/*
* If the vnode that we are about to sync is no longer
* associated with this mount point, start over.
*/
if (vp->v_mount != mp)
goto loop;
mtx_lock(&vp->v_interlock);
nvp = LIST_NEXT(vp, v_mntvnodes);
dep = VTODE(vp);
if (vp->v_type == VNON ||
((dep->de_flag &
(DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0 &&
(TAILQ_EMPTY(&vp->v_dirtyblkhd) || waitfor == MNT_LAZY))) {
mtx_unlock(&vp->v_interlock);
continue;
}
mtx_unlock(&mntvnode_mtx);
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
if (error) {
mtx_lock(&mntvnode_mtx);
if (error == ENOENT)
goto loop;
continue;
}
error = VOP_FSYNC(vp, cred, waitfor, p);
if (error)
allerror = error;
VOP_UNLOCK(vp, 0, p);
vrele(vp);
mtx_lock(&mntvnode_mtx);
}
mtx_unlock(&mntvnode_mtx);
/*
* Flush filesystem control info.
*/
if (waitfor != MNT_LAZY) {
vn_lock(pmp->pm_devvp, LK_EXCLUSIVE | LK_RETRY, p);
error = VOP_FSYNC(pmp->pm_devvp, cred, waitfor, p);
if (error)
allerror = error;
VOP_UNLOCK(pmp->pm_devvp, 0, p);
}
return (allerror);
}
static int
msdosfs_fhtovp(mp, fhp, vpp)
struct mount *mp;
struct fid *fhp;
struct vnode **vpp;
{
struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
struct defid *defhp = (struct defid *) fhp;
struct denode *dep;
int error;
error = deget(pmp, defhp->defid_dirclust, defhp->defid_dirofs, &dep);
if (error) {
*vpp = NULLVP;
return (error);
}
*vpp = DETOV(dep);
return (0);
}
static int
msdosfs_checkexp(mp, nam, exflagsp, credanonp)
struct mount *mp;
struct sockaddr *nam;
int *exflagsp;
struct ucred **credanonp;
{
struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
struct netcred *np;
np = vfs_export_lookup(mp, &pmp->pm_export, nam);
if (np == NULL)
return (EACCES);
*exflagsp = np->netc_exflags;
*credanonp = &np->netc_anon;
return (0);
}
static int
msdosfs_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
{
struct denode *dep;
struct defid *defhp;
dep = VTODE(vp);
defhp = (struct defid *)fhp;
defhp->defid_len = sizeof(struct defid);
defhp->defid_dirclust = dep->de_dirclust;
defhp->defid_dirofs = dep->de_diroffset;
/* defhp->defid_gen = dep->de_gen; */
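/*
 * The (dirclust, diroffset) pair is the same key deget() uses to look
 * up a denode, which is how msdosfs_fhtovp() above reconstructs the
 * vnode.  With defid_gen unused there is no generation number, so a
 * stale handle that names a reused directory slot goes undetected.
 */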
return (0);
}
static struct vfsops msdosfs_vfsops = {
msdosfs_mount,
vfs_stdstart,
msdosfs_unmount,
msdosfs_root,
vfs_stdquotactl,
msdosfs_statfs,
msdosfs_sync,
vfs_stdvget,
msdosfs_fhtovp,
msdosfs_checkexp,
msdosfs_vptofh,
msdosfs_init,
msdosfs_uninit,
vfs_stdextattrctl,
};
VFS_SET(msdosfs_vfsops, msdos, 0);