/* $FreeBSD$ */
/* $NetBSD: msdosfs_vfsops.c,v 1.51 1997/11/17 15:36:58 ws Exp $ */
/*-
 * Copyright (C) 1994, 1995, 1997 Wolfgang Solfrank.
 * Copyright (C) 1994, 1995, 1997 TooLs GmbH.
 * All rights reserved.
* Original code by Paul Popelka (paulp@uts.amdahl.com) (see below).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by TooLs GmbH.
* 4. The name of TooLs GmbH may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
 * Written by Paul Popelka (paulp@uts.amdahl.com)
 *
 * You can do anything you want with this software, just don't say you wrote
 * it, and don't remove this notice.
 *
 * This software is provided "as is".
 *
 * The author supplies this software to be publicly redistributed on the
 * understanding that the author is not responsible for the correct
 * functioning of this software in any circumstances and is not liable for
 * any damages caused by this software.
 *
 * October 1992
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/stat.h> /* defines ALLPERMS */
#include <sys/mutex.h>
#include <fs/msdosfs/bpb.h>
#include <fs/msdosfs/bootsect.h>
#include <fs/msdosfs/direntry.h>
#include <fs/msdosfs/denode.h>
#include <fs/msdosfs/msdosfsmount.h>
#include <fs/msdosfs/fat.h>
#define MSDOSFS_DFLTBSIZE 4096
#if 1 /*def PC98*/
/*
* XXX - The boot signature formatted by NEC PC-98 DOS looks like a
* garbage or a random value :-{
* If you want to use that broken-signatured media, define the
* following symbol even though PC/AT.
* (ex. mount PC-98 DOS formatted FD on PC/AT)
*/
#define MSDOSFS_NOCHECKSIG
#endif
MALLOC_DEFINE(M_MSDOSFSMNT, "MSDOSFS mount", "MSDOSFS mount structure");
static MALLOC_DEFINE(M_MSDOSFSFAT, "MSDOSFS FAT", "MSDOSFS file allocation table");
static int update_mp(struct mount *mp, struct msdosfs_args *argp);
static int mountmsdosfs(struct vnode *devvp, struct mount *mp,
struct thread *td, struct msdosfs_args *argp);
static vfs_fhtovp_t msdosfs_fhtovp;
static vfs_mount_t msdosfs_mount;
static vfs_root_t msdosfs_root;
static vfs_statfs_t msdosfs_statfs;
static vfs_sync_t msdosfs_sync;
static vfs_unmount_t msdosfs_unmount;
static vfs_vptofh_t msdosfs_vptofh;
static int
update_mp(mp, argp)
struct mount *mp;
struct msdosfs_args *argp;
{
struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
int error;
pmp->pm_gid = argp->gid;
pmp->pm_uid = argp->uid;
pmp->pm_mask = argp->mask & ALLPERMS;
pmp->pm_flags |= argp->flags & MSDOSFSMNT_MNTOPT;
if (pmp->pm_flags & MSDOSFSMNT_U2WTABLE) {
bcopy(argp->u2w, pmp->pm_u2w, sizeof(pmp->pm_u2w));
bcopy(argp->d2u, pmp->pm_d2u, sizeof(pmp->pm_d2u));
bcopy(argp->u2d, pmp->pm_u2d, sizeof(pmp->pm_u2d));
}
if (pmp->pm_flags & MSDOSFSMNT_ULTABLE) {
bcopy(argp->ul, pmp->pm_ul, sizeof(pmp->pm_ul));
bcopy(argp->lu, pmp->pm_lu, sizeof(pmp->pm_lu));
}
if (pmp->pm_flags & MSDOSFSMNT_NOWIN95)
pmp->pm_flags |= MSDOSFSMNT_SHORTNAME;
else if (!(pmp->pm_flags &
(MSDOSFSMNT_SHORTNAME | MSDOSFSMNT_LONGNAME))) {
struct vnode *rootvp;
/*
* Try to divine whether to support Win'95 long filenames
*/
if (FAT32(pmp))
pmp->pm_flags |= MSDOSFSMNT_LONGNAME;
else {
if ((error = msdosfs_root(mp, &rootvp)) != 0)
return error;
pmp->pm_flags |= findwin95(VTODE(rootvp))
? MSDOSFSMNT_LONGNAME
: MSDOSFSMNT_SHORTNAME;
vput(rootvp);
}
}
return 0;
}
/*
 * mp - path - addr in user space of mount point (ie /usr or whatever)
 * data - addr in user space of mount params including the name of the block
 * special file to treat as a filesystem.
 */
static int
msdosfs_mount(mp, path, data, ndp, td)
1994-09-19 15:41:57 +00:00
struct mount *mp;
char *path;
caddr_t data;
struct nameidata *ndp;
struct thread *td;
1994-09-19 15:41:57 +00:00
{
struct vnode *devvp; /* vnode for blk device to mount */
struct msdosfs_args args; /* will hold data from mount request */
/* msdosfs specific mount control block */
struct msdosfsmount *pmp = NULL;
size_t size;
1994-09-19 15:41:57 +00:00
int error, flags;
mode_t accessmode;
1994-09-19 15:41:57 +00:00
error = copyin(data, (caddr_t)&args, sizeof(struct msdosfs_args));
if (error)
return (error);
if (args.magic != MSDOSFS_ARGSMAGIC)
args.flags = 0;
1994-09-19 15:41:57 +00:00
/*
* If updating, check whether changing from read-only to
* read/write; if there is no device name, that's all we do.
1994-09-19 15:41:57 +00:00
*/
if (mp->mnt_flag & MNT_UPDATE) {
pmp = VFSTOMSDOSFS(mp);
1994-09-19 15:41:57 +00:00
error = 0;
if (!(pmp->pm_flags & MSDOSFSMNT_RONLY) && (mp->mnt_flag & MNT_RDONLY)) {
1994-09-19 15:41:57 +00:00
flags = WRITECLOSE;
if (mp->mnt_flag & MNT_FORCE)
flags |= FORCECLOSE;
error = vflush(mp, 0, flags);
1994-09-19 15:41:57 +00:00
}
if (!error && (mp->mnt_flag & MNT_RELOAD))
/* not yet implemented */
error = EOPNOTSUPP;
1994-09-19 15:41:57 +00:00
if (error)
return (error);
if ((pmp->pm_flags & MSDOSFSMNT_RONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR)) {
/*
* If upgrade to read-write by non-root, then verify
* that user has necessary permissions on the device.
*/
if (suser(td)) {
devvp = pmp->pm_devvp;
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
error = VOP_ACCESS(devvp, VREAD | VWRITE,
td->td_ucred, td);
if (error) {
VOP_UNLOCK(devvp, 0, td);
return (error);
}
VOP_UNLOCK(devvp, 0, td);
}
pmp->pm_flags &= ~MSDOSFSMNT_RONLY;
}
1994-09-19 15:41:57 +00:00
if (args.fspec == 0) {
#ifdef __notyet__ /* doesn't work correctly with current mountd XXX */
if (args.flags & MSDOSFSMNT_MNTOPT) {
pmp->pm_flags &= ~MSDOSFSMNT_MNTOPT;
pmp->pm_flags |= args.flags & MSDOSFSMNT_MNTOPT;
if (pmp->pm_flags & MSDOSFSMNT_NOWIN95)
pmp->pm_flags |= MSDOSFSMNT_SHORTNAME;
}
#endif
1994-09-19 15:41:57 +00:00
/*
* Process export requests.
*/
return (vfs_export(mp, &args.export));
1994-09-19 15:41:57 +00:00
}
}
/*
* Not an update, or updating the name: look up the name
* and verify that it refers to a sensible block device.
1994-09-19 15:41:57 +00:00
*/
NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, td);
1994-09-19 15:41:57 +00:00
error = namei(ndp);
if (error)
return (error);
1994-09-19 15:41:57 +00:00
devvp = ndp->ni_vp;
NDFREE(ndp, NDF_ONLY_PNBUF);
if (!vn_isdisk(devvp, &error)) {
1994-09-19 15:41:57 +00:00
vrele(devvp);
return (error);
1994-09-19 15:41:57 +00:00
}
/*
* If mount by non-root, then verify that user has necessary
* permissions on the device.
1994-09-19 15:41:57 +00:00
*/
if (suser(td)) {
accessmode = VREAD;
if ((mp->mnt_flag & MNT_RDONLY) == 0)
accessmode |= VWRITE;
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
error = VOP_ACCESS(devvp, accessmode, td->td_ucred, td);
if (error) {
vput(devvp);
return (error);
}
VOP_UNLOCK(devvp, 0, td);
}
if ((mp->mnt_flag & MNT_UPDATE) == 0) {
error = mountmsdosfs(devvp, mp, td, &args);
#ifdef MSDOSFS_DEBUG /* only needed for the printf below */
pmp = VFSTOMSDOSFS(mp);
#endif
} else {
1994-09-19 15:41:57 +00:00
if (devvp != pmp->pm_devvp)
error = EINVAL; /* XXX needs translation */
1994-09-19 15:41:57 +00:00
else
vrele(devvp);
}
if (error) {
vrele(devvp);
return (error);
}
error = update_mp(mp, &args);
if (error) {
msdosfs_unmount(mp, MNT_FORCE, td);
1994-09-19 15:41:57 +00:00
return error;
}
(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
&size);
bzero(mp->mnt_stat.f_mntfromname + size, MNAMELEN - size);
(void) msdosfs_statfs(mp, &mp->mnt_stat, td);
1994-09-19 15:41:57 +00:00
#ifdef MSDOSFS_DEBUG
printf("msdosfs_mount(): mp %p, pmp %p, inusemap %p\n", mp, pmp, pmp->pm_inusemap);
1994-09-19 15:41:57 +00:00
#endif
return (0);
1994-09-19 15:41:57 +00:00
}
static int
mountmsdosfs(devvp, mp, td, argp)
1994-09-19 15:41:57 +00:00
struct vnode *devvp;
struct mount *mp;
struct thread *td;
struct msdosfs_args *argp;
1994-09-19 15:41:57 +00:00
{
struct msdosfsmount *pmp;
struct buf *bp;
1994-09-19 15:41:57 +00:00
dev_t dev = devvp->v_rdev;
union bootsector *bsp;
struct byte_bpb33 *b33;
struct byte_bpb50 *b50;
struct byte_bpb710 *b710;
u_int8_t SecPerClust;
u_long clusters;
int ronly, error;
1994-09-19 15:41:57 +00:00
/*
* Disallow multiple mounts of the same device.
* Disallow mounting of a device that is currently in use
* (except for root, which might share swap device for miniroot).
* Flush out any old buffers remaining from a previous use.
1994-09-19 15:41:57 +00:00
*/
error = vfs_mountedon(devvp);
if (error)
return (error);
if (vcount(devvp) > 1 && devvp != rootvp)
return (EBUSY);
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
error = vinvalbuf(devvp, V_SAVE, td->td_ucred, td, 0, 0);
VOP_UNLOCK(devvp, 0, td);
if (error)
return (error);
1994-09-19 15:41:57 +00:00
ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, td);
VOP_UNLOCK(devvp, 0, td);
if (error)
return (error);
bp = NULL; /* both used in error_exit */
pmp = NULL;
1994-09-19 15:41:57 +00:00
/*
* Read the boot sector of the filesystem, and then check the
* boot signature. If not a dos boot sector then error out.
*
* NOTE: 2048 is a maximum sector size in current...
1994-09-19 15:41:57 +00:00
*/
error = bread(devvp, 0, 2048, NOCRED, &bp);
if (error)
1994-09-19 15:41:57 +00:00
goto error_exit;
bp->b_flags |= B_AGE;
bsp = (union bootsector *)bp->b_data;
b33 = (struct byte_bpb33 *)bsp->bs33.bsBPB;
b50 = (struct byte_bpb50 *)bsp->bs50.bsBPB;
b710 = (struct byte_bpb710 *)bsp->bs710.bsPBP;
#ifndef MSDOSFS_NOCHECKSIG
if (bsp->bs50.bsBootSectSig0 != BOOTSIG0
|| bsp->bs50.bsBootSectSig1 != BOOTSIG1) {
error = EINVAL;
goto error_exit;
}
#endif
1994-09-19 15:41:57 +00:00
pmp = malloc(sizeof *pmp, M_MSDOSFSMNT, M_WAITOK | M_ZERO);
1994-09-19 15:41:57 +00:00
pmp->pm_mountp = mp;
/*
* Compute several useful quantities from the bpb in the
* bootsector. Copy in the dos 5 variant of the bpb then fix up
* the fields that are different between dos 5 and dos 3.3.
*/
SecPerClust = b50->bpbSecPerClust;
1994-09-19 15:41:57 +00:00
pmp->pm_BytesPerSec = getushort(b50->bpbBytesPerSec);
pmp->pm_ResSectors = getushort(b50->bpbResSectors);
pmp->pm_FATs = b50->bpbFATs;
pmp->pm_RootDirEnts = getushort(b50->bpbRootDirEnts);
pmp->pm_Sectors = getushort(b50->bpbSectors);
pmp->pm_FATsecs = getushort(b50->bpbFATsecs);
pmp->pm_SecPerTrack = getushort(b50->bpbSecPerTrack);
pmp->pm_Heads = getushort(b50->bpbHeads);
pmp->pm_Media = b50->bpbMedia;
1994-09-19 15:41:57 +00:00
/* calculate the ratio of sector size to DEV_BSIZE */
pmp->pm_BlkPerSec = pmp->pm_BytesPerSec / DEV_BSIZE;
/* XXX - We should probably check more values here */
if (!pmp->pm_BytesPerSec || !SecPerClust
|| !pmp->pm_Heads || pmp->pm_Heads > 255
#ifdef PC98
|| !pmp->pm_SecPerTrack || pmp->pm_SecPerTrack > 255) {
#else
|| !pmp->pm_SecPerTrack || pmp->pm_SecPerTrack > 63) {
#endif
error = EINVAL;
goto error_exit;
}
1994-09-19 15:41:57 +00:00
if (pmp->pm_Sectors == 0) {
pmp->pm_HiddenSects = getulong(b50->bpbHiddenSecs);
pmp->pm_HugeSectors = getulong(b50->bpbHugeSectors);
} else {
pmp->pm_HiddenSects = getushort(b33->bpbHiddenSecs);
pmp->pm_HugeSectors = pmp->pm_Sectors;
}
if (pmp->pm_HugeSectors > 0xffffffff /
(pmp->pm_BytesPerSec / sizeof(struct direntry)) + 1) {
/*
* We cannot deal currently with this size of disk
* due to fileid limitations (see msdosfs_getattr and
* msdosfs_readdir)
*/
error = EINVAL;
printf("mountmsdosfs(): disk too big, sorry\n");
goto error_exit;
}
if (pmp->pm_RootDirEnts == 0) {
if (bsp->bs710.bsBootSectSig2 != BOOTSIG2
|| bsp->bs710.bsBootSectSig3 != BOOTSIG3
|| pmp->pm_Sectors
|| pmp->pm_FATsecs
|| getushort(b710->bpbFSVers)) {
error = EINVAL;
printf("mountmsdosfs(): bad FAT32 filesystem\n");
goto error_exit;
}
pmp->pm_fatmask = FAT32_MASK;
pmp->pm_fatmult = 4;
pmp->pm_fatdiv = 1;
pmp->pm_FATsecs = getulong(b710->bpbBigFATsecs);
if (getushort(b710->bpbExtFlags) & FATMIRROR)
pmp->pm_curfat = getushort(b710->bpbExtFlags) & FATNUM;
else
pmp->pm_flags |= MSDOSFS_FATMIRROR;
} else
pmp->pm_flags |= MSDOSFS_FATMIRROR;
/*
* Check a few values (could do some more):
* - logical sector size: power of 2, >= block size
* - sectors per cluster: power of 2, >= 1
* - number of sectors: >= 1, <= size of partition
*/
if ( (SecPerClust == 0)
|| (SecPerClust & (SecPerClust - 1))
|| (pmp->pm_BytesPerSec < DEV_BSIZE)
|| (pmp->pm_BytesPerSec & (pmp->pm_BytesPerSec - 1))
|| (pmp->pm_HugeSectors == 0)
) {
error = EINVAL;
goto error_exit;
}
pmp->pm_HugeSectors *= pmp->pm_BlkPerSec;
pmp->pm_HiddenSects *= pmp->pm_BlkPerSec; /* XXX not used? */
pmp->pm_FATsecs *= pmp->pm_BlkPerSec;
SecPerClust *= pmp->pm_BlkPerSec;
pmp->pm_fatblk = pmp->pm_ResSectors * pmp->pm_BlkPerSec;
if (FAT32(pmp)) {
pmp->pm_rootdirblk = getulong(b710->bpbRootClust);
pmp->pm_firstcluster = pmp->pm_fatblk
+ (pmp->pm_FATs * pmp->pm_FATsecs);
pmp->pm_fsinfo = getushort(b710->bpbFSInfo) * pmp->pm_BlkPerSec;
} else {
pmp->pm_rootdirblk = pmp->pm_fatblk +
(pmp->pm_FATs * pmp->pm_FATsecs);
pmp->pm_rootdirsize = (pmp->pm_RootDirEnts * sizeof(struct direntry)
+ DEV_BSIZE - 1)
/ DEV_BSIZE; /* in blocks */
pmp->pm_firstcluster = pmp->pm_rootdirblk + pmp->pm_rootdirsize;
}
pmp->pm_maxcluster = (pmp->pm_HugeSectors - pmp->pm_firstcluster) /
SecPerClust + 1;
pmp->pm_fatsize = pmp->pm_FATsecs * DEV_BSIZE; /* XXX not used? */
if (pmp->pm_fatmask == 0) {
if (pmp->pm_maxcluster
<= ((CLUST_RSRVD - CLUST_FIRST) & FAT12_MASK)) {
/*
* This will usually be a floppy disk. This size makes
* sure that one fat entry will not be split across
* multiple blocks.
*/
pmp->pm_fatmask = FAT12_MASK;
pmp->pm_fatmult = 3;
pmp->pm_fatdiv = 2;
} else {
pmp->pm_fatmask = FAT16_MASK;
pmp->pm_fatmult = 2;
pmp->pm_fatdiv = 1;
}
}
clusters = (pmp->pm_fatsize / pmp->pm_fatmult) * pmp->pm_fatdiv;
if (pmp->pm_maxcluster >= clusters) {
printf("Warning: number of clusters (%ld) exceeds FAT "
"capacity (%ld)\n", pmp->pm_maxcluster + 1, clusters);
pmp->pm_maxcluster = clusters - 1;
}
1994-09-19 15:41:57 +00:00
if (FAT12(pmp))
pmp->pm_fatblocksize = 3 * pmp->pm_BytesPerSec;
else
pmp->pm_fatblocksize = MSDOSFS_DFLTBSIZE;
1994-09-19 15:41:57 +00:00
pmp->pm_fatblocksec = pmp->pm_fatblocksize / DEV_BSIZE;
pmp->pm_bnshift = ffs(DEV_BSIZE) - 1;
1994-09-19 15:41:57 +00:00
/*
* Compute mask and shift value for isolating cluster relative byte
* offsets and cluster numbers from a file offset.
*/
pmp->pm_bpcluster = SecPerClust * DEV_BSIZE;
pmp->pm_crbomask = pmp->pm_bpcluster - 1;
pmp->pm_cnshift = ffs(pmp->pm_bpcluster) - 1;
/*
* Check for valid cluster size
* must be a power of 2
*/
if (pmp->pm_bpcluster ^ (1 << pmp->pm_cnshift)) {
1994-09-19 15:41:57 +00:00
error = EINVAL;
goto error_exit;
}
/*
* Release the bootsector buffer.
*/
brelse(bp);
bp = NULL;
/*
* Check FSInfo.
*/
if (pmp->pm_fsinfo) {
struct fsinfo *fp;
if ((error = bread(devvp, pmp->pm_fsinfo, fsi_size(pmp),
NOCRED, &bp)) != 0)
goto error_exit;
fp = (struct fsinfo *)bp->b_data;
if (!bcmp(fp->fsisig1, "RRaA", 4)
&& !bcmp(fp->fsisig2, "rrAa", 4)
&& !bcmp(fp->fsisig3, "\0\0\125\252", 4)
&& !bcmp(fp->fsisig4, "\0\0\125\252", 4))
pmp->pm_nxtfree = getulong(fp->fsinxtfree);
else
pmp->pm_fsinfo = 0;
brelse(bp);
bp = NULL;
}
1994-09-19 15:41:57 +00:00
/*
* Check and validate (or perhaps invalidate?) the fsinfo structure?
1994-09-19 15:41:57 +00:00
*/
if (pmp->pm_fsinfo && pmp->pm_nxtfree > pmp->pm_maxcluster) {
printf(
"Next free cluster in FSInfo (%lu) exceeds maxcluster (%lu)\n",
pmp->pm_nxtfree, pmp->pm_maxcluster);
error = EINVAL;
goto error_exit;
}
1994-09-19 15:41:57 +00:00
/*
* Allocate memory for the bitmap of allocated clusters, and then
* fill it in.
*/
pmp->pm_inusemap = malloc(((pmp->pm_maxcluster + N_INUSEBITS - 1)
/ N_INUSEBITS)
* sizeof(*pmp->pm_inusemap),
M_MSDOSFSFAT, M_WAITOK);
/*
* fillinusemap() needs pm_devvp.
*/
pmp->pm_dev = dev;
pmp->pm_devvp = devvp;
/*
* Have the inuse map filled in.
*/
if ((error = fillinusemap(pmp)) != 0)
1994-09-19 15:41:57 +00:00
goto error_exit;
/*
* If they want fat updates to be synchronous then let them suffer
* the performance degradation in exchange for the on disk copy of
* the fat being correct just about all the time. I suppose this
* would be a good thing to turn on if the kernel is still flakey.
*/
if (mp->mnt_flag & MNT_SYNCHRONOUS)
pmp->pm_flags |= MSDOSFSMNT_WAITONFAT;
1994-09-19 15:41:57 +00:00
/*
* Finish up.
*/
if (ronly)
pmp->pm_flags |= MSDOSFSMNT_RONLY;
else
1994-09-19 15:41:57 +00:00
pmp->pm_fmod = 1;
mp->mnt_data = (qaddr_t) pmp;
mp->mnt_stat.f_fsid.val[0] = dev2udev(dev);
mp->mnt_stat.f_fsid.val[1] = mp->mnt_vfc->vfc_typenum;
mp->mnt_flag |= MNT_LOCAL;
devvp->v_rdev->si_mountpoint = mp;
1994-09-19 15:41:57 +00:00
return 0;
error_exit:
if (bp)
brelse(bp);
(void) VOP_CLOSE(devvp, ronly ? FREAD : FREAD | FWRITE, NOCRED, td);
1994-09-19 15:41:57 +00:00
if (pmp) {
if (pmp->pm_inusemap)
free(pmp->pm_inusemap, M_MSDOSFSFAT);
free(pmp, M_MSDOSFSMNT);
mp->mnt_data = (qaddr_t)0;
1994-09-19 15:41:57 +00:00
}
return (error);
1994-09-19 15:41:57 +00:00
}
/*
* Unmount the filesystem described by mp.
*/
static int
msdosfs_unmount(mp, mntflags, td)
1994-09-19 15:41:57 +00:00
struct mount *mp;
int mntflags;
struct thread *td;
1994-09-19 15:41:57 +00:00
{
struct msdosfsmount *pmp;
int error, flags;
1994-09-19 15:41:57 +00:00
flags = 0;
if (mntflags & MNT_FORCE)
1994-09-19 15:41:57 +00:00
flags |= FORCECLOSE;
error = vflush(mp, 0, flags);
if (error)
1994-09-19 15:41:57 +00:00
return error;
pmp = VFSTOMSDOSFS(mp);
pmp->pm_devvp->v_rdev->si_mountpoint = NULL;
#ifdef MSDOSFS_DEBUG
{
struct vnode *vp = pmp->pm_devvp;
printf("msdosfs_umount(): just before calling VOP_CLOSE()\n");
printf("iflag %08lx, usecount %d, writecount %d, holdcnt %ld\n",
vp->vi_flag, vp->v_usecount, vp->v_writecount,
vp->v_holdcnt);
printf("id %lu, mount %p, op %p\n",
vp->v_id, vp->v_mount, vp->v_op);
printf("freef %p, freeb %p, mount %p\n",
TAILQ_NEXT(vp, v_freelist), vp->v_freelist.tqe_prev,
vp->v_mount);
printf("cleanblkhd %p, dirtyblkhd %p, numoutput %ld, type %d\n",
TAILQ_FIRST(&vp->v_cleanblkhd),
TAILQ_FIRST(&vp->v_dirtyblkhd),
vp->v_numoutput, vp->v_type);
printf("union %p, tag %d, data[0] %08x, data[1] %08x\n",
vp->v_socket, vp->v_tag,
((u_int *)vp->v_data)[0],
((u_int *)vp->v_data)[1]);
}
#endif
error = VOP_CLOSE(pmp->pm_devvp,
(pmp->pm_flags&MSDOSFSMNT_RONLY) ? FREAD : FREAD | FWRITE,
NOCRED, td);
1994-09-19 15:41:57 +00:00
vrele(pmp->pm_devvp);
free(pmp->pm_inusemap, M_MSDOSFSFAT);
free(pmp, M_MSDOSFSMNT);
mp->mnt_data = (qaddr_t)0;
mp->mnt_flag &= ~MNT_LOCAL;
return (error);
1994-09-19 15:41:57 +00:00
}
static int
1994-09-19 15:41:57 +00:00
msdosfs_root(mp, vpp)
struct mount *mp;
struct vnode **vpp;
{
struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
1994-09-19 15:41:57 +00:00
struct denode *ndep;
int error;
#ifdef MSDOSFS_DEBUG
printf("msdosfs_root(); mp %p, pmp %p\n", mp, pmp);
1994-09-19 15:41:57 +00:00
#endif
error = deget(pmp, MSDOSFSROOT, MSDOSFSROOT_OFS, &ndep);
if (error)
return (error);
*vpp = DETOV(ndep);
return (0);
1994-09-19 15:41:57 +00:00
}
static int
msdosfs_statfs(mp, sbp, td)
1994-09-19 15:41:57 +00:00
struct mount *mp;
struct statfs *sbp;
struct thread *td;
1994-09-19 15:41:57 +00:00
{
struct msdosfsmount *pmp;
1994-09-19 15:41:57 +00:00
pmp = VFSTOMSDOSFS(mp);
1994-09-19 15:41:57 +00:00
sbp->f_bsize = pmp->pm_bpcluster;
sbp->f_iosize = pmp->pm_bpcluster;
sbp->f_blocks = pmp->pm_maxcluster + 1;
1994-09-19 15:41:57 +00:00
sbp->f_bfree = pmp->pm_freeclustercount;
sbp->f_bavail = pmp->pm_freeclustercount;
sbp->f_files = pmp->pm_RootDirEnts; /* XXX */
sbp->f_ffree = 0; /* what to put in here? */
if (sbp != &mp->mnt_stat) {
sbp->f_type = mp->mnt_vfc->vfc_typenum;
bcopy(mp->mnt_stat.f_mntonname, sbp->f_mntonname, MNAMELEN);
bcopy(mp->mnt_stat.f_mntfromname, sbp->f_mntfromname, MNAMELEN);
1994-09-19 15:41:57 +00:00
}
strncpy(sbp->f_fstypename, mp->mnt_vfc->vfc_name, MFSNAMELEN);
return (0);
1994-09-19 15:41:57 +00:00
}
static int
msdosfs_sync(mp, waitfor, cred, td)
1994-09-19 15:41:57 +00:00
struct mount *mp;
int waitfor;
struct ucred *cred;
struct thread *td;
1994-09-19 15:41:57 +00:00
{
struct vnode *vp, *nvp;
1994-09-19 15:41:57 +00:00
struct denode *dep;
struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
int error, allerror = 0;
1994-09-19 15:41:57 +00:00
/*
* If we ever switch to not updating all of the fats all the time,
* this would be the place to update them from the first one.
*/
if (pmp->pm_fmod != 0) {
if (pmp->pm_flags & MSDOSFSMNT_RONLY)
1994-09-19 15:41:57 +00:00
panic("msdosfs_sync: rofs mod");
else {
/* update fats here */
}
}
1994-09-19 15:41:57 +00:00
/*
* Write back each (modified) denode.
1994-09-19 15:41:57 +00:00
*/
Change and clean the mutex lock interface. mtx_enter(lock, type) becomes: mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks) mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized) similarily, for releasing a lock, we now have: mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN. We change the caller interface for the two different types of locks because the semantics are entirely different for each case, and this makes it explicitly clear and, at the same time, it rids us of the extra `type' argument. The enter->lock and exit->unlock change has been made with the idea that we're "locking data" and not "entering locked code" in mind. Further, remove all additional "flags" previously passed to the lock acquire/release routines with the exception of two: MTX_QUIET and MTX_NOSWITCH The functionality of these flags is preserved and they can be passed to the lock/unlock routines by calling the corresponding wrappers: mtx_{lock, unlock}_flags(lock, flag(s)) and mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN locks, respectively. Re-inline some lock acq/rel code; in the sleep lock case, we only inline the _obtain_lock()s in order to ensure that the inlined code fits into a cache line. In the spin lock case, we inline recursion and actually only perform a function call if we need to spin. This change has been made with the idea that we generally tend to avoid spin locks and that also the spin locks that we do have and are heavily used (i.e. sched_lock) do recurse, and therefore in an effort to reduce function call overhead for some architectures (such as alpha), we inline recursion for this case. Create a new malloc type for the witness code and retire from using the M_DEV type. The new type is called M_WITNESS and is only declared if WITNESS is enabled. 
Begin cleaning up some machdep/mutex.h code - specifically updated the "optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently need those. Finally, caught up to the interface changes in all sys code. Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
mtx_lock(&mntvnode_mtx);
1994-09-19 15:41:57 +00:00
loop:
for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL; vp = nvp) {
/*
* If the vnode that we are about to sync is no longer
* associated with this mount point, start over.
*/
if (vp->v_mount != mp)
1994-09-19 15:41:57 +00:00
goto loop;
nvp = TAILQ_NEXT(vp, v_nmntvnodes);
mtx_unlock(&mntvnode_mtx);
Change and clean the mutex lock interface. mtx_enter(lock, type) becomes: mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks) mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized) similarily, for releasing a lock, we now have: mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN. We change the caller interface for the two different types of locks because the semantics are entirely different for each case, and this makes it explicitly clear and, at the same time, it rids us of the extra `type' argument. The enter->lock and exit->unlock change has been made with the idea that we're "locking data" and not "entering locked code" in mind. Further, remove all additional "flags" previously passed to the lock acquire/release routines with the exception of two: MTX_QUIET and MTX_NOSWITCH The functionality of these flags is preserved and they can be passed to the lock/unlock routines by calling the corresponding wrappers: mtx_{lock, unlock}_flags(lock, flag(s)) and mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN locks, respectively. Re-inline some lock acq/rel code; in the sleep lock case, we only inline the _obtain_lock()s in order to ensure that the inlined code fits into a cache line. In the spin lock case, we inline recursion and actually only perform a function call if we need to spin. This change has been made with the idea that we generally tend to avoid spin locks and that also the spin locks that we do have and are heavily used (i.e. sched_lock) do recurse, and therefore in an effort to reduce function call overhead for some architectures (such as alpha), we inline recursion for this case. Create a new malloc type for the witness code and retire from using the M_DEV type. The new type is called M_WITNESS and is only declared if WITNESS is enabled. 
Begin cleaning up some machdep/mutex.h code - specifically updated the "optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently need those. Finally, caught up to the interface changes in all sys code. Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
mtx_lock(&vp->v_interlock);
1994-09-19 15:41:57 +00:00
dep = VTODE(vp);
if (vp->v_type == VNON ||
((dep->de_flag &
(DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0 &&
(TAILQ_EMPTY(&vp->v_dirtyblkhd) || waitfor == MNT_LAZY))) {
Change and clean the mutex lock interface. mtx_enter(lock, type) becomes: mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks) mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized) similarily, for releasing a lock, we now have: mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN. We change the caller interface for the two different types of locks because the semantics are entirely different for each case, and this makes it explicitly clear and, at the same time, it rids us of the extra `type' argument. The enter->lock and exit->unlock change has been made with the idea that we're "locking data" and not "entering locked code" in mind. Further, remove all additional "flags" previously passed to the lock acquire/release routines with the exception of two: MTX_QUIET and MTX_NOSWITCH The functionality of these flags is preserved and they can be passed to the lock/unlock routines by calling the corresponding wrappers: mtx_{lock, unlock}_flags(lock, flag(s)) and mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN locks, respectively. Re-inline some lock acq/rel code; in the sleep lock case, we only inline the _obtain_lock()s in order to ensure that the inlined code fits into a cache line. In the spin lock case, we inline recursion and actually only perform a function call if we need to spin. This change has been made with the idea that we generally tend to avoid spin locks and that also the spin locks that we do have and are heavily used (i.e. sched_lock) do recurse, and therefore in an effort to reduce function call overhead for some architectures (such as alpha), we inline recursion for this case. Create a new malloc type for the witness code and retire from using the M_DEV type. The new type is called M_WITNESS and is only declared if WITNESS is enabled. 
Begin cleaning up some machdep/mutex.h code - specifically updated the "optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently need those. Finally, caught up to the interface changes in all sys code. Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
mtx_unlock(&vp->v_interlock);
mtx_lock(&mntvnode_mtx);
1994-09-19 15:41:57 +00:00
continue;
}
error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
if (error) {
Change and clean the mutex lock interface. mtx_enter(lock, type) becomes: mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks) mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized) similarily, for releasing a lock, we now have: mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN. We change the caller interface for the two different types of locks because the semantics are entirely different for each case, and this makes it explicitly clear and, at the same time, it rids us of the extra `type' argument. The enter->lock and exit->unlock change has been made with the idea that we're "locking data" and not "entering locked code" in mind. Further, remove all additional "flags" previously passed to the lock acquire/release routines with the exception of two: MTX_QUIET and MTX_NOSWITCH The functionality of these flags is preserved and they can be passed to the lock/unlock routines by calling the corresponding wrappers: mtx_{lock, unlock}_flags(lock, flag(s)) and mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN locks, respectively. Re-inline some lock acq/rel code; in the sleep lock case, we only inline the _obtain_lock()s in order to ensure that the inlined code fits into a cache line. In the spin lock case, we inline recursion and actually only perform a function call if we need to spin. This change has been made with the idea that we generally tend to avoid spin locks and that also the spin locks that we do have and are heavily used (i.e. sched_lock) do recurse, and therefore in an effort to reduce function call overhead for some architectures (such as alpha), we inline recursion for this case. Create a new malloc type for the witness code and retire from using the M_DEV type. The new type is called M_WITNESS and is only declared if WITNESS is enabled. 
Begin cleaning up some machdep/mutex.h code - specifically updated the "optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently need those. Finally, caught up to the interface changes in all sys code. Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
mtx_lock(&mntvnode_mtx);
if (error == ENOENT)
goto loop;
continue;
}
error = VOP_FSYNC(vp, cred, waitfor, td);
if (error)
1994-09-19 15:41:57 +00:00
allerror = error;
VOP_UNLOCK(vp, 0, td);
vrele(vp);
Change and clean the mutex lock interface. mtx_enter(lock, type) becomes: mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks) mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized) similarily, for releasing a lock, we now have: mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN. We change the caller interface for the two different types of locks because the semantics are entirely different for each case, and this makes it explicitly clear and, at the same time, it rids us of the extra `type' argument. The enter->lock and exit->unlock change has been made with the idea that we're "locking data" and not "entering locked code" in mind. Further, remove all additional "flags" previously passed to the lock acquire/release routines with the exception of two: MTX_QUIET and MTX_NOSWITCH The functionality of these flags is preserved and they can be passed to the lock/unlock routines by calling the corresponding wrappers: mtx_{lock, unlock}_flags(lock, flag(s)) and mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN locks, respectively. Re-inline some lock acq/rel code; in the sleep lock case, we only inline the _obtain_lock()s in order to ensure that the inlined code fits into a cache line. In the spin lock case, we inline recursion and actually only perform a function call if we need to spin. This change has been made with the idea that we generally tend to avoid spin locks and that also the spin locks that we do have and are heavily used (i.e. sched_lock) do recurse, and therefore in an effort to reduce function call overhead for some architectures (such as alpha), we inline recursion for this case. Create a new malloc type for the witness code and retire from using the M_DEV type. The new type is called M_WITNESS and is only declared if WITNESS is enabled. 
Begin cleaning up some machdep/mutex.h code - specifically updated the "optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently need those. Finally, caught up to the interface changes in all sys code. Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
mtx_lock(&mntvnode_mtx);
1994-09-19 15:41:57 +00:00
}
Change and clean the mutex lock interface. mtx_enter(lock, type) becomes: mtx_lock(lock) for sleep locks (MTX_DEF-initialized locks) mtx_lock_spin(lock) for spin locks (MTX_SPIN-initialized) similarily, for releasing a lock, we now have: mtx_unlock(lock) for MTX_DEF and mtx_unlock_spin(lock) for MTX_SPIN. We change the caller interface for the two different types of locks because the semantics are entirely different for each case, and this makes it explicitly clear and, at the same time, it rids us of the extra `type' argument. The enter->lock and exit->unlock change has been made with the idea that we're "locking data" and not "entering locked code" in mind. Further, remove all additional "flags" previously passed to the lock acquire/release routines with the exception of two: MTX_QUIET and MTX_NOSWITCH The functionality of these flags is preserved and they can be passed to the lock/unlock routines by calling the corresponding wrappers: mtx_{lock, unlock}_flags(lock, flag(s)) and mtx_{lock, unlock}_spin_flags(lock, flag(s)) for MTX_DEF and MTX_SPIN locks, respectively. Re-inline some lock acq/rel code; in the sleep lock case, we only inline the _obtain_lock()s in order to ensure that the inlined code fits into a cache line. In the spin lock case, we inline recursion and actually only perform a function call if we need to spin. This change has been made with the idea that we generally tend to avoid spin locks and that also the spin locks that we do have and are heavily used (i.e. sched_lock) do recurse, and therefore in an effort to reduce function call overhead for some architectures (such as alpha), we inline recursion for this case. Create a new malloc type for the witness code and retire from using the M_DEV type. The new type is called M_WITNESS and is only declared if WITNESS is enabled. 
Begin cleaning up some machdep/mutex.h code - specifically updated the "optimized" inlined code in alpha/mutex.h and wrote MTX_LOCK_SPIN and MTX_UNLOCK_SPIN asm macros for the i386/mutex.h as we presently need those. Finally, caught up to the interface changes in all sys code. Contributors: jake, jhb, jasone (in no particular order)
2001-02-09 06:11:45 +00:00
mtx_unlock(&mntvnode_mtx);
1994-09-19 15:41:57 +00:00
/*
* Flush filesystem control info.
*/
if (waitfor != MNT_LAZY) {
vn_lock(pmp->pm_devvp, LK_EXCLUSIVE | LK_RETRY, td);
error = VOP_FSYNC(pmp->pm_devvp, cred, waitfor, td);
if (error)
allerror = error;
VOP_UNLOCK(pmp->pm_devvp, 0, td);
}
return (allerror);
1994-09-19 15:41:57 +00:00
}
1995-11-07 14:06:45 +00:00
static int
msdosfs_fhtovp(mp, fhp, vpp)
1994-09-19 15:41:57 +00:00
struct mount *mp;
struct fid *fhp;
struct vnode **vpp;
{
struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
1994-09-19 15:41:57 +00:00
struct defid *defhp = (struct defid *) fhp;
struct denode *dep;
int error;
error = deget(pmp, defhp->defid_dirclust, defhp->defid_dirofs, &dep);
1994-09-19 15:41:57 +00:00
if (error) {
*vpp = NULLVP;
return (error);
1994-09-19 15:41:57 +00:00
}
*vpp = DETOV(dep);
return (0);
}
1995-11-07 14:06:45 +00:00
static int
1994-09-19 15:41:57 +00:00
msdosfs_vptofh(vp, fhp)
struct vnode *vp;
struct fid *fhp;
{
struct denode *dep;
struct defid *defhp;
1994-09-19 15:41:57 +00:00
dep = VTODE(vp);
defhp = (struct defid *)fhp;
1994-09-19 15:41:57 +00:00
defhp->defid_len = sizeof(struct defid);
defhp->defid_dirclust = dep->de_dirclust;
defhp->defid_dirofs = dep->de_diroffset;
/* defhp->defid_gen = dep->de_gen; */
return (0);
1994-09-19 15:41:57 +00:00
}
1995-11-07 14:10:19 +00:00
static struct vfsops msdosfs_vfsops = {
1994-09-19 15:41:57 +00:00
msdosfs_mount,
vfs_stdstart,
1994-09-19 15:41:57 +00:00
msdosfs_unmount,
msdosfs_root,
vfs_stdquotactl,
1994-09-19 15:41:57 +00:00
msdosfs_statfs,
msdosfs_sync,
vfs_stdvget,
1994-09-19 15:41:57 +00:00
msdosfs_fhtovp,
vfs_stdcheckexp,
1994-09-19 15:41:57 +00:00
msdosfs_vptofh,
msdosfs_init,
msdosfs_uninit,
vfs_stdextattrctl,
1994-09-19 15:41:57 +00:00
};
VFS_SET(msdosfs_vfsops, msdosfs, 0);