/* $FreeBSD$ */
/* $NetBSD: msdosfs_denode.c,v 1.28 1998/02/10 14:10:00 mrg Exp $ */
/*-
* Copyright (C) 1994, 1995, 1997 Wolfgang Solfrank.
* Copyright (C) 1994, 1995, 1997 TooLs GmbH.
* All rights reserved.
* Original code by Paul Popelka (paulp@uts.amdahl.com) (see below).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by TooLs GmbH.
* 4. The name of TooLs GmbH may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/*-
* Written by Paul Popelka (paulp@uts.amdahl.com)
*
* You can do anything you want with this software, just don't say you wrote
* it, and don't remove this notice.
*
* This software is provided "as is".
*
* The author supplies this software to be publicly redistributed on the
* understanding that the author is not responsible for the correct
* functioning of this software in any circumstances and is not liable for
* any damages caused by this software.
*
* October 1992
*/
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <fs/msdosfs/bpb.h>
#include <fs/msdosfs/msdosfsmount.h>
#include <fs/msdosfs/direntry.h>
#include <fs/msdosfs/denode.h>
#include <fs/msdosfs/fat.h>
static MALLOC_DEFINE(M_MSDOSFSNODE, "MSDOSFS node", "MSDOSFS vnode private part");
static struct denode **dehashtbl;
static u_long dehash; /* size of hash table - 1 */
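
/*
* Hash on the minor number of the device plus the directory cluster and
* the directory-entry slot (diroff scaled by the entry size) within it.
*/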
#define DEHASH(dev, dcl, doff) (dehashtbl[(minor(dev) + (dcl) + (doff) / \
sizeof(struct direntry)) & dehash])
static struct mtx dehash_mtx;
static int dehash_init;
static struct denode *
msdosfs_hashget(struct cdev *dev, u_long dirclust, u_long diroff);
static void msdosfs_hashins(struct denode *dep);
static void msdosfs_hashrem(struct denode *dep);
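
/*
* Initialize the msdosfs denode layer: allocate the denode hash table
* and set up the mutex that protects it.
*/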
/*ARGSUSED*/
int
msdosfs_init(vfsp)
struct vfsconf *vfsp;
{
/*
* The following lines prevent us from initializing the mutex
* more than once.  I'm not sure why we get called multiple
* times, but the check below prevents the panic when we
* initialize the mutex a second time.  XXX BAD XXX
*/
if (dehash_init) {
printf("Warning: msdosfs_init called more than once!\n");
return (0);
}
dehash_init++;
dehashtbl = hashinit(desiredvnodes/2, M_MSDOSFSMNT, &dehash);
mtx_init(&dehash_mtx, "msdosfs dehash", NULL, MTX_DEF);
return (0);
}
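
/*
* Undo msdosfs_init(): free the denode hash table and destroy its mutex.
*/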
int
msdosfs_uninit(vfsp)
struct vfsconf *vfsp;
{
if (dehashtbl)
free(dehashtbl, M_MSDOSFSMNT);
mtx_destroy(&dehash_mtx);
dehash_init--;
return (0);
}
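
/*
* Look up a denode by device and directory-entry location. If a live
* entry (de_refcnt != 0) is found, vget() its vnode and return the
* denode, restarting the scan if vget() fails; otherwise return NULL.
*/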
static struct denode *
msdosfs_hashget(dev, dirclust, diroff)
struct cdev *dev;
u_long dirclust;
u_long diroff;
{
struct thread *td = curthread; /* XXX */
struct denode *dep;
struct vnode *vp;
loop:
mtx_lock(&dehash_mtx);
for (dep = DEHASH(dev, dirclust, diroff); dep; dep = dep->de_next) {
if (dirclust == dep->de_dirclust
&& diroff == dep->de_diroffset
&& dev == dep->de_dev
&& dep->de_refcnt != 0) {
vp = DETOV(dep);
mtx_lock(&vp->v_interlock);
mtx_unlock(&dehash_mtx);
if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td))
goto loop;
return (dep);
}
}
mtx_unlock(&dehash_mtx);
return (NULL);
}
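
/*
* Insert a denode at the head of its hash chain.
*/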
static void
msdosfs_hashins(dep)
struct denode *dep;
{
struct denode **depp, *deq;
mtx_lock(&dehash_mtx);
depp = &DEHASH(dep->de_dev, dep->de_dirclust, dep->de_diroffset);
deq = *depp;
if (deq)
deq->de_prev = &dep->de_next;
dep->de_next = deq;
dep->de_prev = depp;
*depp = dep;
mtx_unlock(&dehash_mtx);
}
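
/*
* Remove a denode from its hash chain.
*/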
static void
msdosfs_hashrem(dep)
struct denode *dep;
{
struct denode *deq;
mtx_lock(&dehash_mtx);
deq = dep->de_next;
if (deq)
deq->de_prev = dep->de_prev;
*dep->de_prev = deq;
#ifdef DIAGNOSTIC
dep->de_next = NULL;
dep->de_prev = NULL;
#endif
mtx_unlock(&dehash_mtx);
}
/*
* If deget() succeeds it returns with the gotten denode locked.
*
* pmp - address of msdosfsmount structure of the filesystem containing
* the denode of interest. The pm_dev field and the address of
* the msdosfsmount structure are used.
* dirclust - which cluster bp contains, if dirclust is 0 (root directory)
* diroffset is relative to the beginning of the root directory,
* otherwise it is cluster relative.
* diroffset - offset past begin of cluster of denode we want
* depp - returns the address of the gotten denode.
*/
int
deget(pmp, dirclust, diroffset, depp)
struct msdosfsmount *pmp; /* so we know the maj/min number */
u_long dirclust; /* cluster this dir entry came from */
u_long diroffset; /* index of entry within the cluster */
struct denode **depp; /* returns the addr of the gotten denode */
{
int error;
struct cdev *dev = pmp->pm_dev;
struct mount *mntp = pmp->pm_mountp;
struct direntry *direntptr;
struct denode *ldep;
struct vnode *nvp;
struct buf *bp;
struct thread *td = curthread; /* XXX */
#ifdef MSDOSFS_DEBUG
printf("deget(pmp %p, dirclust %lu, diroffset %lx, depp %p)\n",
pmp, dirclust, diroffset, depp);
#endif
/*
* On FAT32 filesystems, root is a (more or less) normal
* directory
*/
if (FAT32(pmp) && dirclust == MSDOSFSROOT)
dirclust = pmp->pm_rootdirblk;
/*
* See if the denode is in the denode cache. Use the location of
* the directory entry to compute the hash value. For a subdirectory,
* use the address of the "." entry; for the root dir (if not FAT32),
* use cluster MSDOSFSROOT, offset MSDOSFSROOT_OFS.
*
* NOTE: The check for de_refcnt > 0 below ensures that the denode being
* examined does not represent an unlinked but still open file.
* These files are not to be accessible even when the directory
* entry that represented the file happens to be reused while the
* deleted file is still open.
*/
ldep = msdosfs_hashget(dev, dirclust, diroffset);
if (ldep) {
*depp = ldep;
return (0);
}
/*
* Do the MALLOC before the getnewvnode since doing so afterward
* might cause a bogus v_data pointer to get dereferenced
* elsewhere if MALLOC should block.
*/
MALLOC(ldep, struct denode *, sizeof(struct denode), M_MSDOSFSNODE, M_WAITOK);
/*
* Directory entry was not in cache, have to create a vnode and
* copy it from the passed disk buffer.
*/
/* getnewvnode() does a VREF() on the vnode */
error = getnewvnode("msdosfs", mntp, &msdosfs_vnodeops, &nvp);
if (error) {
*depp = NULL;
FREE(ldep, M_MSDOSFSNODE);
return error;
}
bzero((caddr_t)ldep, sizeof *ldep);
nvp->v_data = ldep;
ldep->de_vnode = nvp;
ldep->de_flag = 0;
ldep->de_dev = dev;
ldep->de_dirclust = dirclust;
ldep->de_diroffset = diroffset;
fc_purge(ldep, 0); /* init the fat cache for this denode */
/*
* Lock the denode so that it can't be accessed until we've read
* it in and have done what we need to it. Do this here instead
* of at the start of msdosfs_hashins() so that reinsert() can
* call msdosfs_hashins() with a locked denode.
*/
if (VOP_LOCK(nvp, LK_EXCLUSIVE, td) != 0)
panic("deget: unexpected lock failure");
/*
* Insert the denode into the hash queue.
*/
msdosfs_hashins(ldep);
ldep->de_pmp = pmp;
ldep->de_refcnt = 1;
/*
* Copy the directory entry into the denode area of the vnode.
*/
if ((dirclust == MSDOSFSROOT
|| (FAT32(pmp) && dirclust == pmp->pm_rootdirblk))
&& diroffset == MSDOSFSROOT_OFS) {
/*
* Directory entry for the root directory. There isn't one,
* so we manufacture one. We should probably rummage
* through the root directory and find a label entry (if it
* exists), and then use the time and date from that entry
* as the time and date for the root denode.
*/
nvp->v_vflag |= VV_ROOT; /* should be further down XXX */
ldep->de_Attributes = ATTR_DIRECTORY;
ldep->de_LowerCase = 0;
if (FAT32(pmp))
ldep->de_StartCluster = pmp->pm_rootdirblk;
/* de_FileSize will be filled in further down */
else {
ldep->de_StartCluster = MSDOSFSROOT;
ldep->de_FileSize = pmp->pm_rootdirsize * DEV_BSIZE;
}
/*
* fill in time and date so that dos2unixtime() doesn't
* spit up when called from msdosfs_getattr() with root
* denode
*/
ldep->de_CHun = 0;
ldep->de_CTime = 0x0000; /* 00:00:00 */
ldep->de_CDate = (0 << DD_YEAR_SHIFT) | (1 << DD_MONTH_SHIFT)
| (1 << DD_DAY_SHIFT);
/* Jan 1, 1980 */
ldep->de_ADate = ldep->de_CDate;
ldep->de_MTime = ldep->de_CTime;
ldep->de_MDate = ldep->de_CDate;
/* leave the other fields as garbage */
} else {
error = readep(pmp, dirclust, diroffset, &bp, &direntptr);
if (error) {
/*
* The denode does not contain anything useful, so
* it would be wrong to leave it on its hash chain.
* Arrange for vput() to just forget about it.
*/
ldep->de_Name[0] = SLOT_DELETED;
vput(nvp);
*depp = NULL;
return (error);
}
DE_INTERNALIZE(ldep, direntptr);
brelse(bp);
}
/*
* Fill in a few fields of the vnode and finish filling in the
* denode. Then return the address of the found denode.
*/
if (ldep->de_Attributes & ATTR_DIRECTORY) {
/*
* Since DOS directory entries that describe directories
* have 0 in the filesize field, we take this opportunity
* to find out the length of the directory and plug it into
* the denode structure.
*/
u_long size;
/*
* XXX Sometimes the "." entry arrives with a cluster number
* of 0 when it shouldn't.  Use the real cluster number
* instead of what is written in the directory entry.
*/
if ((diroffset == 0) && (ldep->de_StartCluster != dirclust)) {
printf("deget(): . entry at clust %ld != %ld\n",
dirclust, ldep->de_StartCluster);
ldep->de_StartCluster = dirclust;
}
nvp->v_type = VDIR;
if (ldep->de_StartCluster != MSDOSFSROOT) {
error = pcbmap(ldep, 0xffff, 0, &size, 0);
if (error == E2BIG) {
ldep->de_FileSize = de_cn2off(pmp, size);
error = 0;
} else
printf("deget(): pcbmap returned %d\n", error);
}
} else
nvp->v_type = VREG;
ldep->de_modrev = init_va_filerev();
*depp = ldep;
return (0);
}
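
/*
* Write the denode's directory entry back to disk if it has been
* modified; write synchronously if waitfor is nonzero, otherwise use a
* delayed write.
*/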
int
deupdat(dep, waitfor)
struct denode *dep;
int waitfor;
{
int error;
struct buf *bp;
struct direntry *dirp;
struct timespec ts;
if (DETOV(dep)->v_mount->mnt_flag & MNT_RDONLY)
return (0);
getnanotime(&ts);
DETIMES(dep, &ts, &ts, &ts);
if ((dep->de_flag & DE_MODIFIED) == 0)
return (0);
dep->de_flag &= ~DE_MODIFIED;
if (dep->de_Attributes & ATTR_DIRECTORY)
return (0);
if (dep->de_refcnt <= 0)
return (0);
error = readde(dep, &bp, &dirp);
if (error)
return (error);
DE_EXTERNALIZE(dirp, dep);
if (waitfor)
return (bwrite(bp));
else {
bdwrite(bp);
return (0);
}
}
/*
* Truncate the file described by dep to the length specified by length.
*/
int
detrunc(dep, length, flags, cred, td)
struct denode *dep;
u_long length;
int flags;
struct ucred *cred;
struct thread *td;
{
int error;
int allerror;
u_long eofentry;
u_long chaintofree;
daddr_t bn;
int boff;
int isadir = dep->de_Attributes & ATTR_DIRECTORY;
struct buf *bp;
struct msdosfsmount *pmp = dep->de_pmp;
#ifdef MSDOSFS_DEBUG
printf("detrunc(): file %s, length %lu, flags %x\n", dep->de_Name, length, flags);
#endif
/*
* Disallow attempts to truncate the root directory since it is of
* fixed size. That's just the way dos filesystems are. We use
* the VROOT bit in the vnode because checking for the directory
* bit and a startcluster of 0 in the denode is not adequate to
* recognize the root directory at this point in a file or
* directory's life.
*/
if ((DETOV(dep)->v_vflag & VV_ROOT) && !FAT32(pmp)) {
printf("detrunc(): can't truncate root directory, clust %ld, offset %ld\n",
dep->de_dirclust, dep->de_diroffset);
return (EINVAL);
}
if (dep->de_FileSize < length) {
vnode_pager_setsize(DETOV(dep), length);
return deextend(dep, length, cred);
}
/*
* If the desired length is 0 then remember the starting cluster of
* the file and set the StartCluster field in the directory entry
* to 0. If the desired length is not zero, then get the number of
* the last cluster in the shortened file. Then get the number of
* the first cluster in the part of the file that is to be freed.
* Then set the next cluster pointer in the last cluster of the
* file to CLUST_EOFE.
*/
if (length == 0) {
chaintofree = dep->de_StartCluster;
dep->de_StartCluster = 0;
eofentry = ~0;
} else {
error = pcbmap(dep, de_clcount(pmp, length) - 1, 0,
&eofentry, 0);
if (error) {
#ifdef MSDOSFS_DEBUG
printf("detrunc(): pcbmap fails %d\n", error);
#endif
return (error);
}
}
fc_purge(dep, de_clcount(pmp, length));
/*
* If the new length is not a multiple of the cluster size then we
* must zero the tail end of the new last cluster in case it
* becomes part of the file again because of a seek.
*/
if ((boff = length & pmp->pm_crbomask) != 0) {
if (isadir) {
bn = cntobn(pmp, eofentry);
error = bread(pmp->pm_devvp, bn, pmp->pm_bpcluster,
NOCRED, &bp);
if (error) {
brelse(bp);
#ifdef MSDOSFS_DEBUG
printf("detrunc(): bread fails %d\n", error);
#endif
return (error);
}
bzero(bp->b_data + boff, pmp->pm_bpcluster - boff);
if (flags & IO_SYNC)
bwrite(bp);
else
bdwrite(bp);
}
}
/*
* Write out the updated directory entry. Even if the update fails
* we free the trailing clusters.
*/
dep->de_FileSize = length;
if (!isadir)
dep->de_flag |= DE_UPDATE|DE_MODIFIED;
allerror = vtruncbuf(DETOV(dep), cred, td, length, pmp->pm_bpcluster);
#ifdef MSDOSFS_DEBUG
if (allerror)
printf("detrunc(): vtruncbuf error %d\n", allerror);
#endif
error = deupdat(dep, 1);
if (error && (allerror == 0))
allerror = error;
#ifdef MSDOSFS_DEBUG
printf("detrunc(): allerror %d, eofentry %lu\n",
allerror, eofentry);
#endif
/*
* If we need to break the cluster chain for the file then do it
* now.
*/
if (eofentry != ~0) {
error = fatentry(FAT_GET_AND_SET, pmp, eofentry,
&chaintofree, CLUST_EOFE);
if (error) {
#ifdef MSDOSFS_DEBUG
printf("detrunc(): fatentry errors %d\n", error);
#endif
return (error);
}
fc_setcache(dep, FC_LASTFC, de_cluster(pmp, length - 1),
eofentry);
}
/*
* Now free the clusters removed from the file because of the
* truncation.
*/
if (chaintofree != 0 && !MSDOSFSEOF(pmp, chaintofree))
freeclusterchain(pmp, chaintofree);
return (allerror);
}
/*
* Extend the file described by dep to length specified by length.
*/
int
deextend(dep, length, cred)
struct denode *dep;
u_long length;
struct ucred *cred;
{
struct msdosfsmount *pmp = dep->de_pmp;
u_long count;
int error;
/*
* The root of a DOS filesystem cannot be extended.
*/
if ((DETOV(dep)->v_vflag & VV_ROOT) && !FAT32(pmp))
return (EINVAL);
/*
* Directories cannot be extended.
*/
if (dep->de_Attributes & ATTR_DIRECTORY)
return (EISDIR);
if (length <= dep->de_FileSize)
panic("deextend: file too large");
/*
* Compute the number of clusters to allocate.
*/
count = de_clcount(pmp, length) - de_clcount(pmp, dep->de_FileSize);
if (count > 0) {
if (count > pmp->pm_freeclustercount)
return (ENOSPC);
error = extendfile(dep, count, NULL, NULL, DE_CLEAR);
if (error) {
/* truncate the added clusters away again */
(void) detrunc(dep, dep->de_FileSize, 0, cred, NULL);
return (error);
}
}
dep->de_FileSize = length;
dep->de_flag |= DE_UPDATE|DE_MODIFIED;
return (deupdat(dep, 1));
}
/*
* Move a denode to its correct hash queue after the file it represents has
* been moved to a new directory.
*/
void
reinsert(dep)
struct denode *dep;
{
/*
* Fix up the denode cache. If the denode is for a directory,
* there is nothing to do since the hash is based on the starting
* cluster of the directory file and that hasn't changed. For a
* file, the hash is based on the location of the directory entry,
* so we must remove it from the cache and re-enter it with the
* hash based on the new location of the directory entry.
*/
if (dep->de_Attributes & ATTR_DIRECTORY)
return;
msdosfs_hashrem(dep);
msdosfs_hashins(dep);
}
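
/*
* Reclaim a denode: remove it from the hash table and release the
* storage attached to the vnode.
*/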
int
msdosfs_reclaim(ap)
struct vop_reclaim_args /* {
struct vnode *a_vp;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
struct denode *dep = VTODE(vp);
#ifdef MSDOSFS_DEBUG
printf("msdosfs_reclaim(): dep %p, file %s, refcnt %ld\n",
dep, dep->de_Name, dep->de_refcnt);
#endif
if (prtactive && vrefcnt(vp) != 0)
vprint("msdosfs_reclaim(): pushing active", vp);
/*
* Remove the denode from its hash chain.
*/
msdosfs_hashrem(dep);
/*
* Purge old data structures associated with the denode.
*/
#if 0 /* XXX */
dep->de_flag = 0;
#endif
FREE(dep, M_MSDOSFSNODE);
vp->v_data = NULL;
vnode_destroy_vobject(vp);
return (0);
}
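
/*
* The last use of a denode is being released. If the file was deleted
* and the filesystem is writable, truncate it and mark its directory
* slot as empty, then arrange for the vnode to be recycled.
*/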
int
msdosfs_inactive(ap)
struct vop_inactive_args /* {
struct vnode *a_vp;
struct thread *a_td;
} */ *ap;
{
struct vnode *vp = ap->a_vp;
struct denode *dep = VTODE(vp);
struct thread *td = ap->a_td;
int error = 0;
#ifdef MSDOSFS_DEBUG
printf("msdosfs_inactive(): dep %p, de_Name[0] %x\n", dep, dep->de_Name[0]);
#endif
if (prtactive && vrefcnt(vp) != 0)
vprint("msdosfs_inactive(): pushing active", vp);
/*
* Ignore denodes related to stale file handles.
*/
if (dep->de_Name[0] == SLOT_DELETED)
goto out;
/*
* If the file has been deleted and it is on a read/write
* filesystem, then truncate the file, and mark the directory slot
* as empty. (This may not be necessary for the dos filesystem.)
*/
#ifdef MSDOSFS_DEBUG
printf("msdosfs_inactive(): dep %p, refcnt %ld, mntflag %x, MNT_RDONLY %x\n",
dep, dep->de_refcnt, vp->v_mount->mnt_flag, MNT_RDONLY);
#endif
if (dep->de_refcnt <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
error = detrunc(dep, (u_long) 0, 0, NOCRED, td);
dep->de_flag |= DE_UPDATE;
dep->de_Name[0] = SLOT_DELETED;
}
deupdat(dep, 0);
out:
VOP_UNLOCK(vp, 0, td);
/*
* If we are done with the denode, reclaim it
* so that it can be reused immediately.
*/
#ifdef MSDOSFS_DEBUG
printf("msdosfs_inactive(): v_usecount %d, de_Name[0] %x\n",
vrefcnt(vp), dep->de_Name[0]);
#endif
if (dep->de_Name[0] == SLOT_DELETED)
vrecycle(vp, td);
return (error);
}