aec0fb7b40
initializations but we did have lofty goals and big ideals. Adjust to more contemporary circumstances and gain type checking. Replace the entire vop_t frobbing thing with properly typed structures. The only casualty is that we can not add a new VOP_ method with a loadable module. History has not given us reason to belive this would ever be feasible in the the first place. Eliminate in toto VOCALL(), vop_t, VNODEOP_SET() etc. Give coda correct prototypes and function definitions for all vop_()s. Generate a bit more data from the vnode_if.src file: a struct vop_vector and protype typedefs for all vop methods. Add a new vop_bypass() and make vop_default be a pointer to another struct vop_vector. Remove a lot of vfs_init since vop_vector is ready to use from the compiler. Cast various vop_mumble() to void * with uppercase name, for instance VOP_PANIC, VOP_NULL etc. Implement VCALL() by making vdesc_offset the offsetof() the relevant function pointer in vop_vector. This is disgusting but since the code is generated by a script comparatively safe. The alternative for nullfs etc. would be much worse. Fix up all vnode method vectors to remove casts so they become typesafe. (The bulk of this is generated by scripts)
295 lines
7.9 KiB
C
295 lines
7.9 KiB
C
/*-
|
|
* Copyright (c) 2001 Dag-Erling Coïdan Smørgrav
|
|
* All rights reserved.
|
|
*
|
|
* Redistribution and use in source and binary forms, with or without
|
|
* modification, are permitted provided that the following conditions
|
|
* are met:
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
* notice, this list of conditions and the following disclaimer
|
|
* in this position and unchanged.
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
* documentation and/or other materials provided with the distribution.
|
|
* 3. The name of the author may not be used to endorse or promote products
|
|
* derived from this software without specific prior written permission.
|
|
*
|
|
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
|
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
|
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
|
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
*
|
|
* $FreeBSD$
|
|
*/
|
|
|
|
#include <sys/param.h>
|
|
#include <sys/kernel.h>
|
|
#include <sys/systm.h>
|
|
#include <sys/eventhandler.h>
|
|
#include <sys/lock.h>
|
|
#include <sys/malloc.h>
|
|
#include <sys/mutex.h>
|
|
#include <sys/proc.h>
|
|
#include <sys/sysctl.h>
|
|
#include <sys/vnode.h>
|
|
|
|
#include <fs/pseudofs/pseudofs.h>
|
|
#include <fs/pseudofs/pseudofs_internal.h>
|
|
|
|
/* Malloc type used for struct pfs_vdata cache entries. */
static MALLOC_DEFINE(M_PFSVNCACHE, "pfs_vncache", "pseudofs vnode cache");

/* Protects pfs_vncache and the pvd_prev/pvd_next links of every entry. */
static struct mtx pfs_vncache_mutex;
/* Head of the doubly-linked list of cached vnode data entries. */
static struct pfs_vdata *pfs_vncache;
/* Tag for the process_exit event handler, kept for deregistration. */
static eventhandler_tag pfs_exit_tag;
static void pfs_exit(void *arg, struct proc *p);

SYSCTL_NODE(_vfs_pfs, OID_AUTO, vncache, CTLFLAG_RW, 0,
    "pseudofs vnode cache");

/* Current number of entries in the cache (exported read-only). */
static int pfs_vncache_entries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, entries, CTLFLAG_RD,
    &pfs_vncache_entries, 0,
    "number of entries in the vnode cache");

/* High-water mark of pfs_vncache_entries since initialization. */
static int pfs_vncache_maxentries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, maxentries, CTLFLAG_RD,
    &pfs_vncache_maxentries, 0,
    "highest number of entries in the vnode cache");

/* Lookup statistics; updated in pfs_vncache_alloc(). */
static int pfs_vncache_hits;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, hits, CTLFLAG_RD,
    &pfs_vncache_hits, 0,
    "number of cache hits since initialization");

static int pfs_vncache_misses;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, misses, CTLFLAG_RD,
    &pfs_vncache_misses, 0,
    "number of cache misses since initialization");

extern struct vop_vector pfs_vnodeops;	/* XXX -> .h file */
|
|
|
|
/*
 * Initialize vnode cache
 *
 * Called at module load.  Sets up the cache mutex and hooks pfs_exit()
 * into the process_exit event so per-process vnodes can be reclaimed.
 */
void
pfs_vncache_load(void)
{
	/* The mutex must exist before the exit handler can ever fire. */
	mtx_init(&pfs_vncache_mutex, "pseudofs_vncache", NULL, MTX_DEF);
	pfs_exit_tag = EVENTHANDLER_REGISTER(process_exit, pfs_exit, NULL,
	    EVENTHANDLER_PRI_ANY);
}
|
|
|
|
/*
 * Tear down vnode cache
 *
 * Called at module unload.  Deregisters the exit handler first so it
 * cannot run against a destroyed mutex; warns if entries are leaked.
 */
void
pfs_vncache_unload(void)
{
	EVENTHANDLER_DEREGISTER(process_exit, pfs_exit_tag);
	/* Any remaining entries indicate a vnode/reference leak. */
	if (pfs_vncache_entries != 0)
		printf("pfs_vncache_unload(): %d entries remaining\n",
		    pfs_vncache_entries);
	mtx_destroy(&pfs_vncache_mutex);
}
|
|
|
|
/*
|
|
* Allocate a vnode
|
|
*/
|
|
int
|
|
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
|
|
struct pfs_node *pn, pid_t pid)
|
|
{
|
|
struct pfs_vdata *pvd;
|
|
int error;
|
|
|
|
/*
|
|
* See if the vnode is in the cache.
|
|
* XXX linear search is not very efficient.
|
|
*/
|
|
mtx_lock(&pfs_vncache_mutex);
|
|
for (pvd = pfs_vncache; pvd; pvd = pvd->pvd_next) {
|
|
if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
|
|
pvd->pvd_vnode->v_mount == mp) {
|
|
if (vget(pvd->pvd_vnode, 0, curthread) == 0) {
|
|
++pfs_vncache_hits;
|
|
*vpp = pvd->pvd_vnode;
|
|
mtx_unlock(&pfs_vncache_mutex);
|
|
/* XXX see comment at top of pfs_lookup() */
|
|
cache_purge(*vpp);
|
|
vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE,
|
|
curthread);
|
|
return (0);
|
|
}
|
|
/* XXX if this can happen, we're in trouble */
|
|
break;
|
|
}
|
|
}
|
|
mtx_unlock(&pfs_vncache_mutex);
|
|
++pfs_vncache_misses;
|
|
|
|
/* nope, get a new one */
|
|
MALLOC(pvd, struct pfs_vdata *, sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
|
|
if (++pfs_vncache_entries > pfs_vncache_maxentries)
|
|
pfs_vncache_maxentries = pfs_vncache_entries;
|
|
error = getnewvnode("pseudofs", mp, &pfs_vnodeops, vpp);
|
|
if (error) {
|
|
FREE(pvd, M_PFSVNCACHE);
|
|
return (error);
|
|
}
|
|
pvd->pvd_pn = pn;
|
|
pvd->pvd_pid = pid;
|
|
(*vpp)->v_data = pvd;
|
|
switch (pn->pn_type) {
|
|
case pfstype_root:
|
|
(*vpp)->v_vflag = VV_ROOT;
|
|
#if 0
|
|
printf("root vnode allocated\n");
|
|
#endif
|
|
/* fall through */
|
|
case pfstype_dir:
|
|
case pfstype_this:
|
|
case pfstype_parent:
|
|
case pfstype_procdir:
|
|
(*vpp)->v_type = VDIR;
|
|
break;
|
|
case pfstype_file:
|
|
(*vpp)->v_type = VREG;
|
|
break;
|
|
case pfstype_symlink:
|
|
(*vpp)->v_type = VLNK;
|
|
break;
|
|
case pfstype_none:
|
|
KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
|
|
default:
|
|
panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
|
|
}
|
|
/*
|
|
* Propagate flag through to vnode so users know it can change
|
|
* if the process changes (i.e. execve)
|
|
*/
|
|
if ((pn->pn_flags & PFS_PROCDEP) != 0)
|
|
(*vpp)->v_vflag |= VV_PROCDEP;
|
|
pvd->pvd_vnode = *vpp;
|
|
mtx_lock(&pfs_vncache_mutex);
|
|
pvd->pvd_prev = NULL;
|
|
pvd->pvd_next = pfs_vncache;
|
|
if (pvd->pvd_next)
|
|
pvd->pvd_next->pvd_prev = pvd;
|
|
pfs_vncache = pvd;
|
|
mtx_unlock(&pfs_vncache_mutex);
|
|
(*vpp)->v_vnlock->lk_flags |= LK_CANRECURSE;
|
|
vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE, curthread);
|
|
return (0);
|
|
}
|
|
|
|
/*
 * Free a vnode
 *
 * Unlink vp's cache entry from the list and release it.  Called from
 * the vnode's reclaim path; always returns 0.
 */
int
pfs_vncache_free(struct vnode *vp)
{
	struct pfs_vdata *pvd;

	/* Unlink under the mutex that protects the list pointers. */
	mtx_lock(&pfs_vncache_mutex);
	pvd = (struct pfs_vdata *)vp->v_data;
	KASSERT(pvd != NULL, ("pfs_vncache_free(): no vnode data\n"));
	if (pvd->pvd_next)
		pvd->pvd_next->pvd_prev = pvd->pvd_prev;
	if (pvd->pvd_prev)
		pvd->pvd_prev->pvd_next = pvd->pvd_next;
	else
		/* pvd was the list head. */
		pfs_vncache = pvd->pvd_next;
	mtx_unlock(&pfs_vncache_mutex);

	/* NOTE(review): stat counter updated outside the mutex — racy
	 * but only affects the sysctl statistic, not correctness. */
	--pfs_vncache_entries;
	FREE(pvd, M_PFSVNCACHE);
	vp->v_data = NULL;
	return (0);
}
|
|
|
|
/*
 * Free all vnodes associated with a defunct process
 *
 * process_exit event handler (registered in pfs_vncache_load()); vgone()s
 * every cached vnode whose pvd_pid matches the exiting process.
 */
static void
pfs_exit(void *arg, struct proc *p)
{
	struct pfs_vdata *pvd;
	struct vnode *vnp;

	mtx_lock(&Giant);
	/*
	 * This is extremely inefficient due to the fact that vgone() not
	 * only indirectly modifies the vnode cache, but may also sleep.
	 * We can neither hold pfs_vncache_mutex across a vgone() call,
	 * nor make any assumptions about the state of the cache after
	 * vgone() returns.  In consequence, we must start over after
	 * every vgone() call, and keep trying until we manage to traverse
	 * the entire cache.
	 *
	 * The only way to improve this situation is to change the data
	 * structure used to implement the cache.  An obvious choice in
	 * this particular case would be a BST sorted by PID.
	 */
	mtx_lock(&pfs_vncache_mutex);
	pvd = pfs_vncache;
	while (pvd != NULL) {
		if (pvd->pvd_pid == p->p_pid) {
			/* Drop the mutex across vgone(), then restart
			 * from the head — see the comment above. */
			vnp = pvd->pvd_vnode;
			mtx_unlock(&pfs_vncache_mutex);
			vgone(vnp);
			mtx_lock(&pfs_vncache_mutex);
			pvd = pfs_vncache;
		} else {
			pvd = pvd->pvd_next;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);
	mtx_unlock(&Giant);
}
|
|
|
|
/*
 * Disable a pseudofs node, and free all vnodes associated with it
 *
 * Sets PFS_DISABLED (idempotent: returns immediately if already set)
 * and vgone()s every cached vnode backed by pn.  Always returns 0.
 */
int
pfs_disable(struct pfs_node *pn)
{
	struct pfs_vdata *pvd;
	struct vnode *vnp;

	if (pn->pn_flags & PFS_DISABLED)
		return (0);
	/* Mark disabled first so no new vnodes appear for pn. */
	pn->pn_flags |= PFS_DISABLED;
	/* XXX see comment above nearly identical code in pfs_exit() */
	mtx_lock(&pfs_vncache_mutex);
	pvd = pfs_vncache;
	while (pvd != NULL) {
		if (pvd->pvd_pn == pn) {
			/* Drop the mutex across vgone() and restart the
			 * scan from the head of the list. */
			vnp = pvd->pvd_vnode;
			mtx_unlock(&pfs_vncache_mutex);
			vgone(vnp);
			mtx_lock(&pfs_vncache_mutex);
			pvd = pfs_vncache;
		} else {
			pvd = pvd->pvd_next;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);
	return (0);
}
|
|
|
|
/*
|
|
* Re-enable a disabled pseudofs node
|
|
*/
|
|
int
|
|
pfs_enable(struct pfs_node *pn)
|
|
{
|
|
pn->pn_flags &= ~PFS_DISABLED;
|
|
return (0);
|
|
}
|