/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2001 Dag-Erling Coïdan Smørgrav
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer
 *    in this position and unchanged.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
|
|
|
|
|
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Kernel option header; must precede the system headers that it affects. */
#include "opt_pseudofs.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <fs/pseudofs/pseudofs.h>
#include <fs/pseudofs/pseudofs_internal.h>
static MALLOC_DEFINE(M_PFSVNCACHE, "pfs_vncache", "pseudofs vnode cache");

/* Protects the vnode cache hash table and the entry counters below. */
static struct mtx pfs_vncache_mutex;
static eventhandler_tag pfs_exit_tag;
static void pfs_exit(void *arg, struct proc *p);
static void pfs_purge_all(void);

static SYSCTL_NODE(_vfs_pfs, OID_AUTO, vncache, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "pseudofs vnode cache");

/* Current number of cached entries; updated under pfs_vncache_mutex. */
static int pfs_vncache_entries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, entries, CTLFLAG_RD,
    &pfs_vncache_entries, 0,
    "number of entries in the vnode cache");

/* High-water mark of pfs_vncache_entries. */
static int pfs_vncache_maxentries;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, maxentries, CTLFLAG_RD,
    &pfs_vncache_maxentries, 0,
    "highest number of entries in the vnode cache");

static int pfs_vncache_hits;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, hits, CTLFLAG_RD,
    &pfs_vncache_hits, 0,
    "number of cache hits since initialization");

static int pfs_vncache_misses;
SYSCTL_INT(_vfs_pfs_vncache, OID_AUTO, misses, CTLFLAG_RD,
    &pfs_vncache_misses, 0,
    "number of cache misses since initialization");

extern struct vop_vector pfs_vnodeops;	/* XXX -> .h file */

/*
 * Cache entries are hashed by pid.  pfs_vncache_hash is the mask
 * returned by hashinit(9), i.e. one less than the table size, so valid
 * bucket indices are 0 .. pfs_vncache_hash inclusive.
 */
static SLIST_HEAD(pfs_vncache_head, pfs_vdata) *pfs_vncache_hashtbl;
static u_long pfs_vncache_hash;
#define PFS_VNCACHE_HASH(pid)	(&pfs_vncache_hashtbl[(pid) & pfs_vncache_hash])
/*
 * Initialize vnode cache
 *
 * The mutex and the hash table are set up before the process_exit
 * eventhandler is registered, since pfs_exit() uses both.
 */
void
pfs_vncache_load(void)
{

	mtx_init(&pfs_vncache_mutex, "pfs_vncache", NULL, MTX_DEF);
	pfs_vncache_hashtbl = hashinit(maxproc / 4, M_PFSVNCACHE, &pfs_vncache_hash);
	pfs_exit_tag = EVENTHANDLER_REGISTER(process_exit, pfs_exit, NULL,
	    EVENTHANDLER_PRI_ANY);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Tear down vnode cache
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
pfs_vncache_unload(void)
|
|
|
|
{
|
2007-04-14 14:08:30 +00:00
|
|
|
|
2003-03-24 21:15:35 +00:00
|
|
|
EVENTHANDLER_DEREGISTER(process_exit, pfs_exit_tag);
|
2019-10-22 22:52:53 +00:00
|
|
|
pfs_purge_all();
|
2007-04-14 14:08:30 +00:00
|
|
|
KASSERT(pfs_vncache_entries == 0,
|
|
|
|
("%d vncache entries remaining", pfs_vncache_entries));
|
2001-04-07 19:51:12 +00:00
|
|
|
mtx_destroy(&pfs_vncache_mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Allocate a vnode
 *
 * Returns a cached vnode for the (mount, node, pid) triplet when one
 * exists, otherwise allocates a fresh one and inserts it into the cache.
 *
 * Locking: pfs_vncache_mutex protects the hash chains but may not be
 * held across vget() (which may sleep), so the vnode interlock is taken
 * before the mutex is dropped; a failed vget() forces a rescan of the
 * chain since the cache may have changed in the meantime.
 */
int
pfs_vncache_alloc(struct mount *mp, struct vnode **vpp,
		  struct pfs_node *pn, pid_t pid)
{
	struct pfs_vncache_head *hash;
	struct pfs_vdata *pvd, *pvd2;
	struct vnode *vp;
	int error;

	/*
	 * See if the vnode is in the cache.
	 * (Unlocked SLIST_EMPTY peek: if the chain looks empty we go
	 * straight to allocation; the post-insmntque rescan below
	 * resolves any race with a concurrent insert.)
	 */
	hash = PFS_VNCACHE_HASH(pid);
	if (SLIST_EMPTY(hash))
		goto alloc;
retry:
	mtx_lock(&pfs_vncache_mutex);
	SLIST_FOREACH(pvd, hash, pvd_hash) {
		if (pvd->pvd_pn == pn && pvd->pvd_pid == pid &&
		    pvd->pvd_vnode->v_mount == mp) {
			vp = pvd->pvd_vnode;
			/* Interlock before dropping the cache mutex. */
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
				++pfs_vncache_hits;
				*vpp = vp;
				/*
				 * Some callers cache_enter(vp) later, so
				 * we have to make sure it's not in the
				 * VFS cache so it doesn't get entered
				 * twice. A better solution would be to
				 * make pfs_vncache_alloc() responsible
				 * for entering the vnode in the VFS
				 * cache.
				 */
				cache_purge(vp);
				return (0);
			}
			/* vget() failed: the cache may have changed. */
			goto retry;
		}
	}
	mtx_unlock(&pfs_vncache_mutex);
alloc:
	/* nope, get a new one */
	pvd = malloc(sizeof *pvd, M_PFSVNCACHE, M_WAITOK);
	error = getnewvnode("pseudofs", mp, &pfs_vnodeops, vpp);
	if (error) {
		free(pvd, M_PFSVNCACHE);
		return (error);
	}
	pvd->pvd_pn = pn;
	pvd->pvd_pid = pid;
	(*vpp)->v_data = pvd;
	/* Map the pseudofs node type onto the vnode type. */
	switch (pn->pn_type) {
	case pfstype_root:
		(*vpp)->v_vflag = VV_ROOT;
#if 0
		printf("root vnode allocated\n");
#endif
		/* fall through */
	case pfstype_dir:
	case pfstype_this:
	case pfstype_parent:
	case pfstype_procdir:
		(*vpp)->v_type = VDIR;
		break;
	case pfstype_file:
		(*vpp)->v_type = VREG;
		break;
	case pfstype_symlink:
		(*vpp)->v_type = VLNK;
		break;
	case pfstype_none:
		/* NB: no-op without INVARIANTS; falls into panic() below. */
		KASSERT(0, ("pfs_vncache_alloc called for null node\n"));
	default:
		panic("%s has unexpected type: %d", pn->pn_name, pn->pn_type);
	}
	/*
	 * Propagate flag through to vnode so users know it can change
	 * if the process changes (i.e. execve)
	 */
	if ((pn->pn_flags & PFS_PROCDEP) != 0)
		(*vpp)->v_vflag |= VV_PROCDEP;
	pvd->pvd_vnode = *vpp;
	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
	VN_LOCK_AREC(*vpp);
	error = insmntque(*vpp, mp);
	if (error != 0) {
		/*
		 * insmntque() destroyed the vnode on failure; only the
		 * not-yet-inserted cache entry remains to be freed.
		 */
		free(pvd, M_PFSVNCACHE);
		*vpp = NULLVP;
		return (error);
	}
retry2:
	mtx_lock(&pfs_vncache_mutex);
	/*
	 * Other thread may race with us, creating the entry we are
	 * going to insert into the cache. Recheck after
	 * pfs_vncache_mutex is reacquired.
	 */
	SLIST_FOREACH(pvd2, hash, pvd_hash) {
		if (pvd2->pvd_pn == pn && pvd2->pvd_pid == pid &&
		    pvd2->pvd_vnode->v_mount == mp) {
			vp = pvd2->pvd_vnode;
			VI_LOCK(vp);
			mtx_unlock(&pfs_vncache_mutex);
			if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread) == 0) {
				++pfs_vncache_hits;
				/* Lost the race: discard our new vnode. */
				vgone(*vpp);
				vput(*vpp);
				*vpp = vp;
				cache_purge(vp);
				return (0);
			}
			goto retry2;
		}
	}
	++pfs_vncache_misses;
	if (++pfs_vncache_entries > pfs_vncache_maxentries)
		pfs_vncache_maxentries = pfs_vncache_entries;
	SLIST_INSERT_HEAD(hash, pvd, pvd_hash);
	mtx_unlock(&pfs_vncache_mutex);
	return (0);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Free a vnode
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
pfs_vncache_free(struct vnode *vp)
|
|
|
|
{
|
2019-10-22 22:52:53 +00:00
|
|
|
struct pfs_vdata *pvd, *pvd2;
|
2001-12-19 23:58:09 +00:00
|
|
|
|
2001-04-07 19:51:12 +00:00
|
|
|
mtx_lock(&pfs_vncache_mutex);
|
2001-10-01 04:22:20 +00:00
|
|
|
pvd = (struct pfs_vdata *)vp->v_data;
|
|
|
|
KASSERT(pvd != NULL, ("pfs_vncache_free(): no vnode data\n"));
|
2019-10-22 22:52:53 +00:00
|
|
|
SLIST_FOREACH(pvd2, PFS_VNCACHE_HASH(pvd->pvd_pid), pvd_hash) {
|
|
|
|
if (pvd2 != pvd)
|
|
|
|
continue;
|
|
|
|
SLIST_REMOVE(PFS_VNCACHE_HASH(pvd->pvd_pid), pvd, pfs_vdata, pvd_hash);
|
2009-09-07 12:10:41 +00:00
|
|
|
--pfs_vncache_entries;
|
2019-10-22 22:52:53 +00:00
|
|
|
break;
|
2009-09-07 12:10:41 +00:00
|
|
|
}
|
2001-04-07 19:51:12 +00:00
|
|
|
mtx_unlock(&pfs_vncache_mutex);
|
2001-10-01 04:22:20 +00:00
|
|
|
|
2008-10-23 15:53:51 +00:00
|
|
|
free(pvd, M_PFSVNCACHE);
|
2001-06-10 18:39:21 +00:00
|
|
|
vp->v_data = NULL;
|
2001-04-07 19:51:12 +00:00
|
|
|
return (0);
|
|
|
|
}
|
2001-10-01 04:22:20 +00:00
|
|
|
|
|
|
|
/*
 * Purge the cache of dead entries
 *
 * The code is not very efficient and this perhaps can be addressed without
 * a complete rewrite. Previous iteration was walking a linked list from
 * scratch every time. This code only walks the relevant hash chain (if pid
 * is provided), but still resorts to scanning the entire cache at least twice
 * if a specific component is to be removed which is slower. This can be
 * augmented with resizing the hash.
 *
 * Explanation of the previous state:
 *
 * This is extremely inefficient due to the fact that vgone() not only
 * indirectly modifies the vnode cache, but may also sleep. We can
 * neither hold pfs_vncache_mutex across a vgone() call, nor make any
 * assumptions about the state of the cache after vgone() returns. In
 * consequence, we must start over after every vgone() call, and keep
 * trying until we manage to traverse the entire cache.
 *
 * The only way to improve this situation is to change the data structure
 * used to implement the cache.
 */
|
2019-10-22 22:52:53 +00:00
|
|
|
|
2009-06-06 00:44:13 +00:00
|
|
|
static void
|
2019-10-22 22:52:53 +00:00
|
|
|
pfs_purge_one(struct vnode *vnp)
|
|
|
|
{
|
|
|
|
|
|
|
|
VOP_LOCK(vnp, LK_EXCLUSIVE);
|
|
|
|
vgone(vnp);
|
2020-01-03 22:29:58 +00:00
|
|
|
VOP_UNLOCK(vnp);
|
2019-10-22 22:52:53 +00:00
|
|
|
vdrop(vnp);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
pfs_purge(struct pfs_node *pn)
|
2001-10-01 04:22:20 +00:00
|
|
|
{
|
2004-08-15 21:58:02 +00:00
|
|
|
struct pfs_vdata *pvd;
|
|
|
|
struct vnode *vnp;
|
2019-10-22 22:52:53 +00:00
|
|
|
u_long i, removed;
|
2001-10-01 04:22:20 +00:00
|
|
|
|
2019-10-22 22:52:53 +00:00
|
|
|
mtx_lock(&pfs_vncache_mutex);
|
|
|
|
restart:
|
|
|
|
removed = 0;
|
|
|
|
for (i = 0; i < pfs_vncache_hash; i++) {
|
|
|
|
restart_chain:
|
|
|
|
SLIST_FOREACH(pvd, &pfs_vncache_hashtbl[i], pvd_hash) {
|
|
|
|
if (pn != NULL && pvd->pvd_pn != pn)
|
|
|
|
continue;
|
2004-08-15 21:58:02 +00:00
|
|
|
vnp = pvd->pvd_vnode;
|
2005-07-07 07:33:10 +00:00
|
|
|
vhold(vnp);
|
2004-08-15 21:58:02 +00:00
|
|
|
mtx_unlock(&pfs_vncache_mutex);
|
2019-10-22 22:52:53 +00:00
|
|
|
pfs_purge_one(vnp);
|
|
|
|
removed++;
|
2004-08-15 21:58:02 +00:00
|
|
|
mtx_lock(&pfs_vncache_mutex);
|
2019-10-22 22:52:53 +00:00
|
|
|
goto restart_chain;
|
2001-10-01 04:22:20 +00:00
|
|
|
}
|
|
|
|
}
|
2019-10-22 22:52:53 +00:00
|
|
|
if (removed > 0)
|
|
|
|
goto restart;
|
|
|
|
mtx_unlock(&pfs_vncache_mutex);
|
2009-06-06 00:44:13 +00:00
|
|
|
}
|
|
|
|
|
2019-10-22 22:52:53 +00:00
|
|
|
/* Reclaim every vnode in the cache; used at module unload. */
static void
pfs_purge_all(void)
{

	pfs_purge(NULL);
}
|
|
|
|
|
|
|
|
/*
 * Free all vnodes associated with a defunct process
 *
 * process_exit eventhandler callback (arg is the NULL cookie passed at
 * registration).  Only the hash chain for the exiting pid needs to be
 * walked; since pfs_purge_one() drops the mutex and may sleep, the
 * chain is rescanned from the top after every reclaim.
 */
static void
pfs_exit(void *arg, struct proc *p)
{
	struct pfs_vncache_head *hash;
	struct pfs_vdata *pvd;
	struct vnode *vnp;
	int pid;

	pid = p->p_pid;
	hash = PFS_VNCACHE_HASH(pid);
	/* Unlocked emptiness peek; cheap common-case bailout. */
	if (SLIST_EMPTY(hash))
		return;
restart:
	mtx_lock(&pfs_vncache_mutex);
	SLIST_FOREACH(pvd, hash, pvd_hash) {
		if (pvd->pvd_pid != pid)
			continue;
		vnp = pvd->pvd_vnode;
		/* Hold the vnode so it survives the unlock. */
		vhold(vnp);
		mtx_unlock(&pfs_vncache_mutex);
		pfs_purge_one(vnp);
		goto restart;
	}
	mtx_unlock(&pfs_vncache_mutex);
}
|