/*
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.5 (Berkeley) 3/22/95
 * $FreeBSD$
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/filedesc.h>

/*
 * This structure describes the elements in the cache of recent
 * names looked up by namei.
 */

struct	namecache {
	LIST_ENTRY(struct namecache) nc_hash;	/* hash chain */
	LIST_ENTRY(struct namecache) nc_src;	/* source vnode list */
	TAILQ_ENTRY(struct namecache) nc_dst;	/* destination vnode list */
	struct	vnode *nc_dvp;			/* vnode of parent of name */
	struct	vnode *nc_vp;			/* vnode the name refers to */
	u_char	nc_flag;			/* flag bits */
	u_char	nc_nlen;			/* length of name */
	char	nc_name[0];			/* segment name */
};
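
/*
 * The name is stored in line after the structure, through the
 * zero-length nc_name[] member.  A sketch of the allocation, with
 * hypothetical locals (see cache_enter() for the real code):
 *
 *	ncp = (struct namecache *)malloc(sizeof(*ncp) + cnp->cn_namelen,
 *	    M_VFSCACHE, M_WAITOK);
 *	ncp->nc_nlen = cnp->cn_namelen;
 *	bcopy(cnp->cn_nameptr, ncp->nc_name, ncp->nc_nlen);
 */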

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  Cache is indexed by hash value
 * obtained from (vp, name) where vp refers to the directory
 * containing name.
 *
 * If it is a "negative" entry, (i.e. for a name that is known NOT to
 * exist) the vnode pointer will be NULL.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 */

/*
 * Structures associated with name caching.
 */
#define NCHHASH(dvp, hash) \
	(&nchashtbl[((dvp)->v_id + (hash)) & nchash])
static LIST_HEAD(nchashhead, struct namecache) *nchashtbl;	/* Hash Table */
static TAILQ_HEAD(, struct namecache) ncneg;	/* LRU list of negative entries */
static u_long	nchash;				/* size of hash table - 1 */
SYSCTL_INT(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
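
/*
 * Worked example: looking up "foo" under a directory whose v_id is 42,
 * the additive name hash is 'f' + 'o' + 'o' = 0x144, so the chain
 * scanned is nchashtbl[(42 + 0x144) & nchash].  nchash is a power of
 * two minus one, making the AND a cheap substitute for a modulus.
 */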
static u_long	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
static u_long	numneg;			/* number of negative entries allocated */
SYSCTL_INT(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
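
/*
 * Negative entries sit on the ncneg TAILQ and are capped at a
 * debug.ncnegfactor fraction of the whole cache, 1/16th by default.
 * A sketch of the recycling check (see cache_enter() for the real
 * code):
 *
 *	if (numneg * ncnegfactor > numcache)
 *		cache_zap(TAILQ_FIRST(&ncneg));
 */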
static u_long	numcache;		/* number of cache entries allocated */
SYSCTL_INT(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
struct	nchstats nchstats;		/* cache effectiveness statistics */

static int	doingcache = 1;		/* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");
SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_INT(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

static void cache_zap __P((struct namecache *ncp));

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

/*
 * Flags in namecache.nc_flag
 */
#define NCF_WHITE	1
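
/*
 * NCF_WHITE marks a negative entry standing in for a union-mount
 * whiteout; on a hit, cache_lookup() reports it by setting ISWHITEOUT
 * in cnp->cn_flags.
 */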

/*
 * Delete an entry from its hash chain, unhook it from its source and
 * destination vnodes, and free it.  Entries are never recycled;
 * cache_enter() allocates a fresh one each time.
 */
static void
cache_zap(ncp)
	struct namecache *ncp;
{
	LIST_REMOVE(ncp, nc_hash);
	LIST_REMOVE(ncp, nc_src);
	/* Drop the hold on the parent once nothing hangs off it anymore. */
	if (LIST_EMPTY(&ncp->nc_dvp->v_cache_src))
		vdrop(ncp->nc_dvp);
	if (ncp->nc_vp) {
		TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
	} else {
		/* Negative entries also leave the global negative list. */
		TAILQ_REMOVE(&ncneg, ncp, nc_dst);
		numneg--;
	}
	numcache--;
	free(ncp, M_VFSCACHE);
}

/*
 * Lookup an entry in the cache.
 *
 * Lookup is called with dvp pointing to the directory to search and
 * cnp pointing to the name of the entry being sought.  If the lookup
 * succeeds, the vnode is returned in *vpp, and a status of -1 is
 * returned.  If the lookup determines that the name does not exist
 * (negative caching), a status of ENOENT is returned.  If the lookup
 * fails, a status of zero is returned.
 */
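
/*
 * A minimal sketch of the calling convention, with hypothetical caller
 * code (the real callers are the per-filesystem lookup routines):
 *
 *	error = cache_lookup(dvp, &vp, cnp);
 *	if (error == -1)
 *		...use vp, the cached vnode...
 *	else if (error == ENOENT)
 *		...name is known not to exist...
 *	else
 *		...error is 0: a miss, scan the directory for real...
 */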

int
cache_lookup(dvp, vpp, cnp)
	struct vnode *dvp;
	struct vnode **vpp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	u_long hash;
	u_char *cp;
	int len;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		return (0);
	}

	numcalls++;

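	/*
	 * "." and ".." are never stored in the cache proper: "." is the
	 * directory itself, and ".." is held in the {v_dd, v_ddid} pair
	 * on the vnode.  Comparing the saved v_ddid against the current
	 * v_id of v_dd below rejects the pointer if the parent vnode
	 * has been recycled since it was cached.
	 */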
if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			*vpp = dvp;
			dothits++;
			return (-1);
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			dotdothits++;
			if (dvp->v_dd->v_id != dvp->v_ddid ||
			    (cnp->cn_flags & MAKEENTRY) == 0) {
				dvp->v_ddid = 0;
				return (0);
			}
			*vpp = dvp->v_dd;
			return (-1);
		}
	}

	hash = 0;
	len = cnp->cn_namelen;
	for (cp = cnp->cn_nameptr; len; len--, cp++)
		hash += *cp;
	LIST_FOREACH(ncp, (NCHHASH(dvp, hash)), nc_hash) {
		numchecks++;
		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
			break;
	}

	/* We failed to find an entry */
	if (ncp == 0) {
		if ((cnp->cn_flags & MAKEENTRY) == 0) {
			nummisszap++;
		} else {
			nummiss++;
		}
		nchstats.ncs_miss++;
		return (0);
	}

	/* We don't want to have an entry, so dump it */
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		numposzaps++;
		nchstats.ncs_badhits++;
		cache_zap(ncp);
		return (0);
	}

	/* We found a "positive" match, return the vnode */
	if (ncp->nc_vp) {
		numposhits++;
		nchstats.ncs_goodhits++;
		*vpp = ncp->nc_vp;
		return (-1);
	}

|
|
|
|
|
1995-03-09 20:23:45 +00:00
|
|
|
/* We found a negative match, and want to create it, so purge */
|
|
|
|
if (cnp->cn_nameiop == CREATE) {
|
1997-09-24 07:46:54 +00:00
|
|
|
numnegzaps++;
|
1995-03-12 02:01:20 +00:00
|
|
|
nchstats.ncs_badhits++;
|
		cache_zap(ncp);
		return (0);
	}

	numneghits++;

	/*
	 * We found a "negative" match, ENOENT notifies client of this match.
	 * The nc_flag field records whether this is a whiteout.
	 */
	/* Move the entry to the tail of the negative list to keep it hot. */
	TAILQ_REMOVE(&ncneg, ncp, nc_dst);
	TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	nchstats.ncs_neghits++;
	if (ncp->nc_flag & NCF_WHITE)
		cnp->cn_flags |= ISWHITEOUT;
	return (ENOENT);
}
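/*
 * cache_lookup() return convention, as implemented above: 0 is a miss
 * (the caller must perform the real lookup), -1 is a positive hit with
 * *vpp filled in, and ENOENT is a cached negative hit.
 */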
/*
 * Add an entry to the cache.
 */
void
cache_enter(dvp, vp, cnp)
	struct vnode *dvp;
	struct vnode *vp;
	struct componentname *cnp;
{
	struct namecache *ncp;
	struct nchashhead *ncpp;
	u_long hash;
	u_char *cp, *dp;
	int len;

	if (!doingcache)
		return;
	/*
	 * "." is never cached, and ".." is kept in the parent vnode's
	 * v_dd/v_ddid fields rather than in the cache proper.
	 */
	if (cnp->cn_nameptr[0] == '.') {
		if (cnp->cn_namelen == 1) {
			return;
		}
		if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
			if (vp) {
				dvp->v_dd = vp;
				dvp->v_ddid = vp->v_id;
			} else {
				dvp->v_dd = dvp;
				dvp->v_ddid = 0;
			}
			return;
		}
	}
	/* The component name is stored inline, just after the structure. */
	ncp = (struct namecache *)
		malloc(sizeof *ncp + cnp->cn_namelen, M_VFSCACHE, M_WAITOK);
	bzero((char *)ncp, sizeof *ncp);
	numcache++;
	if (!vp) {
		numneg++;
		ncp->nc_flag = cnp->cn_flags & ISWHITEOUT ? NCF_WHITE : 0;
	} else if (vp->v_type == VDIR) {
		vp->v_dd = dvp;
		vp->v_ddid = dvp->v_id;
	}

	/*
	 * Fill in cache info.  If vp is NULL this is a "negative" cache
	 * entry.  For negative entries we have to record whether it is a
	 * whiteout; the whiteout flag is stored in the nc_flag field.
	 */
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;

	/* Copy the name and compute a simple additive hash over its bytes. */
	len = ncp->nc_nlen = cnp->cn_namelen;
	hash = 0;
	dp = ncp->nc_name;
	for (cp = cnp->cn_nameptr; len; len--, cp++, dp++)
		hash += (*dp = *cp);
	ncpp = NCHHASH(dvp, hash);
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);

	/* Hold the directory for as long as it sources cache entries. */
	if (LIST_EMPTY(&dvp->v_cache_src))
		vhold(dvp);
	LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
	if (vp) {
		TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
	} else {
		TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
	}

	/*
	 * If negative entries exceed their allowed fraction of the cache,
	 * reclaim the oldest one from the head of the negative list.
	 */
	if (numneg * ncnegfactor > numcache) {
		ncp = TAILQ_FIRST(&ncneg);
		cache_zap(ncp);
	}
}
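/*
 * Illustrative sketch (not from this file): a filesystem's VOP_LOOKUP
 * implementation typically records its result with something like
 *
 *	if (cnp->cn_flags & MAKEENTRY)
 *		cache_enter(dvp, *vpp, cnp);
 *
 * and passes vp == NULL to cache a failed (ENOENT) lookup.
 */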
/*
 * Name cache initialization, from vfs_init() when we are booting.
 */
void
nchinit()
{
	TAILQ_INIT(&ncneg);
	nchashtbl = hashinit(desiredvnodes*2, M_VFSCACHE, &nchash);
}
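/*
 * Note (relies on the usual hashinit(9) behaviour): hashinit() sizes the
 * table to a power of two and leaves the bucket mask in nchash, which is
 * why nchash serves both as the mask in NCHHASH() and as the index of the
 * last bucket in cache_purgevfs() below.
 */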
/*
 * Invalidate all entries to a particular vnode.
 *
 * Remove all entries in the namecache relating to this vnode and
 * change the v_id.  We take the v_id from a global counter, since
 * it becomes a handy sequence number in crash-dumps that way.
 * No valid vnode will ever have (v_id == 0).
 *
 * XXX: Only time and the size of v_id prevents this from failing:
 * XXX: In theory we should hunt down all (struct vnode*, v_id)
 * XXX: soft references and nuke them, at least on the global
 * XXX: v_id wraparound.  The period of resistance can be extended
 * XXX: by incrementing each vnode's v_id individually instead of
 * XXX: using the global v_id.
 */
void
cache_purge(vp)
	struct vnode *vp;
{
	static u_long nextid;
	while (!LIST_EMPTY(&vp->v_cache_src))
		cache_zap(LIST_FIRST(&vp->v_cache_src));
	while (!TAILQ_EMPTY(&vp->v_cache_dst))
		cache_zap(TAILQ_FIRST(&vp->v_cache_dst));

	/* Pick a new v_id, skipping zero and the vnode's current id. */
	do
		nextid++;
	while (nextid == vp->v_id || !nextid);
	vp->v_id = nextid;
	vp->v_dd = vp;
	vp->v_ddid = 0;
}
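/*
 * (Context, inferred from the design above: cache_purge() is the hook run
 * when a vnode is cleaned or recycled, so that no namecache entry can
 * outlive the vnode identity it refers to.)
 */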
/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(mp)
	struct mount *mp;
{
	struct nchashhead *ncpp;
	struct namecache *ncp, *nnp;

	/* Scan hash tables for applicable entries */
	for (ncpp = &nchashtbl[nchash]; ncpp >= nchashtbl; ncpp--) {
		for (ncp = LIST_FIRST(ncpp); ncp != 0; ncp = nnp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (ncp->nc_dvp->v_mount == mp) {
				cache_zap(ncp);
			}
		}
	}
}
/*
 * Perform canonical checks and cache lookup and pass on to the filesystem
 * through VOP_CACHEDLOOKUP only if needed.
 */
int
vfs_cache_lookup(ap)
	struct vop_lookup_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
	} */ *ap;
{
	struct vnode *vdp;
	struct vnode *pdp;
	int lockparent;
	int error;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct ucred *cred = cnp->cn_cred;
	int flags = cnp->cn_flags;
	struct proc *p = cnp->cn_proc;
	u_long vpid;	/* capability number of vnode */

	*vpp = NULL;
	vdp = ap->a_dvp;
	lockparent = flags & LOCKPARENT;

	if (vdp->v_type != VDIR)
		return (ENOTDIR);

	if ((flags & ISLASTCN) && (vdp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
		return (EROFS);

	error = VOP_ACCESS(vdp, VEXEC, cred, cnp->cn_proc);
	if (error)
		return (error);

	error = cache_lookup(vdp, vpp, cnp);

	/* A cache miss: let the filesystem do the real work. */
	if (!error)
		return (VOP_CACHEDLOOKUP(ap->a_dvp, ap->a_vpp, ap->a_cnp));

	/* A cached negative hit. */
	if (error == ENOENT)
		return (error);

	/* A positive hit: acquire the right locks and revalidate. */
	pdp = vdp;
	vdp = *vpp;
	vpid = vdp->v_id;
	if (pdp == vdp) {	/* lookup on "." */
		VREF(vdp);
		error = 0;
	} else if (flags & ISDOTDOT) {
		/* Drop the child lock first to avoid a lock-order deadlock. */
		VOP_UNLOCK(pdp, 0, p);
		error = vget(vdp, LK_EXCLUSIVE, p);
		if (!error && lockparent && (flags & ISLASTCN))
			error = vn_lock(pdp, LK_EXCLUSIVE, p);
	} else {
		error = vget(vdp, LK_EXCLUSIVE, p);
		if (!lockparent || error || !(flags & ISLASTCN))
			VOP_UNLOCK(pdp, 0, p);
	}
	/*
	 * Check that the capability number did not change
	 * while we were waiting for the lock.
	 */
	if (!error) {
		if (vpid == vdp->v_id)
			return (0);
		vput(vdp);
		if (lockparent && pdp != vdp && (flags & ISLASTCN))
			VOP_UNLOCK(pdp, 0, p);
	}
	/* The entry went stale; relock the directory and redo the lookup. */
	error = vn_lock(pdp, LK_EXCLUSIVE, p);
	if (error)
		return (error);
	return (VOP_CACHEDLOOKUP(ap->a_dvp, ap->a_vpp, ap->a_cnp));
}
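/*
 * Illustrative wiring (hedged; entries as found in typical vnodeop tables
 * of this vintage): a filesystem opts into this front end by installing
 * vfs_cache_lookup as its lookup vop and supplying the real directory
 * scan as the cached-lookup vop, e.g.:
 *
 *	{ &vop_lookup_desc,		(vop_t *) vfs_cache_lookup },
 *	{ &vop_cachedlookup_desc,	(vop_t *) ufs_lookup },
 */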
#ifndef _SYS_SYSPROTO_H_
struct __getcwd_args {
	u_char	*buf;
	u_int	buflen;
};
#endif

#define STATNODE(mode, name, var) \
	SYSCTL_INT(_vfs_cache, OID_AUTO, name, mode, var, 0, "");

static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
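/*
 * The STATNODE counters above surface as read-only sysctl nodes under
 * vfs.cache (e.g. vfs.cache.numcwdcalls), and the read-write
 * debug.disablecwd knob turns this fast path off entirely.
 */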
int
__getcwd(p, uap)
	struct proc *p;
	struct __getcwd_args *uap;
{
	char *bp, *buf;
	int error, i, slash_prefixed;
	struct filedesc *fdp;
	struct namecache *ncp;
	struct vnode *vp;

	numcwdcalls++;
	if (disablecwd)
		return (ENODEV);
	if (uap->buflen < 2)
		return (EINVAL);
	if (uap->buflen > MAXPATHLEN)
		uap->buflen = MAXPATHLEN;

	/* Build the path backwards, from the end of the buffer. */
	buf = bp = malloc(uap->buflen, M_TEMP, M_WAITOK);
	bp += uap->buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;
	for (vp = fdp->fd_cdir; vp != fdp->fd_rdir && vp != rootvnode;) {
		if (vp->v_flag & VROOT) {
			if (vp->v_mount == NULL) {	/* forced unmount */
				free(buf, M_TEMP);
				return (EBADF);
			}
			vp = vp->v_mount->mnt_vnodecovered;
			continue;
		}
		if (vp->v_dd->v_id != vp->v_ddid) {
			numcwdfail1++;
			free(buf, M_TEMP);
			return (ENOTDIR);
		}
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!ncp) {
			numcwdfail2++;
			free(buf, M_TEMP);
			return (ENOENT);
		}
		if (ncp->nc_dvp != vp->v_dd) {
			numcwdfail3++;
			free(buf, M_TEMP);
			return (EBADF);
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfail4++;
				free(buf, M_TEMP);
				return (ENOMEM);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		vp = vp->v_dd;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
	}
	numcwdfound++;
	error = copyout(bp, uap->buf, strlen(bp) + 1);
	free(buf, M_TEMP);
	return (error);
}
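/*
 * (Usage note, hedged: libc's getcwd(3) is expected to try this syscall
 * first and fall back to the traditional ".." directory walk when it
 * fails, so the error returns above only cost a slow path, never a
 * wrong answer.)
 */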
/*
 * Thus begins the fullpath magic.
 */

#undef STATNODE
#define STATNODE(name) \
	static u_int name; \
	SYSCTL_INT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
	&disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);
int
textvp_fullpath(struct proc *p, char **retbuf, char **retfreebuf)
{
	char *bp, *buf;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct namecache *ncp;
	struct vnode *vp, *textvp;

	numfullpathcalls++;
	if (disablefullpath)
		return (ENODEV);
	textvp = p->p_textvp;
	if (textvp == NULL)
		return (EINVAL);

	/* As in __getcwd(), build the path backwards from the buffer end. */
	buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;
	for (vp = textvp; vp != fdp->fd_rdir && vp != rootvnode;) {
		if (vp->v_flag & VROOT) {
			if (vp->v_mount == NULL) {	/* forced unmount */
				free(buf, M_TEMP);
				return (EBADF);
			}
			vp = vp->v_mount->mnt_vnodecovered;
			continue;
		}
		if (vp != textvp && vp->v_dd->v_id != vp->v_ddid) {
			numfullpathfail1++;
			free(buf, M_TEMP);
			return (ENOTDIR);
		}
		ncp = TAILQ_FIRST(&vp->v_cache_dst);
		if (!ncp) {
			numfullpathfail2++;
			free(buf, M_TEMP);
			return (ENOENT);
		}
		if (vp != textvp && ncp->nc_dvp != vp->v_dd) {
			numfullpathfail3++;
			free(buf, M_TEMP);
			return (EBADF);
		}
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				free(buf, M_TEMP);
				return (ENOMEM);
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
		slash_prefixed = 1;
		vp = ncp->nc_dvp;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			free(buf, M_TEMP);
			return (ENOMEM);
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*retfreebuf = buf;
	return (0);
}
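/*
 * Callers consume the result as *retbuf and release it with
 * free(*retfreebuf, M_TEMP); *retbuf points into the middle of the
 * allocation and must never be freed directly.
 */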