1.  Add a {pointer, v_id} pair to the vnode to store the reference to the
    ".." vnode (see the first sketch after this list).  This is cheaper
    storage-wise than keeping it in the namecache, and it makes more sense
    since it's a 1:1 mapping.

2.  Also handle the case of "." more intelligently, rather than stuffing
    the namecache with pointless entries.

3.  Add two lists to the vnode and hang from them the namecache entries
    that go from or to this vnode (second sketch below).  When cleaning a
    vnode, delete all namecache entries it invalidates.

4.  Never reuse namecache entries; malloc new ones when we need them and
    free old ones when they die.  There is no longer a hard limit on how
    many we can have.

5.  Remove the upper limit on the name length of namecache entries.

6.  Make a global list for negative namecache entries and limit their
    number to a sysctl'able (debug.ncnegfactor) fraction of the total
    namecache (third sketch below).  Currently the default fraction is
    1/16th.  (Suggestions for a better default wanted!)

7.  Assign v_id correctly in the face of 32-bit rollover.

8.  Remove the LRU list for namecache entries; it is not needed.  Remove
    the #ifdef NCH_STATISTICS stuff; it is not needed either.

9.  Use the vnode freelist as a true LRU list, also for namecache accesses.

10. Reuse vnodes more aggressively but also more selectively; if we can't
    reuse one, malloc a new one (fourth sketch below).  There is no longer
    a hard limit on their number; they grow to the point where we don't
    reuse potentially usable vnodes.  A vnode will not get recycled if it
    still has pages in core or if it is the source of namecache entries
    (Yes, this does indeed work :-)  "." and ".." are not namecache
    entries any longer...)

11. Do not overload the v_id field in namecache entries with whiteout
    information; use a char-sized flags field instead, so we can get
    rid of the nc_vpid and nc_dvpid fields from the namecache struct.
    Since we're linked to the vnodes and purged when they're cleaned,
    we don't have to check the v_id any more.

12. NFS knew about the namecache's limitation on name length; it
    shouldn't have, and now it doesn't.
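
The four sketches referenced above follow.  They are condensed from the
diff below rather than verbatim: error paths and unrelated fields are
trimmed.

First, the "." and ".." fast path in cache_lookup().  These names never
enter the hash table; they are resolved from the directory vnode itself:

        if (cnp->cn_nameptr[0] == '.') {
                if (cnp->cn_namelen == 1) {
                        *vpp = dvp;             /* "." is dvp itself */
                        return (-1);            /* cache hit */
                }
                if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
                        if (dvp->v_dd->v_id != dvp->v_ddid ||
                            (cnp->cn_flags & MAKEENTRY) == 0) {
                                dvp->v_ddid = 0;        /* stale ".." */
                                return (0);             /* do a real lookup */
                        }
                        *vpp = dvp->v_dd;       /* cached ".." vnode */
                        return (-1);
                }
        }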
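
Second, the reshaped structures (cf. the namei.h and vnode.h hunks below):

        struct namecache {
                LIST_ENTRY(namecache) nc_hash;  /* hash chain */
                LIST_ENTRY(namecache) nc_src;   /* list hung off nc_dvp */
                TAILQ_ENTRY(namecache) nc_dst;  /* list hung off nc_vp */
                struct vnode *nc_dvp;           /* vnode of parent of name */
                struct vnode *nc_vp;            /* NULL for negative entries */
                char nc_flag;                   /* NCF_WHITE; replaces nc_vpid */
                char nc_nlen;                   /* length of name */
                char nc_name[0];                /* name, malloc'ed with entry */
        };

        /* ...and the fields added to struct vnode: */
        LIST_HEAD(, namecache) v_cache_src;     /* entries from this vnode */
        TAILQ_HEAD(, namecache) v_cache_dst;    /* entries to this vnode */
        struct vnode *v_dd;                     /* ".." vnode */
        u_long v_ddid;                          /* v_id of v_dd when cached */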
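
Third, the negative-entry cap and the v_id rollover rule (cf.
cache_enter() and cache_purge() below):

        /* On each new negative entry, zap the oldest one if negative
         * entries exceed 1/ncnegfactor of the whole cache. */
        if (numneg * ncnegfactor > numcache)
                cache_zap(TAILQ_FIRST(&ncneg));

        /* Never assign the same v_id twice, and never assign zero,
         * even when the 32-bit counter wraps. */
        do {
                if (++nextvnodeid == vp->v_id)
                        ++nextvnodeid;
        } while (!nextvnodeid);
        vp->v_id = nextvnodeid;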
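
Fourth, the reuse policy in getnewvnode().  The freelist is scanned in
LRU order, skipping vnodes whose recycling would throw away cached pages
or namecache entries:

        TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
                if (!simple_lock_try(&vp->v_interlock))
                        continue;
                if (vp->v_object && vp->v_object->resident_page_count) {
                        /* Don't recycle if it's caching some pages */
                        simple_unlock(&vp->v_interlock);
                        continue;
                }
                if (LIST_FIRST(&vp->v_cache_src)) {
                        /* Don't recycle if active in the namecache */
                        simple_unlock(&vp->v_interlock);
                        continue;
                }
                break;          /* this one can be recycled */
        }
        /* If the scan comes up empty, a new vnode is malloc'ed;
         * there is no hard cap on their number any more. */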

Bugs:
        The namecache statistics no longer include the hits for "."
        and "..".

Performance impact:
        Generally within +/- 0.5% for "normal" workstations, but
        I hope this will allow the system to be self-tuning over a
        bigger range of "special" applications.  The case where
        RAM is available but unused for caching because we don't have
        any vnodes should be gone.

Future work:
        Straighten out the namecache statistics.

        "desiredvnodes" is still used to (bogusly ?) size hash
        tables in the filesystems.

        I still have to find a way to safely free unused vnodes
        back so their number can shrink when not needed.

        There are a few uses of the v_id field left in the
        filesystems; they are scheduled for demolition at a later time.

        Maybe a one-slot cache for unused namecache entries should
        be implemented to decrease the malloc/free frequency.
Poul-Henning Kamp, 1997-05-04 09:17:38 +00:00
commit b15a966ec6 (parent f0394ba022)
7 changed files with 230 additions and 225 deletions

File: vfs_cache.c

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95
* $Id: vfs_cache.c,v 1.23 1997/02/22 09:39:31 peter Exp $
* $Id: vfs_cache.c,v 1.24 1997/03/08 15:22:14 bde Exp $
*/
#include <sys/param.h>
@@ -62,10 +62,6 @@
* If it is a "negative" entry, (i.e. for a name that is known NOT to
* exist) the vnode pointer will be NULL.
*
* For simplicity (and economy of storage), names longer than
* a maximum length of NCHNAMLEN are not cached; they occur
* infrequently in any case, and are almost never of interest.
*
* Upon reaching the last segment of a path, if the reference
* is for DELETE, or NOCACHE is set (rewrite), and the
* name is located in the cache, it will be dropped.
@@ -77,44 +73,45 @@
#define NCHHASH(dvp, cnp) \
(&nchashtbl[((dvp)->v_id + (cnp)->cn_hash) % nchash])
static LIST_HEAD(nchashhead, namecache) *nchashtbl; /* Hash Table */
static TAILQ_HEAD(, namecache) ncneg; /* List of negative entries */
static u_long nchash; /* size of hash table */
static u_long ncnegfactor = 16; /* ratio of negative entries */
SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
static u_long numneg; /* number of negative entries */
SYSCTL_INT(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
static u_long numcache; /* number of cache entries allocated */
static TAILQ_HEAD(, namecache) nclruhead; /* LRU chain */
SYSCTL_INT(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");
struct nchstats nchstats; /* cache effectiveness statistics */
static int doingcache = 1; /* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0, "");
SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");
#ifdef NCH_STATISTICS
u_long nchnbr;
#define NCHNBR(ncp) (ncp)->nc_nbr = ++nchnbr;
#define NCHHIT(ncp) (ncp)->nc_hits++
#else
#define NCHNBR(ncp)
#define NCHHIT(ncp)
#endif
static void cache_zap __P((struct namecache *ncp));
/*
* Flags in namecache.nc_flag
*/
#define NCF_WHITE 1
/*
* Delete an entry from its hash list and move it to the front
* of the LRU list for immediate reuse.
*/
#define PURGE(ncp) { \
LIST_REMOVE(ncp, nc_hash); \
ncp->nc_hash.le_prev = 0; \
TAILQ_REMOVE(&nclruhead, ncp, nc_lru); \
TAILQ_INSERT_HEAD(&nclruhead, ncp, nc_lru); \
}
/*
* Move an entry that has been used to the tail of the LRU list
* so that it will be preserved for future use.
*/
#define TOUCH(ncp) { \
if (ncp->nc_lru.tqe_next != 0) { \
TAILQ_REMOVE(&nclruhead, ncp, nc_lru); \
TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru); \
NCHNBR(ncp); \
} \
static void
cache_zap(ncp)
struct namecache *ncp;
{
LIST_REMOVE(ncp, nc_hash);
LIST_REMOVE(ncp, nc_src);
if (ncp->nc_vp) {
TAILQ_REMOVE(&ncp->nc_vp->v_cache_dst, ncp, nc_dst);
} else {
TAILQ_REMOVE(&ncneg, ncp, nc_dst);
numneg--;
}
numcache--;
free(ncp, M_CACHE);
}
/*
@@ -145,25 +142,25 @@ cache_lookup(dvp, vpp, cnp)
cnp->cn_flags &= ~MAKEENTRY;
return (0);
}
if (cnp->cn_namelen > NCHNAMLEN) {
nchstats.ncs_long++;
cnp->cn_flags &= ~MAKEENTRY;
return (0);
if (cnp->cn_nameptr[0] == '.') {
if (cnp->cn_namelen == 1) {
*vpp = dvp;
return (-1);
}
if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
if (dvp->v_dd->v_id != dvp->v_ddid ||
(cnp->cn_flags & MAKEENTRY) == 0) {
dvp->v_ddid = 0;
return (0);
}
*vpp = dvp->v_dd;
return (-1);
}
}
ncpp = NCHHASH(dvp, cnp);
for (ncp = ncpp->lh_first; ncp != 0; ncp = nnp) {
nnp = ncp->nc_hash.le_next;
/* If one of the vp's went stale, don't bother anymore. */
if ((ncp->nc_dvpid != ncp->nc_dvp->v_id) ||
(ncp->nc_vp && ncp->nc_vpid != ncp->nc_vp->v_id)) {
nchstats.ncs_falsehits++;
PURGE(ncp);
continue;
}
/* Now that we know the vp's to be valid, is it ours ? */
if (ncp->nc_dvp == dvp &&
ncp->nc_nlen == cnp->cn_namelen &&
LIST_FOREACH(ncp, (NCHHASH(dvp, cnp)), nc_hash) {
if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
!bcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen))
break;
}
@@ -174,29 +171,25 @@ cache_lookup(dvp, vpp, cnp)
return (0);
}
NCHHIT(ncp);
/* We don't want to have an entry, so dump it */
if ((cnp->cn_flags & MAKEENTRY) == 0) {
nchstats.ncs_badhits++;
PURGE(ncp);
cache_zap(ncp);
return (0);
}
/* We found a "positive" match, return the vnode */
if (ncp->nc_vp) {
nchstats.ncs_goodhits++;
TOUCH(ncp);
vtouch(ncp->nc_vp);
*vpp = ncp->nc_vp;
if ((*vpp)->v_usage < MAXVNODEUSE)
(*vpp)->v_usage++;
return (-1);
}
/* We found a negative match, and want to create it, so purge */
if (cnp->cn_nameiop == CREATE) {
nchstats.ncs_badhits++;
PURGE(ncp);
cache_zap(ncp);
return (0);
}
@@ -204,9 +197,11 @@ cache_lookup(dvp, vpp, cnp)
* We found a "negative" match, ENOENT notifies client of this match.
* The nc_flag field records whether this is a whiteout.
*/
TAILQ_REMOVE(&ncneg, ncp, nc_dst);
TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
nchstats.ncs_neghits++;
TOUCH(ncp);
cnp->cn_flags |= ncp->nc_vpid;
if (ncp->nc_flag & NCF_WHITE)
cnp->cn_flags |= ISWHITEOUT;
return (ENOENT);
}
@@ -225,35 +220,28 @@ cache_enter(dvp, vp, cnp)
if (!doingcache)
return;
if (cnp->cn_namelen > NCHNAMLEN) {
printf("cache_enter: name too long");
return;
}
/*
* We allocate a new entry if we are less than the maximum
* allowed and the one at the front of the LRU list is in use.
* Otherwise we use the one at the front of the LRU list.
*/
if (numcache < desiredvnodes &&
((ncp = nclruhead.tqh_first) == NULL ||
ncp->nc_hash.le_prev != 0)) {
/* Add one more entry */
ncp = (struct namecache *)
malloc((u_long)sizeof *ncp, M_CACHE, M_WAITOK);
bzero((char *)ncp, sizeof *ncp);
numcache++;
} else if (ncp = nclruhead.tqh_first) {
/* reuse an old entry */
TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
if (ncp->nc_hash.le_prev != 0) {
LIST_REMOVE(ncp, nc_hash);
ncp->nc_hash.le_prev = 0;
if (cnp->cn_nameptr[0] == '.') {
if (cnp->cn_namelen == 1) {
return;
}
if (cnp->cn_namelen == 2 && cnp->cn_nameptr[1] == '.') {
if (vp) {
dvp->v_dd = vp;
dvp->v_ddid = vp->v_id;
} else {
dvp->v_dd = dvp;
dvp->v_ddid = 0;
}
return;
}
} else {
/* give up */
return;
}
ncp = (struct namecache *)
malloc(sizeof *ncp + cnp->cn_namelen, M_CACHE, M_WAITOK);
bzero((char *)ncp, sizeof *ncp);
numcache++;
if (!vp)
numneg++;
/*
* Fill in cache info, if vp is NULL this is a "negative" cache entry.
@@ -262,19 +250,25 @@ cache_enter(dvp, vp, cnp)
* otherwise unused.
*/
ncp->nc_vp = vp;
if (vp) {
ncp->nc_vpid = vp->v_id;
if (vp->v_usage < MAXVNODEUSE)
++vp->v_usage;
} else
ncp->nc_vpid = cnp->cn_flags & ISWHITEOUT;
if (vp)
vtouch(vp);
else
ncp->nc_flag = cnp->cn_flags & ISWHITEOUT ? NCF_WHITE : 0;
ncp->nc_dvp = dvp;
ncp->nc_dvpid = dvp->v_id;
ncp->nc_nlen = cnp->cn_namelen;
bcopy(cnp->cn_nameptr, ncp->nc_name, (unsigned)ncp->nc_nlen);
TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
ncpp = NCHHASH(dvp, cnp);
LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
LIST_INSERT_HEAD(&dvp->v_cache_src, ncp, nc_src);
if (vp) {
TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
} else {
TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
}
if (numneg*ncnegfactor > numcache) {
ncp = TAILQ_FIRST(&ncneg);
cache_zap(ncp);
}
}
/*
@@ -284,7 +278,7 @@ void
nchinit()
{
TAILQ_INIT(&nclruhead);
TAILQ_INIT(&ncneg);
nchashtbl = phashinit(desiredvnodes, M_CACHE, &nchash);
}
@@ -304,14 +298,20 @@ cache_purge(vp)
struct nchashhead *ncpp;
static u_long nextvnodeid;
vp->v_id = ++nextvnodeid;
if (nextvnodeid != 0)
return;
for (ncpp = &nchashtbl[nchash - 1]; ncpp >= nchashtbl; ncpp--) {
while (ncp = ncpp->lh_first)
PURGE(ncp);
}
vp->v_id = ++nextvnodeid;
while (!LIST_EMPTY(&vp->v_cache_src))
cache_zap(LIST_FIRST(&vp->v_cache_src));
while (!TAILQ_EMPTY(&vp->v_cache_dst))
cache_zap(TAILQ_FIRST(&vp->v_cache_dst));
/* Never assign the same v_id, and never assign zero as v_id */
do {
if (++nextvnodeid == vp->v_id)
++nextvnodeid;
} while (!nextvnodeid);
vp->v_id = nextvnodeid;
vp->v_dd = vp;
vp->v_ddid = 0;
}
/*
@@ -329,12 +329,10 @@ cache_purgevfs(mp)
/* Scan hash tables for applicable entries */
for (ncpp = &nchashtbl[nchash - 1]; ncpp >= nchashtbl; ncpp--) {
for (ncp = ncpp->lh_first; ncp != 0; ncp = nnp) {
nnp = ncp->nc_hash.le_next;
if (ncp->nc_dvpid != ncp->nc_dvp->v_id ||
(ncp->nc_vp && ncp->nc_vpid != ncp->nc_vp->v_id) ||
ncp->nc_dvp->v_mount == mp) {
PURGE(ncp);
for (ncp = LIST_FIRST(ncpp); ncp != 0; ncp = nnp) {
nnp = LIST_NEXT(ncp, nc_hash);
if (ncp->nc_dvp->v_mount == mp) {
cache_zap(ncp);
}
}
}

File: vfs_subr.c

@@ -36,7 +36,7 @@
* SUCH DAMAGE.
*
* @(#)vfs_subr.c 8.31 (Berkeley) 5/26/95
* $Id: vfs_subr.c,v 1.83 1997/04/25 06:47:12 peter Exp $
* $Id: vfs_subr.c,v 1.84 1997/04/30 03:09:15 dyson Exp $
*/
/*
@@ -78,6 +78,7 @@ extern void printlockedvnodes __P((void));
static void vclean __P((struct vnode *vp, int flags, struct proc *p));
static void vgonel __P((struct vnode *vp, struct proc *p));
unsigned long numvnodes;
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
static void vputrele __P((struct vnode *vp, int put));
enum vtype iftovt_tab[16] = {
@@ -342,54 +343,36 @@ getnewvnode(tag, mp, vops, vpp)
struct proc *p = curproc; /* XXX */
struct vnode *vp;
simple_lock(&vnode_free_list_slock);
retry:
/*
* we allocate a new vnode if
* 1. we don't have any free
* Pretty obvious, we actually used to panic, but that
* is a silly thing to do.
* 2. we havn't filled our pool yet
* We don't want to trash the incore (VM-)vnodecache.
* 3. if less that 1/4th of our vnodes are free.
* We don't want to trash the namei cache either.
* We take the least recently used vnode from the freelist
* if we can get it and it has no cached pages, and no
* namecache entries are relative to it.
* Otherwise we allocate a new vnode
*/
if (freevnodes < (numvnodes >> 2) ||
numvnodes < desiredvnodes ||
vnode_free_list.tqh_first == NULL) {
simple_unlock(&vnode_free_list_slock);
vp = (struct vnode *) malloc((u_long) sizeof *vp,
M_VNODE, M_WAITOK);
bzero((char *) vp, sizeof *vp);
numvnodes++;
} else {
for (vp = vnode_free_list.tqh_first;
vp != NULLVP; vp = vp->v_freelist.tqe_next) {
if (simple_lock_try(&vp->v_interlock))
break;
}
/*
* Unless this is a bad time of the month, at most
* the first NCPUS items on the free list are
* locked, so this is close enough to being empty.
*/
if (vp == NULLVP) {
simple_unlock(&vnode_free_list_slock);
tablefull("vnode");
*vpp = 0;
return (ENFILE);
}
simple_lock(&vnode_free_list_slock);
TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
if (!simple_lock_try(&vp->v_interlock))
continue;
if (vp->v_usecount)
panic("free vnode isn't");
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
if (vp->v_usage > 0) {
simple_unlock(&vp->v_interlock);
--vp->v_usage;
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
goto retry;
}
freevnodes--;
if (vp->v_object && vp->v_object->resident_page_count) {
/* Don't recycle if it's caching some pages */
simple_unlock(&vp->v_interlock);
continue;
} else if (LIST_FIRST(&vp->v_cache_src)) {
/* Don't recycle if active in the namecache */
simple_unlock(&vp->v_interlock);
continue;
} else {
break;
}
}
if (vp) {
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
freevnodes--;
/* see comment on why 0xdeadb is set at end of vgone (below) */
vp->v_freelist.tqe_prev = (struct vnode **) 0xdeadb;
simple_unlock(&vnode_free_list_slock);
@@ -420,8 +403,17 @@ retry:
vp->v_clen = 0;
vp->v_socket = 0;
vp->v_writecount = 0; /* XXX */
vp->v_usage = 0;
} else {
simple_unlock(&vnode_free_list_slock);
vp = (struct vnode *) malloc((u_long) sizeof *vp,
M_VNODE, M_WAITOK);
bzero((char *) vp, sizeof *vp);
vp->v_dd = vp;
LIST_INIT(&vp->v_cache_src);
TAILQ_INIT(&vp->v_cache_dst);
numvnodes++;
}
vp->v_type = VNON;
cache_purge(vp);
vp->v_tag = tag;
@@ -1119,7 +1111,6 @@ vputrele(vp, put)
simple_lock(&vnode_free_list_slock);
if (vp->v_flag & VAGE) {
vp->v_flag &= ~VAGE;
vp->v_usage = 0;
if(vp->v_tag != VT_TFS)
TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
} else {
@@ -2147,3 +2138,20 @@ retry:
retn:
return error;
}
void
vtouch(vp)
struct vnode *vp;
{
simple_lock(&vp->v_interlock);
if (vp->v_usecount) {
simple_unlock(&vp->v_interlock);
return;
}
if (simple_lock_try(&vnode_free_list_slock)) {
TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
simple_unlock(&vnode_free_list_slock);
}
simple_unlock(&vp->v_interlock);
}


File: nfs_vnops.c

@@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* @(#)nfs_vnops.c 8.16 (Berkeley) 5/27/95
* $Id: nfs_vnops.c,v 1.45 1997/03/29 12:40:20 bde Exp $
* $Id: nfs_vnops.c,v 1.46 1997/04/04 17:49:33 dfr Exp $
*/
@@ -2382,8 +2382,7 @@ nfs_readdirplusrpc(vp, uiop, cred)
for (cp = cnp->cn_nameptr, i = 1; i <= len;
i++, cp++)
cnp->cn_hash += (unsigned char)*cp * i;
if (cnp->cn_namelen <= NCHNAMLEN)
cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
cache_enter(ndp->ni_dvp, ndp->ni_vp, cnp);
}
} else {
/* Just skip over the file handle */


File: namei.h

@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)namei.h 8.5 (Berkeley) 1/9/95
* $Id$
* $Id: namei.h,v 1.13 1997/02/22 09:45:38 peter Exp $
*/
#ifndef _SYS_NAMEI_H_
@@ -153,30 +153,18 @@ struct nameidata {
/*
* This structure describes the elements in the cache of recent
* names looked up by namei. NCHNAMLEN is sized to make structure
* size a power of two to optimize malloc's. Minimum reasonable
* size is 15.
* names looked up by namei.
*/
#ifdef NCH_STATISTICS
#define NCHNAMLEN 23 /* maximum name segment length we bother with */
#else
#define NCHNAMLEN 31 /* maximum name segment length we bother with */
#endif
struct namecache {
LIST_ENTRY(namecache) nc_hash; /* hash chain */
TAILQ_ENTRY(namecache) nc_lru; /* LRU chain */
LIST_ENTRY(namecache) nc_src; /* source vnode list */
TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */
struct vnode *nc_dvp; /* vnode of parent of name */
u_long nc_dvpid; /* capability number of nc_dvp */
struct vnode *nc_vp; /* vnode the name refers to */
u_long nc_vpid; /* capability number of nc_vp */
#ifdef NCH_STATISTICS
u_long nc_nbr; /* a serial number */
u_long nc_hits; /* how many times we got hit */
#endif
char nc_flag; /* flag bits */
char nc_nlen; /* length of name */
char nc_name[NCHNAMLEN]; /* segment name */
char nc_name[0]; /* segment name */
};
#ifdef KERNEL

File: vnode.h

@@ -31,7 +31,7 @@
* SUCH DAMAGE.
*
* @(#)vnode.h 8.7 (Berkeley) 2/4/94
* $Id: vnode.h,v 1.42 1997/02/22 09:46:29 peter Exp $
* $Id: vnode.h,v 1.43 1997/04/04 17:43:32 dfr Exp $
*/
#ifndef _SYS_VNODE_H_
@@ -70,6 +70,7 @@ LIST_HEAD(buflists, buf);
typedef int vop_t __P((void *));
struct vm_object;
struct namecache;
/*
* Reading or writing any of these items requires holding the appropriate lock.
@@ -104,12 +105,15 @@ struct vnode {
daddr_t v_cstart; /* start block of cluster */
daddr_t v_lasta; /* last allocation */
int v_clen; /* length of current cluster */
int v_usage; /* Vnode usage counter */
struct vm_object *v_object; /* Place to store VM object */
struct simplelock v_interlock; /* lock on usecount and flag */
struct lock *v_vnlock; /* used for non-locking fs's */
enum vtagtype v_tag; /* type of underlying data */
void *v_data; /* private data for fs */
LIST_HEAD(, namecache) v_cache_src; /* Cache entries from us */
TAILQ_HEAD(, namecache) v_cache_dst; /* Cache entries to us */
struct vnode *v_dd; /* .. vnode */
u_long v_ddid; /* .. capability identifier */
};
#define v_mountedhere v_un.vu_mountedhere
#define v_socket v_un.vu_socket
@@ -506,6 +510,7 @@ struct vnode *
checkalias __P((struct vnode *vp, dev_t nvp_rdev, struct mount *mp));
void vput __P((struct vnode *vp));
void vrele __P((struct vnode *vp));
void vtouch __P((struct vnode *vp));
#endif /* KERNEL */
#endif /* !_SYS_VNODE_H_ */