Remove the nc_time and nc_ticks elements from struct namecache and
provide struct namecache_ts, which is the old struct namecache.  Only
allocate a struct namecache_ts if a non-NULL struct timespec *tsp was
passed to cache_enter_time(); otherwise use struct namecache.

Change the struct namecache allocation and deallocation macros into static
functions, since the logic becomes somewhat twisty.  Provide an accessor for
the nc_name member of struct namecache to hide the difference between
struct namecache and struct namecache_ts.

The aim of the change is to avoid wasting 20 bytes per small namecache
entry.

Reviewed by:	 jhb
MFC after: 2 weeks
X-MFC-note:  after r230394
kib 2012-01-22 01:11:06 +00:00
parent 8e3e986346
commit fb6370fb86
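For orientation, here is a minimal user-space sketch of the pattern the diff below introduces: a compact entry, a timestamped variant that repeats the compact layout up to the length field, a flag bit to tell them apart, and an accessor for the trailing name.  The entry/entry_ts names, sizes, and the malloc-based allocator are hypothetical; this is not the committed kernel code.

/*
 * Illustrative only: mimics the namecache/namecache_ts split with
 * simplified, hypothetical types.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define	ENTRY_TS	0x04		/* entry carries timestamp fields */

struct entry {				/* compact form, no timestamps */
	unsigned char	flag;		/* flag bits */
	unsigned char	nlen;		/* length of name */
	char		name[];		/* segment name + nul */
};

struct entry_ts {			/* repeats struct entry up to nlen */
	unsigned char	flag;
	unsigned char	nlen;
	struct timespec	ts;		/* extra fields sit before the name */
	int		ticks;
	char		name[];
};

/* Accessor hides the fact that the name lives at different offsets. */
static char *
entry_name(struct entry *ep)
{

	if ((ep->flag & ENTRY_TS) == 0)
		return (ep->name);
	return (((struct entry_ts *)ep)->name);
}

/* Allocate whichever layout matches the presence of a timestamp. */
static struct entry *
entry_alloc(const char *name, const struct timespec *tsp)
{
	struct entry *ep;
	size_t len;

	len = strlen(name);
	if (tsp != NULL) {
		struct entry_ts *etp;

		etp = malloc(sizeof(*etp) + len + 1);
		if (etp == NULL)
			return (NULL);
		etp->flag = ENTRY_TS;
		etp->ts = *tsp;
		etp->ticks = 0;		/* stand-in for the kernel's ticks */
		ep = (struct entry *)etp;
	} else {
		ep = malloc(sizeof(*ep) + len + 1);
		if (ep == NULL)
			return (NULL);
		ep->flag = 0;
	}
	ep->nlen = (unsigned char)len;	/* name length limit elided */
	memcpy(entry_name(ep), name, len + 1);
	return (ep);
}

int
main(void)
{
	struct timespec now;
	struct entry *plain, *stamped;

	clock_gettime(CLOCK_REALTIME, &now);
	plain = entry_alloc("passwd", NULL);
	stamped = entry_alloc("passwd", &now);
	if (plain == NULL || stamped == NULL)
		return (1);
	printf("%s: %zu vs %zu header bytes\n", entry_name(plain),
	    sizeof(struct entry), sizeof(struct entry_ts));
	free(plain);
	free(stamped);
	return (0);
}

The compact form is what most entries use; only cache_enter_time() callers that pass a timestamp pay for the larger layout.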


@@ -97,13 +97,35 @@ struct namecache {
TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */
struct vnode *nc_dvp; /* vnode of parent of name */
struct vnode *nc_vp; /* vnode the name refers to */
struct timespec nc_time; /* timespec provided by fs */
int nc_ticks; /* ticks value when entry was added */
u_char nc_flag; /* flag bits */
u_char nc_nlen; /* length of name */
char nc_name[0]; /* segment name + nul */
};
/*
* struct namecache_ts repeats struct namecache layout up to the
* nc_nlen member.
*/
struct namecache_ts {
LIST_ENTRY(namecache) nc_hash; /* hash chain */
LIST_ENTRY(namecache) nc_src; /* source vnode list */
TAILQ_ENTRY(namecache) nc_dst; /* destination vnode list */
struct vnode *nc_dvp; /* vnode of parent of name */
struct vnode *nc_vp; /* vnode the name refers to */
u_char nc_flag; /* flag bits */
u_char nc_nlen; /* length of name */
struct timespec nc_time; /* timespec provided by fs */
int nc_ticks; /* ticks value when entry was added */
char nc_name[0]; /* segment name + nul */
};
/*
* Flags in namecache.nc_flag
*/
#define NCF_WHITE 0x01
#define NCF_ISDOTDOT 0x02
#define NCF_TS 0x04
/*
* Name caching works as follows:
*
@@ -166,20 +188,50 @@ RW_SYSINIT(vfscache, &cache_lock, "Name Cache");
* fit in the small cache.
*/
static uma_zone_t cache_zone_small;
static uma_zone_t cache_zone_small_ts;
static uma_zone_t cache_zone_large;
#define CACHE_PATH_CUTOFF 35
#define CACHE_ZONE_SMALL (sizeof(struct namecache) + CACHE_PATH_CUTOFF \
+ 1)
#define CACHE_ZONE_LARGE (sizeof(struct namecache) + NAME_MAX + 1)
#define cache_alloc(len) uma_zalloc(((len) <= CACHE_PATH_CUTOFF) ? \
cache_zone_small : cache_zone_large, M_WAITOK)
#define cache_free(ncp) do { \
if (ncp != NULL) \
uma_zfree(((ncp)->nc_nlen <= CACHE_PATH_CUTOFF) ? \
cache_zone_small : cache_zone_large, (ncp)); \
} while (0)
static struct namecache *
cache_alloc(int len, int ts)
{
if (len > CACHE_PATH_CUTOFF)
return (uma_zalloc(cache_zone_large, M_WAITOK));
if (ts)
return (uma_zalloc(cache_zone_small_ts, M_WAITOK));
else
return (uma_zalloc(cache_zone_small, M_WAITOK));
}
static void
cache_free(struct namecache *ncp)
{
int ts;
if (ncp == NULL)
return;
ts = ncp->nc_flag & NCF_TS;
if (ncp->nc_nlen <= CACHE_PATH_CUTOFF) {
if (ts)
uma_zfree(cache_zone_small_ts, ncp);
else
uma_zfree(cache_zone_small, ncp);
} else
uma_zfree(cache_zone_large, ncp);
}
static char *
nc_get_name(struct namecache *ncp)
{
struct namecache_ts *ncp_ts;
if ((ncp->nc_flag & NCF_TS) == 0)
return (ncp->nc_name);
ncp_ts = (struct namecache_ts *)ncp;
return (ncp_ts->nc_name);
}
static int doingcache = 1; /* 1 => enable the cache */
SYSCTL_INT(_debug, OID_AUTO, vfscache, CTLFLAG_RW, &doingcache, 0,
@@ -235,12 +287,6 @@ static int vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir,
static MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
/*
* Flags in namecache.nc_flag
*/
#define NCF_WHITE 0x01
#define NCF_ISDOTDOT 0x02
#ifdef DIAGNOSTIC
/*
* Grab an atomic snapshot of the name cache hash chain lengths
@@ -346,10 +392,10 @@ cache_zap(ncp)
#ifdef KDTRACE_HOOKS
if (ncp->nc_vp != NULL) {
SDT_PROBE(vfs, namecache, zap, done, ncp->nc_dvp,
ncp->nc_name, ncp->nc_vp, 0, 0);
nc_get_name(ncp), ncp->nc_vp, 0, 0);
} else {
SDT_PROBE(vfs, namecache, zap_negative, done, ncp->nc_dvp,
ncp->nc_name, 0, 0, 0);
nc_get_name(ncp), 0, 0, 0);
}
#endif
vp = NULL;
@@ -460,10 +506,17 @@ retry_wlocked:
dvp, cnp->cn_nameptr, *vpp);
SDT_PROBE(vfs, namecache, lookup, hit, dvp, "..",
*vpp, 0, 0);
if (tsp != NULL)
*tsp = ncp->nc_time;
if (ticksp != NULL)
*ticksp = ncp->nc_ticks;
if (tsp != NULL) {
KASSERT((ncp->nc_flag & NCF_TS) != 0,
("No NCF_TS"));
*tsp = ((struct namecache_ts *)ncp)->nc_time;
}
if (ticksp != NULL) {
KASSERT((ncp->nc_flag & NCF_TS) != 0,
("No NCF_TS"));
*ticksp = ((struct namecache_ts *)ncp)->
nc_ticks;
}
goto success;
}
}
@@ -473,7 +526,7 @@ retry_wlocked:
LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
numchecks++;
if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
!bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
!bcmp(nc_get_name(ncp), cnp->cn_nameptr, ncp->nc_nlen))
break;
}
@@ -508,12 +561,16 @@ retry_wlocked:
*vpp = ncp->nc_vp;
CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
dvp, cnp->cn_nameptr, *vpp, ncp);
SDT_PROBE(vfs, namecache, lookup, hit, dvp, ncp->nc_name,
SDT_PROBE(vfs, namecache, lookup, hit, dvp, nc_get_name(ncp),
*vpp, 0, 0);
if (tsp != NULL)
*tsp = ncp->nc_time;
if (ticksp != NULL)
*ticksp = ncp->nc_ticks;
if (tsp != NULL) {
KASSERT((ncp->nc_flag & NCF_TS) != 0, ("No NCF_TS"));
*tsp = ((struct namecache_ts *)ncp)->nc_time;
}
if (ticksp != NULL) {
KASSERT((ncp->nc_flag & NCF_TS) != 0, ("No NCF_TS"));
*ticksp = ((struct namecache_ts *)ncp)->nc_ticks;
}
goto success;
}
@@ -543,12 +600,16 @@ negative_success:
nchstats.ncs_neghits++;
if (ncp->nc_flag & NCF_WHITE)
cnp->cn_flags |= ISWHITEOUT;
SDT_PROBE(vfs, namecache, lookup, hit_negative, dvp, ncp->nc_name,
SDT_PROBE(vfs, namecache, lookup, hit_negative, dvp, nc_get_name(ncp),
0, 0, 0);
if (tsp != NULL)
*tsp = ncp->nc_time;
if (ticksp != NULL)
*ticksp = ncp->nc_ticks;
if (tsp != NULL) {
KASSERT((ncp->nc_flag & NCF_TS) != 0, ("No NCF_TS"));
*tsp = ((struct namecache_ts *)ncp)->nc_time;
}
if (ticksp != NULL) {
KASSERT((ncp->nc_flag & NCF_TS) != 0, ("No NCF_TS"));
*ticksp = ((struct namecache_ts *)ncp)->nc_ticks;
}
CACHE_WUNLOCK();
return (ENOENT);
@@ -642,6 +703,7 @@ cache_enter_time(dvp, vp, cnp, tsp)
struct timespec *tsp;
{
struct namecache *ncp, *n2;
struct namecache_ts *n3;
struct nchashhead *ncpp;
uint32_t hash;
int flag;
@@ -708,18 +770,19 @@ cache_enter_time(dvp, vp, cnp, tsp)
* Calculate the hash key and setup as much of the new
* namecache entry as possible before acquiring the lock.
*/
ncp = cache_alloc(cnp->cn_namelen);
ncp = cache_alloc(cnp->cn_namelen, tsp != NULL);
ncp->nc_vp = vp;
ncp->nc_dvp = dvp;
ncp->nc_flag = flag;
if (tsp != NULL)
ncp->nc_time = *tsp;
else
timespecclear(&ncp->nc_time);
ncp->nc_ticks = ticks;
if (tsp != NULL) {
n3 = (struct namecache_ts *)ncp;
n3->nc_time = *tsp;
n3->nc_ticks = ticks;
n3->nc_flag |= NCF_TS;
}
len = ncp->nc_nlen = cnp->cn_namelen;
hash = fnv_32_buf(cnp->cn_nameptr, len, FNV1_32_INIT);
strlcpy(ncp->nc_name, cnp->cn_nameptr, len + 1);
strlcpy(nc_get_name(ncp), cnp->cn_nameptr, len + 1);
hash = fnv_32_buf(&dvp, sizeof(dvp), hash);
CACHE_WLOCK();
@@ -732,9 +795,16 @@ cache_enter_time(dvp, vp, cnp, tsp)
LIST_FOREACH(n2, ncpp, nc_hash) {
if (n2->nc_dvp == dvp &&
n2->nc_nlen == cnp->cn_namelen &&
!bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) {
n2->nc_time = ncp->nc_time;
n2->nc_ticks = ncp->nc_ticks;
!bcmp(nc_get_name(n2), cnp->cn_nameptr, n2->nc_nlen)) {
if (tsp != NULL) {
KASSERT((n2->nc_flag & NCF_TS) != 0,
("no NCF_TS"));
n3 = (struct namecache_ts *)n2;
n3->nc_time =
((struct namecache_ts *)ncp)->nc_time;
n3->nc_ticks =
((struct namecache_ts *)ncp)->nc_ticks;
}
CACHE_WUNLOCK();
cache_free(ncp);
return;
@@ -792,12 +862,12 @@ cache_enter_time(dvp, vp, cnp, tsp)
*/
if (vp) {
TAILQ_INSERT_HEAD(&vp->v_cache_dst, ncp, nc_dst);
SDT_PROBE(vfs, namecache, enter, done, dvp, ncp->nc_name, vp,
0, 0);
SDT_PROBE(vfs, namecache, enter, done, dvp, nc_get_name(ncp),
vp, 0, 0);
} else {
TAILQ_INSERT_TAIL(&ncneg, ncp, nc_dst);
SDT_PROBE(vfs, namecache, enter_negative, done, dvp,
ncp->nc_name, 0, 0, 0);
nc_get_name(ncp), 0, 0, 0);
}
if (numneg * ncnegfactor > numcache) {
ncp = TAILQ_FIRST(&ncneg);
@@ -819,10 +889,15 @@ nchinit(void *dummy __unused)
TAILQ_INIT(&ncneg);
cache_zone_small = uma_zcreate("S VFS Cache", CACHE_ZONE_SMALL, NULL,
NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
cache_zone_large = uma_zcreate("L VFS Cache", CACHE_ZONE_LARGE, NULL,
NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
cache_zone_small = uma_zcreate("S VFS Cache",
sizeof(struct namecache) + CACHE_PATH_CUTOFF + 1,
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
cache_zone_small_ts = uma_zcreate("STS VFS Cache",
sizeof(struct namecache_ts) + CACHE_PATH_CUTOFF + 1,
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
cache_zone_large = uma_zcreate("L VFS Cache",
sizeof(struct namecache_ts) + NAME_MAX + 1,
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
}
@@ -1126,9 +1201,9 @@ vn_vptocnp_locked(struct vnode **vp, struct ucred *cred, char *buf,
return (error);
}
*buflen -= ncp->nc_nlen;
memcpy(buf + *buflen, ncp->nc_name, ncp->nc_nlen);
memcpy(buf + *buflen, nc_get_name(ncp), ncp->nc_nlen);
SDT_PROBE(vfs, namecache, fullpath, hit, ncp->nc_dvp,
ncp->nc_name, vp, 0, 0);
nc_get_name(ncp), vp, 0, 0);
dvp = *vp;
*vp = ncp->nc_dvp;
vref(*vp);
@@ -1301,7 +1376,7 @@ vn_commname(struct vnode *vp, char *buf, u_int buflen)
return (ENOENT);
}
l = min(ncp->nc_nlen, buflen - 1);
memcpy(buf, ncp->nc_name, l);
memcpy(buf, nc_get_name(ncp), l);
CACHE_RUNLOCK();
buf[l] = '\0';
return (0);