cache: convert the hash from LIST to SLIST

This reduces struct namecache by sizeof(void *).

The negative side is that we now have to find the previous element (if any)
when removing an entry, but since we normally don't expect collisions it
should be fine.

Note that this adds cache_get_hash calls, which can be eliminated later.
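
For illustration, a minimal userspace sketch of the tradeoff, built on the
plain sys/queue.h SLIST macros rather than the kernel's CK_SLIST_*
(Concurrency Kit) variants used in the patch. All names here (entry,
hashtbl, NBUCKETS, hash_name, entry2bucket) are hypothetical stand-ins for
namecache, nchashtbl, nchash, cache_get_hash and NCP2BUCKET:

#include <sys/queue.h>
#include <stdint.h>
#include <stdio.h>

#define NBUCKETS	64	/* power of two, like nchash + 1 */

struct entry {
	SLIST_ENTRY(entry) e_hash;	/* one pointer; LIST_ENTRY carries two */
	const char *e_name;
};

static SLIST_HEAD(bucket, entry) hashtbl[NBUCKETS];

/* Stand-in for cache_get_hash(): 32-bit FNV-1a over the name. */
static uint32_t
hash_name(const char *name)
{
	uint32_t h = 2166136261u;

	for (; *name != '\0'; name++)
		h = (h ^ (uint8_t)*name) * 16777619u;
	return (h);
}

/* Stand-in for NCP2BUCKET(): rehash the name to locate the bucket. */
static struct bucket *
entry2bucket(struct entry *ep)
{
	return (&hashtbl[hash_name(ep->e_name) & (NBUCKETS - 1)]);
}

int
main(void)
{
	struct entry a = { .e_name = "usr" }, b = { .e_name = "bin" };
	struct entry *ep;
	int i;

	for (i = 0; i < NBUCKETS; i++)
		SLIST_INIT(&hashtbl[i]);
	SLIST_INSERT_HEAD(entry2bucket(&a), &a, e_hash);
	SLIST_INSERT_HEAD(entry2bucket(&b), &b, e_hash);

	/*
	 * Removal is where SLIST costs more: SLIST_REMOVE walks the
	 * bucket from its head to find the predecessor, whereas
	 * LIST_REMOVE unlinks in O(1) via the back pointer each entry
	 * carries.  Short hash chains keep the walk cheap.
	 */
	SLIST_REMOVE(entry2bucket(&a), &a, entry, e_hash);

	for (i = 0; i < NBUCKETS; i++)
		SLIST_FOREACH(ep, &hashtbl[i], e_hash)
			printf("%s\n", ep->e_name);	/* prints: bin */
	return (0);
}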
Author: Mateusz Guzik
Date:   2020-08-05 09:25:59 +00:00
Commit: 2b86f9d6d0
Parent: cf8ac0de81
Notes (svn2git, 2020-12-20 02:59:44 +00:00):
    svn path=/head/; revision=363892


@@ -124,7 +124,7 @@ _Static_assert(sizeof(struct negstate) <= sizeof(struct vnode *),
 struct	namecache {
 	LIST_ENTRY(namecache) nc_src;	/* source vnode list */
 	TAILQ_ENTRY(namecache) nc_dst;	/* destination vnode list */
-	CK_LIST_ENTRY(namecache) nc_hash;/* hash chain */
+	CK_SLIST_ENTRY(namecache) nc_hash;/* hash chain */
 	struct	vnode *nc_dvp;		/* vnode of parent of name */
 	union {
 		struct	vnode *nu_vp;	/* vnode the name refers to */
@@ -264,7 +264,7 @@ VFS_SMR_DECLARE;
  */
 #define NCHHASH(hash) \
 	(&nchashtbl[(hash) & nchash])
-static __read_mostly CK_LIST_HEAD(nchashhead, namecache) *nchashtbl;/* Hash Table */
+static __read_mostly CK_SLIST_HEAD(nchashhead, namecache) *nchashtbl;/* Hash Table */
 static u_long __read_mostly	nchash;			/* size of hash table */
 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
     "Size of namecache hash table");
@@ -520,6 +520,15 @@ cache_get_hash(char *name, u_char len, struct vnode *dvp)
 	return (fnv_32_buf(name, len, dvp->v_nchash));
 }
 
+static inline struct nchashhead *
+NCP2BUCKET(struct namecache *ncp)
+{
+	uint32_t hash;
+
+	hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen, ncp->nc_dvp);
+	return (NCHHASH(hash));
+}
+
 static inline struct rwlock *
 NCP2BUCKETLOCK(struct namecache *ncp)
 {
@@ -687,7 +696,7 @@ sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
 	}
 	/* Scan hash tables counting entries */
 	for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++)
-		CK_LIST_FOREACH(ncp, ncpp, nc_hash)
+		CK_SLIST_FOREACH(ncp, ncpp, nc_hash)
 			cntbuf[i]++;
 	cache_unlock_all_buckets();
 	for (error = 0, i = 0; i < n_nchash; i++)
@@ -720,7 +729,7 @@ sysctl_debug_hashstat_nchash(SYSCTL_HANDLER_ARGS)
 	/* Scan hash tables for applicable entries */
 	for (ncpp = nchashtbl; n_nchash > 0; n_nchash--, ncpp++) {
 		count = 0;
-		CK_LIST_FOREACH(ncp, ncpp, nc_hash) {
+		CK_SLIST_FOREACH(ncp, ncpp, nc_hash) {
 			count++;
 		}
 		if (count)
@@ -952,6 +961,7 @@ cache_negative_zap_one(void)
 static void
 cache_zap_locked(struct namecache *ncp)
 {
+	struct nchashhead *ncpp;
 
 	if (!(ncp->nc_flag & NCF_NEGATIVE))
 		cache_assert_vnode_locked(ncp->nc_vp);
@@ -963,7 +973,8 @@ cache_zap_locked(struct namecache *ncp)
 
 	cache_ncp_invalidate(ncp);
 
-	CK_LIST_REMOVE(ncp, nc_hash);
+	ncpp = NCP2BUCKET(ncp);
+	CK_SLIST_REMOVE(ncpp, ncp, namecache, nc_hash);
 	if (!(ncp->nc_flag & NCF_NEGATIVE)) {
 		SDT_PROBE3(vfs, namecache, zap, done, ncp->nc_dvp,
 		    ncp->nc_name, ncp->nc_vp);
@@ -1122,7 +1133,7 @@ cache_zap_unlocked_bucket(struct namecache *ncp, struct componentname *cnp,
 	cache_sort_vnodes(&dvlp, &vlp);
 	cache_lock_vnodes(dvlp, vlp);
 	rw_wlock(blp);
-	CK_LIST_FOREACH(rncp, (NCHHASH(hash)), nc_hash) {
+	CK_SLIST_FOREACH(rncp, (NCHHASH(hash)), nc_hash) {
 		if (rncp == ncp && rncp->nc_dvp == dvp &&
 		    rncp->nc_nlen == cnp->cn_namelen &&
 		    !bcmp(rncp->nc_name, cnp->cn_nameptr, rncp->nc_nlen))
@@ -1336,12 +1347,12 @@ cache_lookup_nomakeentry(struct vnode *dvp, struct vnode **vpp,
 	hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
 	blp = HASH2BUCKETLOCK(hash);
 retry:
-	if (CK_LIST_EMPTY(NCHHASH(hash)))
+	if (CK_SLIST_EMPTY(NCHHASH(hash)))
 		goto out_no_entry;
 
 	rw_wlock(blp);
 
-	CK_LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
+	CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
 		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
 		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
 			break;
@@ -1485,7 +1496,7 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
 		rw_rlock(blp);
 	}
 
-	CK_LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
+	CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
 		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
 		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
 			break;
@@ -1932,7 +1943,7 @@ cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
 	 * the same path name.
 	 */
 	ncpp = NCHHASH(hash);
-	CK_LIST_FOREACH(n2, ncpp, nc_hash) {
+	CK_SLIST_FOREACH(n2, ncpp, nc_hash) {
 		if (n2->nc_dvp == dvp &&
 		    n2->nc_nlen == cnp->cn_namelen &&
 		    !bcmp(n2->nc_name, cnp->cn_nameptr, n2->nc_nlen)) {
@@ -2021,7 +2032,7 @@ cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
 	 * Insert the new namecache entry into the appropriate chain
 	 * within the cache entries table.
 	 */
-	CK_LIST_INSERT_HEAD(ncpp, ncp, nc_hash);
+	CK_SLIST_INSERT_HEAD(ncpp, ncp, nc_hash);
 	atomic_thread_fence_rel();
 
 	/*
@@ -2053,6 +2064,28 @@ cache_roundup_2(u_int val)
 	return (res);
 }
 
+static struct nchashhead *
+nchinittbl(u_long elements, u_long *hashmask)
+{
+	struct nchashhead *hashtbl;
+	u_long hashsize, i;
+
+	hashsize = cache_roundup_2(elements) / 2;
+
+	hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), M_VFSCACHE, M_WAITOK);
+	for (i = 0; i < hashsize; i++)
+		CK_SLIST_INIT(&hashtbl[i]);
+	*hashmask = hashsize - 1;
+	return (hashtbl);
+}
+
+static void
+ncfreetbl(struct nchashhead *hashtbl)
+{
+
+	free(hashtbl, M_VFSCACHE);
+}
+
 /*
  * Name cache initialization, from vfs_init() when we are booting
  */
@@ -2084,7 +2117,7 @@ nchinit(void *dummy __unused)
 	VFS_SMR_ZONE_SET(cache_zone_large_ts);
 
 	ncsize = desiredvnodes * ncsizefactor;
-	nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
+	nchashtbl = nchinittbl(desiredvnodes * 2, &nchash);
 	ncbuckethash = cache_roundup_2(mp_ncpus * mp_ncpus) - 1;
 	if (ncbuckethash < 7) /* arbitrarily chosen to avoid having one lock */
 		ncbuckethash = 7;
@@ -2139,10 +2172,10 @@ cache_changesize(u_long newmaxvnodes)
 	if (newmaxvnodes < numbucketlocks)
 		newmaxvnodes = numbucketlocks;
 
-	new_nchashtbl = hashinit(newmaxvnodes, M_VFSCACHE, &new_nchash);
+	new_nchashtbl = nchinittbl(newmaxvnodes, &new_nchash);
 	/* If same hash table size, nothing to do */
 	if (nchash == new_nchash) {
-		free(new_nchashtbl, M_VFSCACHE);
+		ncfreetbl(new_nchashtbl);
 		return;
 	}
 	/*
@@ -2157,17 +2190,17 @@ cache_changesize(u_long newmaxvnodes)
 	nchashtbl = new_nchashtbl;
 	nchash = new_nchash;
 	for (i = 0; i <= old_nchash; i++) {
-		while ((ncp = CK_LIST_FIRST(&old_nchashtbl[i])) != NULL) {
+		while ((ncp = CK_SLIST_FIRST(&old_nchashtbl[i])) != NULL) {
 			hash = cache_get_hash(ncp->nc_name, ncp->nc_nlen,
 			    ncp->nc_dvp);
-			CK_LIST_REMOVE(ncp, nc_hash);
-			CK_LIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash);
+			CK_SLIST_REMOVE(&old_nchashtbl[i], ncp, namecache, nc_hash);
+			CK_SLIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash);
 		}
 	}
 	ncsize = newncsize;
 	cache_unlock_all_buckets();
 	cache_unlock_all_vnodes();
-	free(old_nchashtbl, M_VFSCACHE);
+	ncfreetbl(old_nchashtbl);
 }
 
 /*
@@ -2317,7 +2350,7 @@ cache_purgevfs(struct mount *mp, bool force)
 		for (j = i; j < n_nchash; j += numbucketlocks) {
 retry:
 			bucket = &nchashtbl[j];
-			CK_LIST_FOREACH_SAFE(ncp, bucket, nc_hash, nnp) {
+			CK_SLIST_FOREACH_SAFE(ncp, bucket, nc_hash, nnp) {
 				cache_assert_bucket_locked(ncp, RA_WLOCKED);
 				if (ncp->nc_dvp->v_mount != mp)
 					continue;
@@ -3233,7 +3266,7 @@ cache_fplookup_negative_promote(struct cache_fpl *fpl, struct namecache *oncp,
 	 * In particular at this point there can be a new ncp which matches the
 	 * search but hashes to a different neglist.
 	 */
-	CK_LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
+	CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
 		if (ncp == oncp)
 			break;
 	}
@@ -3583,7 +3616,7 @@ cache_fplookup_next(struct cache_fpl *fpl)
 
 	hash = cache_get_hash(cnp->cn_nameptr, cnp->cn_namelen, dvp);
 
-	CK_LIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
+	CK_SLIST_FOREACH(ncp, (NCHHASH(hash)), nc_hash) {
 		if (ncp->nc_dvp == dvp && ncp->nc_nlen == cnp->cn_namelen &&
 		    !bcmp(ncp->nc_name, cnp->cn_nameptr, ncp->nc_nlen))
 			break;