Track changes to kern.maxvnodes and appropriately increase or decrease
the size of the name cache hash table (mapping file names to vnodes)
and the vnode hash table (mapping mount point and inode number to vnode).
An appropriate locking strategy is the key to changing hash table sizes
while they are in active use.

Reviewed by: kib
Tested by:   Peter Holm
Differential Revision: https://reviews.freebsd.org/D2265
MFC after:   2 weeks
mckusick 2015-09-06 05:50:51 +00:00
parent e7f079d7b1
commit 17357462a0
4 changed files with 102 additions and 2 deletions
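Once this change is in place, both tables are resized simply by writing kern.maxvnodes. A minimal userland sketch (needs root; the target value is an arbitrary example) that drives the new handler through sysctlbyname(3):

#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
        int oldval, newval = 500000;    /* arbitrary example target */
        size_t oldlen = sizeof(oldval);

        /*
         * Read the current limit and install the new one in a single
         * call; when the value actually changes, the kernel handler
         * rehashes both the name cache and the vnode hash table.
         */
        if (sysctlbyname("kern.maxvnodes", &oldval, &oldlen, &newval,
            sizeof(newval)) == -1)
                err(1, "sysctlbyname(kern.maxvnodes)");
        printf("kern.maxvnodes: %d -> %d\n", oldval, newval);
        return (0);
}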


@@ -327,11 +327,17 @@ sysctl_debug_hashstat_rawnchash(SYSCTL_HANDLER_ARGS)
        struct namecache *ncp;
        int i, error, n_nchash, *cntbuf;

retry:
        n_nchash = nchash + 1;  /* nchash is max index, not count */
        if (req->oldptr == NULL)
                return SYSCTL_OUT(req, 0, n_nchash * sizeof(int));
        cntbuf = malloc(n_nchash * sizeof(int), M_TEMP, M_ZERO | M_WAITOK);
        CACHE_RLOCK();
        if (n_nchash != nchash + 1) {
                CACHE_RUNLOCK();
                free(cntbuf, M_TEMP);
                goto retry;
        }
        /* Scan hash tables counting entries */
        for (ncpp = nchashtbl, i = 0; i < n_nchash; ncpp++, i++)
                LIST_FOREACH(ncp, ncpp, nc_hash)
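The recheck-and-retry added above exists because the counting buffer must be sized from an unlocked read of nchash before the lock is taken (the M_WAITOK allocation may sleep); if cache_changesize() replaced the table in the meantime, the stale buffer is discarded and the sizing is redone. A standalone sketch of the same idiom, with hypothetical names and a pthreads rwlock standing in for CACHE_RLOCK(), not the kernel code itself:

#include <pthread.h>
#include <stdlib.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static size_t table_size = 64;  /* changed only with table_lock held for writing */

int *
snapshot_counts(size_t *np)
{
        int *buf;
        size_t n;

retry:
        n = table_size;                 /* unlocked snapshot of the current size */
        buf = calloc(n, sizeof(*buf));  /* may block; done before taking the lock */
        if (buf == NULL)
                return (NULL);
        pthread_rwlock_rdlock(&table_lock);
        if (n != table_size) {          /* table was resized while we allocated */
                pthread_rwlock_unlock(&table_lock);
                free(buf);
                goto retry;
        }
        /* ... walk the table and fill buf[0..n-1] while the lock is held ... */
        pthread_rwlock_unlock(&table_lock);
        *np = n;
        return (buf);
}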
@@ -930,6 +936,44 @@ nchinit(void *dummy __unused)
}
SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_SECOND, nchinit, NULL);

void
cache_changesize(int newmaxvnodes)
{
        struct nchashhead *new_nchashtbl, *old_nchashtbl;
        u_long new_nchash, old_nchash;
        struct namecache *ncp;
        uint32_t hash;
        int i;

        new_nchashtbl = hashinit(newmaxvnodes * 2, M_VFSCACHE, &new_nchash);
        /* If same hash table size, nothing to do */
        if (nchash == new_nchash) {
                free(new_nchashtbl, M_VFSCACHE);
                return;
        }
        /*
         * Move everything from the old hash table to the new table.
         * None of the namecache entries in the table can be removed
         * because to do so, they have to be removed from the hash table.
         */
        CACHE_WLOCK();
        old_nchashtbl = nchashtbl;
        old_nchash = nchash;
        nchashtbl = new_nchashtbl;
        nchash = new_nchash;
        for (i = 0; i <= old_nchash; i++) {
                while ((ncp = LIST_FIRST(&old_nchashtbl[i])) != NULL) {
                        hash = fnv_32_buf(nc_get_name(ncp), ncp->nc_nlen,
                            FNV1_32_INIT);
                        hash = fnv_32_buf(&ncp->nc_dvp, sizeof(ncp->nc_dvp),
                            hash);
                        LIST_REMOVE(ncp, nc_hash);
                        LIST_INSERT_HEAD(NCHHASH(hash), ncp, nc_hash);
                }
        }
        CACHE_WUNLOCK();
        free(old_nchashtbl, M_VFSCACHE);
}

/*
 * Invalidate all entries to a particular vnode.

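cache_changesize() above is a textbook resize of an intrusive hash table: allocate the new bucket array first, publish it under the writer lock, then walk every old bucket and relink each entry into the bucket its recomputed hash selects, so no entry is ever copied or dropped. A self-contained userland sketch of the same pattern (hypothetical names; the mask is assumed to be of the form 2^k - 1, and the caller is assumed to hold whatever lock normally protects the table, as the kernel code holds CACHE_WLOCK()):

#include <sys/queue.h>

#include <stdlib.h>

struct entry {
        LIST_ENTRY(entry) link;         /* intrusive linkage, like nc_hash */
        unsigned long key;              /* value the bucket index is derived from */
};
LIST_HEAD(bucket, entry);

static void
table_changesize(struct bucket **tblp, unsigned long *maskp, unsigned long newmask)
{
        struct bucket *newtbl, *oldtbl = *tblp;
        unsigned long i, oldmask = *maskp;
        struct entry *e;

        /* Allocate and initialize the new bucket array up front. */
        newtbl = calloc(newmask + 1, sizeof(*newtbl));
        if (newtbl == NULL)
                return;                 /* keep using the old table */
        for (i = 0; i <= newmask; i++)
                LIST_INIT(&newtbl[i]);

        /* Publish the new table, then relink every entry into it. */
        *tblp = newtbl;
        *maskp = newmask;
        for (i = 0; i <= oldmask; i++) {
                while ((e = LIST_FIRST(&oldtbl[i])) != NULL) {
                        LIST_REMOVE(e, link);
                        LIST_INSERT_HEAD(&newtbl[e->key & newmask], e, link);
                }
        }
        free(oldtbl);
}

vfs_hash_changesize() below follows the same shape, except that it can reuse each vnode's precomputed v_hash instead of recomputing a hash per entry.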

@@ -161,3 +161,40 @@ vfs_hash_rehash(struct vnode *vp, u_int hash)
        vp->v_hash = hash;
        rw_wunlock(&vfs_hash_lock);
}

void
vfs_hash_changesize(int newmaxvnodes)
{
        struct vfs_hash_head *vfs_hash_newtbl, *vfs_hash_oldtbl;
        u_long vfs_hash_newmask, vfs_hash_oldmask;
        struct vnode *vp;
        int i;

        vfs_hash_newtbl = hashinit(newmaxvnodes, M_VFS_HASH,
            &vfs_hash_newmask);
        /* If same hash table size, nothing to do */
        if (vfs_hash_mask == vfs_hash_newmask) {
                free(vfs_hash_newtbl, M_VFS_HASH);
                return;
        }
        /*
         * Move everything from the old hash table to the new table.
         * None of the vnodes in the table can be recycled because to
         * do so, they have to be removed from the hash table.
         */
        rw_wlock(&vfs_hash_lock);
        vfs_hash_oldtbl = vfs_hash_tbl;
        vfs_hash_oldmask = vfs_hash_mask;
        vfs_hash_tbl = vfs_hash_newtbl;
        vfs_hash_mask = vfs_hash_newmask;
        for (i = 0; i <= vfs_hash_oldmask; i++) {
                while ((vp = LIST_FIRST(&vfs_hash_oldtbl[i])) != NULL) {
                        LIST_REMOVE(vp, v_hashlist);
                        LIST_INSERT_HEAD(
                            vfs_hash_bucket(vp->v_mount, vp->v_hash),
                            vp, v_hashlist);
                }
        }
        rw_wunlock(&vfs_hash_lock);
        free(vfs_hash_oldtbl, M_VFS_HASH);
}


@@ -281,8 +281,25 @@ static enum { SYNCER_RUNNING, SYNCER_SHUTTING_DOWN, SYNCER_FINAL_DELAY }
 * XXX desiredvnodes is historical cruft and should not exist.
 */
int desiredvnodes;
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
    &desiredvnodes, 0, "Maximum number of vnodes");

static int
sysctl_update_desiredvnodes(SYSCTL_HANDLER_ARGS)
{
        int error, old_desiredvnodes;

        old_desiredvnodes = desiredvnodes;
        if ((error = sysctl_handle_int(oidp, arg1, arg2, req)) != 0)
                return (error);
        if (old_desiredvnodes != desiredvnodes) {
                vfs_hash_changesize(desiredvnodes);
                cache_changesize(desiredvnodes);
        }
        return (0);
}

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, &desiredvnodes, 0,
    sysctl_update_desiredvnodes, "I", "Maximum number of vnodes");
SYSCTL_ULONG(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
    &wantfreevnodes, 0, "Minimum number of vnodes (legacy)");
static int vnlru_nowhere;

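The effect of a successful write is observable through the bucket-count sysctl served by the handler in the first hunk (debug.hashstat.rawnchash): a size-only probe returns n_nchash * sizeof(int), so the reported length changes once the name cache table has been resized. A minimal sketch:

#include <sys/types.h>
#include <sys/sysctl.h>

#include <err.h>
#include <stdio.h>

int
main(void)
{
        size_t len = 0;

        /*
         * Size-only probe: oldp == NULL makes the handler report the
         * space needed, i.e. one int per name cache hash bucket.
         */
        if (sysctlbyname("debug.hashstat.rawnchash", NULL, &len, NULL, 0) == -1)
                err(1, "sysctlbyname(debug.hashstat.rawnchash)");
        printf("name cache buckets: %zu\n", len / sizeof(int));
        return (0);
}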

@@ -607,6 +607,7 @@ struct vnode;
typedef int (*vn_get_ino_t)(struct mount *, void *, int, struct vnode **);

/* cache_* may belong in namei.h. */
void    cache_changesize(int newhashsize);
#define cache_enter(dvp, vp, cnp)                       \
        cache_enter_time(dvp, vp, cnp, NULL, NULL)
void    cache_enter_time(struct vnode *dvp, struct vnode *vp,
@@ -843,6 +844,7 @@ int fifo_printinfo(struct vnode *);
/* vfs_hash.c */
typedef int vfs_hash_cmp_t(struct vnode *vp, void *arg);
void    vfs_hash_changesize(int newhashsize);
int     vfs_hash_get(const struct mount *mp, u_int hash, int flags, struct thread *td, struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
u_int   vfs_hash_index(struct vnode *vp);
int     vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td, struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);