From 22e53424b295b4155e4c090aa469d652faaaa4d2 Mon Sep 17 00:00:00 2001 From: David Greenman Date: Tue, 4 Apr 1995 02:01:13 +0000 Subject: [PATCH] kern_subr.c: Added a new type to uiomove - "UIO_NOCOPY" which causes it to update pointers and counts, but doesn't do any data copying. This is needed for upcoming changes to the way that the vnode pager does its page outs. Added a new hash init function called "phashinit" that allocates and initializes a prime number sized hash table. vfs_cache.c: Changed hashing algorithm to use the remainder of dividing by a prime number to improve the distribution characteristics. Uses new phashinit function in kern_subr.c. --- sys/kern/kern_subr.c | 37 ++++++++++++++++++++++++++++++++++++- sys/kern/vfs_cache.c | 10 +++++----- 2 files changed, 41 insertions(+), 6 deletions(-) diff --git a/sys/kern/kern_subr.c b/sys/kern/kern_subr.c index 0c1d970493bb..c1912394176f 100644 --- a/sys/kern/kern_subr.c +++ b/sys/kern/kern_subr.c @@ -36,7 +36,7 @@ * SUCH DAMAGE. * * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94 - * $Id: kern_subr.c,v 1.3 1994/08/02 07:42:14 davidg Exp $ + * $Id: kern_subr.c,v 1.4 1995/02/12 09:11:47 davidg Exp $ */ #include @@ -90,6 +90,8 @@ uiomove(cp, n, uio) else bcopy(iov->iov_base, (caddr_t)cp, cnt); break; + case UIO_NOCOPY: + break; } iov->iov_base += cnt; iov->iov_len -= cnt; @@ -213,3 +215,36 @@ hashinit(elements, type, hashmask) *hashmask = hashsize - 1; return (hashtbl); } + +#define NPRIMES 24 +static int primes[] = { 61, 127, 251, 509, 761, 1021, 1531, 2039, 2557, + 3067, 3583, 4093, 4603, 5119, 5623, 6143, 6653, + 7159, 7673, 8191, 12281, 16381, 24571, 32749 }; + +/* + * General routine to allocate a prime number sized hash table. 
+ */ +void * +phashinit(elements, type, nentries) + int elements, type; + u_long *nentries; +{ + long hashsize; + LIST_HEAD(generic, generic) *hashtbl; + int i; + + if (elements <= 0) + panic("phashinit: bad cnt"); + for (i = 1, hashsize = primes[1]; hashsize <= elements;) { + i++; + if (i == NPRIMES) + break; + hashsize = primes[i]; + } + hashsize = primes[i - 1]; + hashtbl = malloc((u_long)hashsize * sizeof(*hashtbl), type, M_WAITOK); + for (i = 0; i < hashsize; i++) + LIST_INIT(&hashtbl[i]); + *nentries = hashsize; + return (hashtbl); +} diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c index 8dc79db41710..bb4b8be16fee 100644 --- a/sys/kern/vfs_cache.c +++ b/sys/kern/vfs_cache.c @@ -33,7 +33,7 @@ * SUCH DAMAGE. * * @(#)vfs_cache.c 8.3 (Berkeley) 8/22/94 - * $Id: vfs_cache.c,v 1.11 1995/03/12 02:01:20 phk Exp $ + * $Id: vfs_cache.c,v 1.12 1995/03/19 09:33:51 davidg Exp $ */ #include @@ -72,7 +72,7 @@ */ LIST_HEAD(nchashhead, namecache) *nchashtbl; /* Hash Table */ TAILQ_HEAD(, namecache) nclruhead; /* LRU chain */ -u_long nchash; /* size of hash table - 1 */ +u_long nchash; /* size of hash table */ struct nchstats nchstats; /* cache effectiveness statistics */ struct vnode nchENOENT; /* our own "novnode" */ int doingcache = 1; /* 1 => enable the cache */ @@ -134,7 +134,7 @@ cache_lookup(dvp, vpp, cnp) return (0); } - ncpp = &nchashtbl[(dvp->v_id + cnp->cn_hash) & nchash]; + ncpp = &nchashtbl[(dvp->v_id + cnp->cn_hash) % nchash]; for (ncp = ncpp->lh_first; ncp != 0; ncp = nnp) { nnp = ncp->nc_hash.le_next; /* If one of the vp's went stale, don't bother anymore. 
*/ @@ -235,7 +235,7 @@ cache_enter(dvp, vp, cnp) ncp->nc_nlen = cnp->cn_namelen; bcopy(cnp->cn_nameptr, ncp->nc_name, (unsigned)ncp->nc_nlen); TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru); - ncpp = &nchashtbl[(dvp->v_id + cnp->cn_hash) & nchash]; + ncpp = &nchashtbl[(dvp->v_id + cnp->cn_hash) % nchash]; LIST_INSERT_HEAD(ncpp, ncp, nc_hash); } @@ -248,7 +248,7 @@ nchinit() { TAILQ_INIT(&nclruhead); - nchashtbl = hashinit(desiredvnodes, M_CACHE, &nchash); + nchashtbl = phashinit(desiredvnodes, M_CACHE, &nchash); nchENOENT.v_id = 1; }