Vnodes pull in 800-900 bytes these days, all things counted, so we need
to treat desiredvnodes much more like a limit than as a vague concept.

On a 2GB RAM machine where desiredvnodes is 130k, we run out of
kmem_map space when we hit about 190k vnodes.
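
As a back-of-the-envelope check (a sketch only, using the 800-900 byte
estimate quoted above rather than measured structure sizes), the
arithmetic behind that blow-up looks roughly like this:

/* Rough kmem_map footprint at the quoted vnode sizes; the numbers are
   the estimates from this commit message, not measurements. */
#include <stdio.h>

int
main(void)
{
	long vnode_bytes = 900;		/* upper end of "800-900 bytes" */
	long desired = 130000;		/* desiredvnodes on the 2GB box */
	long blowup = 190000;		/* vnode count where kmem_map ran out */

	printf("at desiredvnodes: ~%ld MB\n",
	    desired * vnode_bytes / (1024 * 1024));
	printf("at blow-up point: ~%ld MB\n",
	    blowup * vnode_bytes / (1024 * 1024));
	return (0);
}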

If we wake up the vnode washer in getnewvnode(), sleep until it is done,
so that it has a chance to offer us a washed vnode.  If we don't sleep
here we'll just race ahead and allocate yet another vnode which will
never get freed.
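
A minimal userland sketch of that handshake, assuming nothing beyond the
commit message: pthreads condition variables stand in for tsleep()/wakeup(),
and all names (washer, wash_needed, wash_done, used, limit) are invented
for the illustration.

/*
 * Illustration only: the allocator pokes the washer and then waits for
 * it to report progress instead of racing ahead.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wash_needed = PTHREAD_COND_INITIALIZER;
static pthread_cond_t wash_done = PTHREAD_COND_INITIALIZER;
static int used = 150;			/* stand-in for numvnodes - freevnodes */
static const int limit = 100;		/* stand-in for desiredvnodes */

static void *
washer(void *arg)
{

	(void)arg;
	pthread_mutex_lock(&lock);
	for (;;) {
		while (used <= limit)
			pthread_cond_wait(&wash_needed, &lock);
		used -= 10;		/* "wash" a batch of vnodes */
		pthread_cond_broadcast(&wash_done);
	}
	/* NOTREACHED */
	return (NULL);
}

int
main(void)
{
	pthread_t td;

	pthread_create(&td, NULL, washer, NULL);
	pthread_mutex_lock(&lock);
	/* getnewvnode() analog: wake the washer, then sleep until it helps. */
	while (used > limit) {
		pthread_cond_signal(&wash_needed);
		pthread_cond_wait(&wash_done, &lock);
	}
	pthread_mutex_unlock(&lock);
	printf("allocator: proceeding with %d vnodes in use\n", used);
	return (0);
}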

In the vnodewasher, instead of doing 10 vnodes per mountpoint per
rotation, do 10% of desiredvnodes per rotation, distributed evenly
across the mountpoints.
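
With the 130k desiredvnodes figure from above and, say, four mounted
filesystems (the mount count here is purely hypothetical), the new
per-mountpoint quota works out as in this sketch:

/* Mirrors the new "take" computation; the mount count is made up. */
#include <stdio.h>

int
main(void)
{
	int desiredvnodes = 130000;	/* the 2GB example above */
	int nmounts = 4;		/* hypothetical mountpoint count */
	int take;

	take = desiredvnodes / (nmounts * 10);
	printf("per mountpoint per rotation: %d\n", take);	/* 3250 */
	printf("whole rotation: %d (10%% of desiredvnodes)\n",
	    take * nmounts);
	return (0);
}
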
Poul-Henning Kamp 2002-12-29 10:39:05 +00:00
parent 402746a2d7
commit 851a87ea1a

@@ -736,7 +736,7 @@ vnlru_proc(void)
 {
 	struct mount *mp, *nmp;
 	int s;
-	int done;
+	int done, take;
 	struct proc *p = vnlruproc;
 	struct thread *td = FIRST_THREAD_IN_PROC(p);	/* XXXKSE */
@@ -752,18 +752,23 @@ vnlru_proc(void)
 		if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
 			mtx_unlock(&vnode_free_list_mtx);
 			vnlruproc_sig = 0;
+			wakeup(&vnlruproc_sig);
 			tsleep(vnlruproc, PVFS, "vlruwt", 0);
 			continue;
 		}
 		mtx_unlock(&vnode_free_list_mtx);
 		done = 0;
 		mtx_lock(&mountlist_mtx);
+		take = 0;
+		TAILQ_FOREACH(mp, &mountlist, mnt_list)
+			take++;
+		take = desiredvnodes / (take * 10);
 		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
 			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td)) {
 				nmp = TAILQ_NEXT(mp, mnt_list);
 				continue;
 			}
-			done += vlrureclaim(mp, 10);
+			done += vlrureclaim(mp, take);
 			mtx_lock(&mountlist_mtx);
 			nmp = TAILQ_NEXT(mp, mnt_list);
 			vfs_unbusy(mp, td);
@@ -897,9 +902,14 @@ getnewvnode(tag, mp, vops, vpp)
 	 * attempt to directly reclaim vnodes due to nasty recursion
 	 * problems.
 	 */
-	if (vnlruproc_sig == 0 && numvnodes - freevnodes > desiredvnodes) {
-		vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
-		wakeup(vnlruproc);
+	while (numvnodes - freevnodes > desiredvnodes) {
+		if (vnlruproc_sig == 0) {
+			vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
+			wakeup(vnlruproc);
+		}
+		mtx_unlock(&vnode_free_list_mtx);
+		tsleep(&vnlruproc_sig, PVFS, "vlruwk", hz);
+		mtx_lock(&vnode_free_list_mtx);
 	}
 	/*