vfs: prevent numvnodes and freevnodes re-reads when appropriate

Otherwise, in code like this:

if (numvnodes > desiredvnodes)
	vnlru_free_locked(numvnodes - desiredvnodes, NULL);

numvnodes can drop below desiredvnodes between the check and the call, and
if the compiler generated a second read the subtraction would produce a
negative value.
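
[Editor's note] A minimal userspace sketch of the hazard, using the GCC/Clang
__atomic_load_n builtin as a stand-in for the kernel's atomic_load_long();
the globals and the vnlru_free_locked() stub here are illustrative, not the
kernel's definitions:

	#include <stdio.h>

	static unsigned long numvnodes;		/* written by other threads */
	static unsigned long desiredvnodes = 1000;

	static void
	vnlru_free_locked(long count)
	{
		/* Stub: the real function frees up to count vnodes. */
		printf("free %ld vnodes\n", count);
	}

	static void
	racy(void)
	{
		/*
		 * The compiler may read numvnodes once for the comparison
		 * and again for the subtraction; if the value drops in
		 * between, the unsigned subtraction wraps and the count
		 * argument ends up negative.
		 */
		if (numvnodes > desiredvnodes)
			vnlru_free_locked(numvnodes - desiredvnodes);
	}

	static void
	fixed(void)
	{
		/* One load feeds both the test and the arithmetic. */
		unsigned long rnumvnodes;

		rnumvnodes = __atomic_load_n(&numvnodes, __ATOMIC_RELAXED);
		if (rnumvnodes > desiredvnodes)
			vnlru_free_locked(rnumvnodes - desiredvnodes);
	}

	int
	main(void)
	{
		numvnodes = 1500;
		racy();		/* safe here only because main() is single-threaded */
		fixed();
		return (0);
	}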

Mateusz Guzik 2020-01-07 04:34:03 +00:00
parent 9a67c570a9
commit cc2b586d69

@@ -1211,16 +1211,19 @@ vnlru_free(int count, struct vfsops *mnt_op)
 static int
 vspace(void)
 {
+	u_long rnumvnodes, rfreevnodes;
 	int space;
 
 	gapvnodes = imax(desiredvnodes - wantfreevnodes, 100);
 	vhiwat = gapvnodes / 11;	/* 9% -- just under the 10% in vlrureclaim() */
 	vlowat = vhiwat / 2;
-	if (numvnodes > desiredvnodes)
+	rnumvnodes = atomic_load_long(&numvnodes);
+	rfreevnodes = atomic_load_long(&freevnodes);
+	if (rnumvnodes > desiredvnodes)
 		return (0);
-	space = desiredvnodes - numvnodes;
-	if (freevnodes > wantfreevnodes)
-		space += freevnodes - wantfreevnodes;
+	space = desiredvnodes - rnumvnodes;
+	if (rfreevnodes > wantfreevnodes)
+		space += rfreevnodes - wantfreevnodes;
 	return (space);
 }
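
[Editor's note] The atomic_load_long() calls are not about ordering; they pin
each global to a single machine load the compiler cannot duplicate or
re-issue, so the bounds check and the arithmetic below it always see the same
sample. As a rough model only (an assumption for illustration, not the
kernel's machine/atomic.h definition), the effect resembles the classic
volatile-cast idiom:

	/* Force exactly one read of *p; the compiler may not re-load it. */
	#define LOAD_ONCE(p)	(*(volatile __typeof__(*(p)) *)(p))

	rnumvnodes = LOAD_ONCE(&numvnodes);

With the snapshot, rnumvnodes > desiredvnodes and
space = desiredvnodes - rnumvnodes are evaluated against the same value, so
space cannot go negative the way a second read could make it.
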
@@ -1292,6 +1295,7 @@ static int vnlruproc_sig;
 static void
 vnlru_proc(void)
 {
+	u_long rnumvnodes, rfreevnodes;
 	struct mount *mp, *nmp;
 	unsigned long onumvnodes;
 	int done, force, trigger, usevnodes, vsp;
@@ -1304,13 +1308,14 @@ vnlru_proc(void)
 	for (;;) {
 		kproc_suspend_check(vnlruproc);
 		mtx_lock(&vnode_free_list_mtx);
+		rnumvnodes = atomic_load_long(&numvnodes);
 		/*
 		 * If numvnodes is too large (due to desiredvnodes being
 		 * adjusted using its sysctl, or emergency growth), first
 		 * try to reduce it by discarding from the free list.
 		 */
-		if (numvnodes > desiredvnodes)
-			vnlru_free_locked(numvnodes - desiredvnodes, NULL);
+		if (rnumvnodes > desiredvnodes)
+			vnlru_free_locked(rnumvnodes - desiredvnodes, NULL);
 		/*
 		 * Sleep if the vnode cache is in a good state. This is
 		 * when it is not over-full and has space for about a 4%
@@ -1332,7 +1337,10 @@ vnlru_proc(void)
 		}
 		mtx_unlock(&vnode_free_list_mtx);
 		done = 0;
-		onumvnodes = numvnodes;
+		rnumvnodes = atomic_load_long(&numvnodes);
+		rfreevnodes = atomic_load_long(&freevnodes);
+		onumvnodes = rnumvnodes;
 		/*
 		 * Calculate parameters for recycling. These are the same
 		 * throughout the loop to give some semblance of fairness.
@@ -1340,10 +1348,10 @@ vnlru_proc(void)
 		 * of resident pages. We aren't trying to free memory; we
 		 * are trying to recycle or at least free vnodes.
 		 */
-		if (numvnodes <= desiredvnodes)
-			usevnodes = numvnodes - freevnodes;
+		if (rnumvnodes <= desiredvnodes)
+			usevnodes = rnumvnodes - rfreevnodes;
 		else
-			usevnodes = numvnodes;
+			usevnodes = rnumvnodes;
 		if (usevnodes <= 0)
 			usevnodes = 1;
 		/*
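
[Editor's note] The two loads above are adjacent but not an atomic pair, so a
transiently inconsistent pair of snapshots (rfreevnodes exceeding rnumvnodes)
remains possible; the pre-existing usevnodes <= 0 clamp absorbs it. A
self-contained sketch of the computation, where the wrapper function is
hypothetical and the body mirrors the diff:

	static int
	compute_usevnodes(unsigned long rnumvnodes, unsigned long rfreevnodes,
	    unsigned long desiredvnodes)
	{
		int usevnodes;

		if (rnumvnodes <= desiredvnodes)
			usevnodes = rnumvnodes - rfreevnodes;
		else
			usevnodes = rnumvnodes;
		/* An inconsistent snapshot pair lands here and is clamped. */
		if (usevnodes <= 0)
			usevnodes = 1;
		return (usevnodes);
	}
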
@@ -1516,14 +1524,17 @@ getnewvnode_wait(int suspended)
 void
 getnewvnode_reserve(u_int count)
 {
+	u_long rnumvnodes, rfreevnodes;
 	struct thread *td;
 
 	/* Pre-adjust like the pre-adjust in getnewvnode(), with any count. */
 	/* XXX no longer so quick, but this part is not racy. */
 	mtx_lock(&vnode_free_list_mtx);
-	if (numvnodes + count > desiredvnodes && freevnodes > wantfreevnodes)
-		vnlru_free_locked(ulmin(numvnodes + count - desiredvnodes,
-		    freevnodes - wantfreevnodes), NULL);
+	rnumvnodes = atomic_load_long(&numvnodes);
+	rfreevnodes = atomic_load_long(&freevnodes);
+	if (rnumvnodes + count > desiredvnodes && rfreevnodes > wantfreevnodes)
+		vnlru_free_locked(ulmin(rnumvnodes + count - desiredvnodes,
+		    rfreevnodes - wantfreevnodes), NULL);
 	mtx_unlock(&vnode_free_list_mtx);
 	td = curthread;
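
[Editor's note] The reserve path clamps how much vnlru_free_locked() may free
to the smaller of the over-allocation and the free-list surplus, so the free
list is never cut below wantfreevnodes. A worked example with made-up numbers
(ulmin() is re-declared here so the sketch stands alone; the kernel gets it
from sys/libkern.h):

	static unsigned long
	ulmin(unsigned long a, unsigned long b)
	{

		return (a < b ? a : b);
	}

	/*
	 * rnumvnodes = 980, count = 50, desiredvnodes = 1000:
	 *   over-allocation   = 980 + 50 - 1000 = 30.
	 * rfreevnodes = 120, wantfreevnodes = 100:
	 *   free-list surplus = 120 - 100 = 20.
	 * ulmin(30, 20) = 20, so at most 20 vnodes are freed.
	 */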