Back out the removal of LK_NOWAIT from the VOP_LOCK() call in
vlrureclaim() in vfs_subr.c 1.636 because waiting for the vnode
lock aggravates an existing race condition.  It is also undesirable
according to the commit log for 1.631.

Fix the tiny race condition that remains by rechecking the vnode
state after grabbing the vnode lock and grabbing the vnode interlock.

Fix the problem of other threads being starved (which 1.636 attempted
to fix by removing LK_NOWAIT) by calling uio_yield() periodically
in vlrureclaim().  This should be more deterministic than hoping
that VOP_LOCK() without LK_NOWAIT will block, which may not happen
in this loop.

Reviewed by:	kan
MFC after:	5 days
This commit is contained in:
Don Lewis 2005-08-23 03:44:06 +00:00
parent 4d3d08301e
commit ad9f180121
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=149385

View File

@@ -570,29 +570,59 @@ vlrureclaim(struct mount *mp)
TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
--count;
if (!VI_TRYLOCK(vp))
continue;
goto next_iter;
/*
* If it's been deconstructed already, it's still
* referenced, or it exceeds the trigger, skip it.
*/
if ((vp->v_iflag & VI_DOOMED) != 0 || vp->v_usecount ||
!LIST_EMPTY(&(vp)->v_cache_src) || (vp->v_object != NULL &&
if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
(vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL &&
vp->v_object->resident_page_count > trigger)) {
VI_UNLOCK(vp);
continue;
goto next_iter;
}
MNT_IUNLOCK(mp);
vholdl(vp);
if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE, td)) {
if (VOP_LOCK(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT, td)) {
vdrop(vp);
MNT_ILOCK(mp);
continue;
goto next_iter_mntunlocked;
}
VI_LOCK(vp);
/*
* v_usecount may have been bumped after VOP_LOCK() dropped
* the vnode interlock and before it was locked again.
*
* It is not necessary to recheck VI_DOOMED because it can
* only be set by another thread that holds both the vnode
* lock and vnode interlock. If another thread has the
* vnode lock before we get to VOP_LOCK() and obtains the
* vnode interlock after VOP_LOCK() drops the vnode
* interlock, the other thread will be unable to drop the
* vnode lock before our VOP_LOCK() call fails.
*/
if (vp->v_usecount || !LIST_EMPTY(&(vp)->v_cache_src) ||
(vp->v_object != NULL &&
vp->v_object->resident_page_count > trigger)) {
VOP_UNLOCK(vp, LK_INTERLOCK, td);
goto next_iter_mntunlocked;
}
KASSERT((vp->v_iflag & VI_DOOMED) == 0,
("VI_DOOMED unexpectedly detected in vlrureclaim()"));
vgonel(vp);
VOP_UNLOCK(vp, 0, td);
vdropl(vp);
done++;
next_iter_mntunlocked:
if ((count % 256) != 0)
goto relock_mnt;
goto yield;
next_iter:
if ((count % 256) != 0)
continue;
MNT_IUNLOCK(mp);
yield:
uio_yield();
relock_mnt:
MNT_ILOCK(mp);
}
MNT_IUNLOCK(mp);