cache: add cache_purge_vgone

cache_purge locklessly checks whether the vnode at hand has any namecache
entries. This can race with a concurrent purge that managed to remove
the last entry, but may not yet be done touching the vnode.

Make sure we observe the relevant vnode lock as not taken before proceeding
with vgone.

Paired with the fact that doomed vnodes cannot receive entries, this restores
the invariant that there are no namecache-related writing users past cache_purge
in vgone.
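The pattern is easier to see outside the kernel. Below is a minimal userspace
sketch of the same wait-then-recheck idea, not the kernel code: pthreads stand
in for the vnode mutex, a single bool stands in for
v_cache_src/v_cache_dst/v_cache_dd, purger() and purge_vgone() are hypothetical
names, and a plain lock/unlock pair approximates mtx_wait_unlocked().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t vlp = PTHREAD_MUTEX_INITIALIZER;
static bool has_entries = true;	/* stand-in for the namecache fields */

/* Stand-in for a concurrent cache_purge(): empties state under the lock. */
static void *
purger(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&vlp);
	has_entries = false;	/* last entry just went away ... */
	/* ... but this thread is not done touching the vnode yet */
	pthread_mutex_unlock(&vlp);
	return (NULL);
}

/*
 * Stand-in for cache_purge_vgone(): only trust an empty snapshot after
 * the lock has been observed as not taken.
 */
static void
purge_vgone(void)
{
	/* Lockless check; the kernel reads these fields the same way. */
	if (has_entries) {
		pthread_mutex_lock(&vlp);
		has_entries = false;	/* purge under the lock */
		pthread_mutex_unlock(&vlp);
		return;
	}
	/*
	 * The empty snapshot may be transient: a purger may still hold
	 * the lock. The kernel uses mtx_wait_unlocked(vlp) here; a
	 * lock/unlock pair is the closest portable analogue.
	 */
	pthread_mutex_lock(&vlp);
	pthread_mutex_unlock(&vlp);
	if (has_entries) {
		pthread_mutex_lock(&vlp);
		has_entries = false;
		pthread_mutex_unlock(&vlp);
	}
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, purger, NULL);
	purge_vgone();
	pthread_join(t, NULL);
	printf("entries left: %d\n", has_entries);
	return (0);
}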

Reported by:	pho

Author:	mjg
Date:	2020-08-04 23:04:29 +00:00
Commit:	dcf686cabf
Parent:	a83b4ff63f

3 files changed, 52 insertions(+), 9 deletions(-)

sys/kern/vfs_cache.c

@@ -2138,22 +2138,17 @@ cache_changesize(u_long newmaxvnodes)
 /*
  * Invalidate all entries from and to a particular vnode.
  */
-void
-cache_purge(struct vnode *vp)
+static void
+cache_purge_impl(struct vnode *vp)
 {
 	TAILQ_HEAD(, namecache) ncps;
 	struct namecache *ncp, *nnp;
 	struct mtx *vlp, *vlp2;
 
-	CTR1(KTR_VFS, "cache_purge(%p)", vp);
-	SDT_PROBE1(vfs, namecache, purge, done, vp);
-	if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
-	    vp->v_cache_dd == NULL)
-		return;
 	TAILQ_INIT(&ncps);
 	vlp = VP2VNODELOCK(vp);
 	vlp2 = NULL;
-	mtx_lock(vlp);
+	mtx_assert(vlp, MA_OWNED);
 retry:
 	while (!LIST_EMPTY(&vp->v_cache_src)) {
 		ncp = LIST_FIRST(&vp->v_cache_src);
@@ -2184,6 +2179,53 @@ cache_purge(struct vnode *vp)
 	}
 }
 
+void
+cache_purge(struct vnode *vp)
+{
+	struct mtx *vlp;
+
+	SDT_PROBE1(vfs, namecache, purge, done, vp);
+	if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
+	    vp->v_cache_dd == NULL)
+		return;
+	vlp = VP2VNODELOCK(vp);
+	mtx_lock(vlp);
+	cache_purge_impl(vp);
+}
+
+/*
+ * Only to be used by vgone.
+ */
+void
+cache_purge_vgone(struct vnode *vp)
+{
+	struct mtx *vlp;
+
+	VNPASS(VN_IS_DOOMED(vp), vp);
+	vlp = VP2VNODELOCK(vp);
+	if (!(LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
+	    vp->v_cache_dd == NULL)) {
+		mtx_lock(vlp);
+		cache_purge_impl(vp);
+		mtx_assert(vlp, MA_NOTOWNED);
+		return;
+	}
+
+	/*
+	 * All the NULL pointer state we found above may be transient.
+	 * Serialize against a possible thread doing cache_purge.
+	 */
+	mtx_wait_unlocked(vlp);
+	if (!(LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
+	    vp->v_cache_dd == NULL)) {
+		mtx_lock(vlp);
+		cache_purge_impl(vp);
+		mtx_assert(vlp, MA_NOTOWNED);
+		return;
+	}
+	return;
+}
+
 /*
  * Invalidate all negative entries for a particular directory vnode.
  */
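
To see why cache_purge_vgone rechecks after mtx_wait_unlocked, consider the
interleaving the commit message describes; the timeline below is illustrative
(threads A and B are hypothetical):

/*
 * Thread A: cache_purge(vp)            Thread B: cache_purge_vgone(vp)
 *
 * mtx_lock(vlp);
 * cache_purge_impl(vp);
 *   ... removes the last entry ...
 *                                      lockless check: lists empty,
 *                                      v_cache_dd == NULL
 *   ... still touching vp ...
 *                                      mtx_wait_unlocked(vlp) blocks
 *   ... releases vlp on return ...
 *                                      recheck: still empty, so the
 *                                      concurrent purger is done with vp
 */

Since vp is already doomed it cannot receive new entries, so nothing can
appear after the recheck; this is what restores the invariant stated above.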

sys/kern/vfs_subr.c

@@ -4146,7 +4146,7 @@ vgonel(struct vnode *vp)
 	 * Delete from old mount point vnode list.
 	 */
 	delmntque(vp);
-	cache_purge(vp);
+	cache_purge_vgone(vp);
 	/*
 	 * Done with purge, reset to the standard lock and invalidate
 	 * the vnode.

sys/sys/vnode.h

@@ -638,6 +638,7 @@ int	cache_lookup(struct vnode *dvp, struct vnode **vpp,
 	    struct componentname *cnp, struct timespec *tsp, int *ticksp);
 void	cache_vnode_init(struct vnode *vp);
 void	cache_purge(struct vnode *vp);
+void	cache_purge_vgone(struct vnode *vp);
 void	cache_purge_negative(struct vnode *vp);
 void	cache_purgevfs(struct mount *mp, bool force);
 int	change_dir(struct vnode *vp, struct thread *td);