cache: add cache_purge_vgone

cache_purge locklessly checks whether the vnode at hand has any namecache
entries. This can race with a concurrent purge which managed to remove
the last entry but may not yet be done touching the vnode.

Make sure we observe the relevant vnode lock as not taken before proceeding
with vgone.

Paired with the fact that doomed vnodes cannot receive new entries, this restores
the invariant that there are no namecache-related writing users past cache_purge
in vgone.

Reported by:	pho
Author:	Mateusz Guzik
Date:	2020-08-04 23:04:29 +00:00
Parent:	bd66a0750f
Commit:	6e10434c02
Notes:	svn2git 2020-12-20 02:59:44 +00:00; svn path=/head/; revision=363872
3 changed files with 52 additions and 9 deletions
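For orientation before the diff: cache_purge() stays the general-purpose entry point, while the new cache_purge_vgone() does the lockless emptiness check and, when it sees nothing, waits for the vnode lock to be observed free before believing that result. Below is the same check/wait/re-check shape moved to userland as a minimal sketch (struct resource and every function name in it are made up for illustration; a pthread lock/unlock pair stands in for the kernel's mtx_wait_unlocked()):

/*
 * Minimal userland sketch of the check -> wait -> re-check pattern.  All
 * names are hypothetical; the mutex plays the role of the vnode lock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct resource {
	pthread_mutex_t	lock;		/* stands in for VP2VNODELOCK(vp) */
	int		nentries;	/* stands in for the namecache lists */
};

/* Lockless check; may observe transient state while a purger holds the lock. */
static bool
resource_is_empty(struct resource *r)
{
	return (r->nentries == 0);
}

/* Purge with the lock held; drops the lock before returning, like cache_purge_impl(). */
static void
resource_purge_locked(struct resource *r)
{
	r->nentries = 0;
	pthread_mutex_unlock(&r->lock);
}

/* Teardown path, shaped like cache_purge_vgone(). */
static void
resource_teardown(struct resource *r)
{
	if (!resource_is_empty(r)) {
		pthread_mutex_lock(&r->lock);
		resource_purge_locked(r);
		return;
	}
	/*
	 * The empty state observed above may be transient: another thread may
	 * have just removed the last entry and still hold the lock.  Wait for
	 * the lock to be free (the kernel uses mtx_wait_unlocked() so it never
	 * has to take it), then re-check.
	 */
	pthread_mutex_lock(&r->lock);
	pthread_mutex_unlock(&r->lock);
	if (!resource_is_empty(r)) {
		pthread_mutex_lock(&r->lock);
		resource_purge_locked(r);
	}
}

int
main(void)
{
	struct resource r = { .lock = PTHREAD_MUTEX_INITIALIZER, .nentries = 3 };

	resource_teardown(&r);
	printf("entries left: %d\n", r.nentries);
	return (0);
}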

sys/kern/vfs_cache.c

@@ -2138,22 +2138,17 @@ cache_changesize(u_long newmaxvnodes)
 /*
  * Invalidate all entries from and to a particular vnode.
  */
-void
-cache_purge(struct vnode *vp)
+static void
+cache_purge_impl(struct vnode *vp)
 {
 	TAILQ_HEAD(, namecache) ncps;
 	struct namecache *ncp, *nnp;
 	struct mtx *vlp, *vlp2;
 
-	CTR1(KTR_VFS, "cache_purge(%p)", vp);
-	SDT_PROBE1(vfs, namecache, purge, done, vp);
-	if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
-	    vp->v_cache_dd == NULL)
-		return;
 	TAILQ_INIT(&ncps);
 	vlp = VP2VNODELOCK(vp);
 	vlp2 = NULL;
-	mtx_lock(vlp);
+	mtx_assert(vlp, MA_OWNED);
 retry:
 	while (!LIST_EMPTY(&vp->v_cache_src)) {
 		ncp = LIST_FIRST(&vp->v_cache_src);
@@ -2184,6 +2179,53 @@ cache_purge(struct vnode *vp)
 	}
 }
 
+void
+cache_purge(struct vnode *vp)
+{
+	struct mtx *vlp;
+
+	SDT_PROBE1(vfs, namecache, purge, done, vp);
+	if (LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
+	    vp->v_cache_dd == NULL)
+		return;
+	vlp = VP2VNODELOCK(vp);
+	mtx_lock(vlp);
+	cache_purge_impl(vp);
+}
+
+/*
+ * Only to be used by vgone.
+ */
+void
+cache_purge_vgone(struct vnode *vp)
+{
+	struct mtx *vlp;
+
+	VNPASS(VN_IS_DOOMED(vp), vp);
+	vlp = VP2VNODELOCK(vp);
+	if (!(LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
+	    vp->v_cache_dd == NULL)) {
+		mtx_lock(vlp);
+		cache_purge_impl(vp);
+		mtx_assert(vlp, MA_NOTOWNED);
+		return;
+	}
+
+	/*
+	 * All the NULL pointer state we found above may be transient.
+	 * Serialize against a possible thread doing cache_purge.
+	 */
+	mtx_wait_unlocked(vlp);
+	if (!(LIST_EMPTY(&vp->v_cache_src) && TAILQ_EMPTY(&vp->v_cache_dst) &&
+	    vp->v_cache_dd == NULL)) {
+		mtx_lock(vlp);
+		cache_purge_impl(vp);
+		mtx_assert(vlp, MA_NOTOWNED);
+		return;
+	}
+	return;
+}
+
 /*
  * Invalidate all negative entries for a particular directory vnode.
  */
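Two details in cache_purge_vgone() above are worth calling out. mtx_wait_unlocked() only waits for the vnode lock to be observed unowned; it does not take it, so the expected case for a doomed vnode (no entries and no concurrent purge) finishes without acquiring any lock. The emptiness test is then repeated because the earlier lockless reads may have caught a concurrent cache_purge() mid-operation; any entries found at that point are removed under the lock, and since the vnode is doomed no new ones can appear afterwards.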

sys/kern/vfs_subr.c

@@ -4146,7 +4146,7 @@ vgonel(struct vnode *vp)
 	/*
 	 * Delete from old mount point vnode list.
 	 */
 	delmntque(vp);
-	cache_purge(vp);
+	cache_purge_vgone(vp);
 	/*
 	 * Done with purge, reset to the standard lock and invalidate
 	 * the vnode.
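By this point vgonel() has already marked the vnode doomed, which is exactly what the VNPASS(VN_IS_DOOMED(vp), vp) assertion in cache_purge_vgone() checks, and which makes the cheaper purge path safe: per the log message, a doomed vnode cannot receive new namecache entries.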

sys/sys/vnode.h

@@ -638,6 +638,7 @@ int cache_lookup(struct vnode *dvp, struct vnode **vpp,
 	    struct componentname *cnp, struct timespec *tsp, int *ticksp);
 void	cache_vnode_init(struct vnode *vp);
 void	cache_purge(struct vnode *vp);
+void	cache_purge_vgone(struct vnode *vp);
 void	cache_purge_negative(struct vnode *vp);
 void	cache_purgevfs(struct mount *mp, bool force);
 int	change_dir(struct vnode *vp, struct thread *td);