cache: assorted clean ups

In particular, remove spurious comments, duplicate assertions, and the
inconsistently done KTR support.
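
For context, a minimal sketch (not part of the commit) of the two tracing facilities the message refers to: KTR tracepoints, i.e. the CTR* calls removed from the name cache code, and the DTrace SDT probes that remain. The function example_trace and the standalone probe declaration below are illustrative assumptions; the macro usage mirrors the calls visible in the diff.

#include <sys/param.h>
#include <sys/ktr.h>
#include <sys/sdt.h>

struct vnode;

SDT_PROVIDER_DECLARE(vfs);
SDT_PROBE_DECLARE(vfs, namecache, lookup, hit);

static void
example_trace(struct vnode *dvp, const char *name, struct vnode *vp)
{
	/*
	 * KTR tracepoint: compiled in only with "options KTR" and emitted
	 * only when KTR_VFS is enabled in the trace mask.  Calls of this
	 * form are what the commit removes as inconsistently done.
	 */
	CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p", dvp, name, vp);

	/*
	 * DTrace SDT probe: the SDT_PROBE* calls in vfs_cache.c stay.
	 */
	SDT_PROBE3(vfs, namecache, lookup, hit, dvp, name, vp);
}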
Author: Mateusz Guzik
Date:   2020-08-27 06:31:27 +00:00
Parent: 12441fcbe2
Commit: 5f08d440b0


@@ -999,9 +999,6 @@ cache_zap_locked(struct namecache *ncp)
cache_assert_vnode_locked(ncp->nc_dvp);
cache_assert_bucket_locked(ncp);
CTR2(KTR_VFS, "cache_zap(%p) vp %p", ncp,
(ncp->nc_flag & NCF_NEGATIVE) ? NULL : ncp->nc_vp);
cache_ncp_invalidate(ncp);
ncpp = NCP2BUCKET(ncp);
@@ -1260,148 +1257,6 @@ cache_zap_locked_bucket_kl(struct namecache *ncp, struct mtx *blp,
return (EAGAIN);
}
static int __noinline
cache_lookup_dot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
struct timespec *tsp, int *ticksp)
{
int ltype;
*vpp = dvp;
CTR2(KTR_VFS, "cache_lookup(%p, %s) found via .",
dvp, cnp->cn_nameptr);
counter_u64_add(dothits, 1);
SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp);
if (tsp != NULL)
timespecclear(tsp);
if (ticksp != NULL)
*ticksp = ticks;
vrefact(*vpp);
/*
* When we lookup "." we still can be asked to lock it
* differently...
*/
ltype = cnp->cn_lkflags & LK_TYPE_MASK;
if (ltype != VOP_ISLOCKED(*vpp)) {
if (ltype == LK_EXCLUSIVE) {
vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
if (VN_IS_DOOMED((*vpp))) {
/* forced unmount */
vrele(*vpp);
*vpp = NULL;
return (ENOENT);
}
} else
vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
}
return (-1);
}
static __noinline int
cache_remove_cnp(struct vnode *dvp, struct componentname *cnp);
static int __noinline
cache_lookup_dotdot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
struct timespec *tsp, int *ticksp)
{
struct namecache_ts *ncp_ts;
struct namecache *ncp;
struct mtx *dvlp;
enum vgetstate vs;
int error, ltype;
bool whiteout;
MPASS((cnp->cn_flags & ISDOTDOT) != 0);
if ((cnp->cn_flags & MAKEENTRY) == 0) {
cache_remove_cnp(dvp, cnp);
return (0);
}
counter_u64_add(dotdothits, 1);
retry:
dvlp = VP2VNODELOCK(dvp);
mtx_lock(dvlp);
ncp = dvp->v_cache_dd;
if (ncp == NULL) {
SDT_PROBE3(vfs, namecache, lookup, miss, dvp,
"..", NULL);
mtx_unlock(dvlp);
return (0);
}
if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
if (ncp->nc_flag & NCF_NEGATIVE)
*vpp = NULL;
else
*vpp = ncp->nc_vp;
} else
*vpp = ncp->nc_dvp;
/* Return failure if negative entry was found. */
if (*vpp == NULL)
goto negative_success;
CTR3(KTR_VFS, "cache_lookup(%p, %s) found %p via ..",
dvp, cnp->cn_nameptr, *vpp);
SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..",
*vpp);
cache_out_ts(ncp, tsp, ticksp);
if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) ==
NCF_DTS && tsp != NULL) {
ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
*tsp = ncp_ts->nc_dotdottime;
}
/*
* On success we return a locked and ref'd vnode as per the lookup
* protocol.
*/
MPASS(dvp != *vpp);
ltype = 0; /* silence gcc warning */
ltype = VOP_ISLOCKED(dvp);
VOP_UNLOCK(dvp);
vs = vget_prep(*vpp);
mtx_unlock(dvlp);
error = vget_finish(*vpp, cnp->cn_lkflags, vs);
vn_lock(dvp, ltype | LK_RETRY);
if (VN_IS_DOOMED(dvp)) {
if (error == 0)
vput(*vpp);
*vpp = NULL;
return (ENOENT);
}
if (error) {
*vpp = NULL;
goto retry;
}
if ((cnp->cn_flags & ISLASTCN) &&
(cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
}
return (-1);
negative_success:
if (__predict_false(cnp->cn_nameiop == CREATE)) {
if (cnp->cn_flags & ISLASTCN) {
counter_u64_add(numnegzaps, 1);
error = cache_zap_locked_vnode(ncp, dvp);
if (__predict_false(error != 0)) {
zap_and_exit_bucket_fail2++;
goto retry;
}
cache_free(ncp);
return (0);
}
}
SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp, ncp->nc_name);
cache_out_ts(ncp, tsp, ticksp);
counter_u64_add(numneghits, 1);
whiteout = (ncp->nc_flag & NCF_WHITE);
cache_negative_hit(ncp);
mtx_unlock(dvlp);
if (whiteout)
cnp->cn_flags |= ISWHITEOUT;
return (ENOENT);
}
static __noinline int
cache_remove_cnp(struct vnode *dvp, struct componentname *cnp)
{
@@ -1426,10 +1281,7 @@ retry_dotdot:
return (0);
}
if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
if (ncp->nc_dvp != dvp)
panic("dvp %p v_cache_dd %p\n", dvp, ncp);
if (!cache_zap_locked_vnode_kl2(ncp,
dvp, &dvlp2))
if (!cache_zap_locked_vnode_kl2(ncp, dvp, &dvlp2))
goto retry_dotdot;
MPASS(dvp->v_cache_dd == NULL);
mtx_unlock(dvlp);
@@ -1462,7 +1314,6 @@ retry:
break;
}
/* We failed to find an entry */
if (ncp == NULL) {
mtx_unlock(blp);
goto out_no_entry;
@@ -1474,15 +1325,137 @@ retry:
goto retry;
}
counter_u64_add(numposzaps, 1);
cache_free(ncp);
SDT_PROBE2(vfs, namecache, removecnp, hit, dvp, cnp);
cache_free(ncp);
return (1);
out_no_entry:
SDT_PROBE2(vfs, namecache, removecnp, miss, dvp, cnp);
counter_u64_add(nummisszap, 1);
SDT_PROBE2(vfs, namecache, removecnp, miss, dvp, cnp);
return (0);
}
static int __noinline
cache_lookup_dot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
struct timespec *tsp, int *ticksp)
{
int ltype;
*vpp = dvp;
counter_u64_add(dothits, 1);
SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ".", *vpp);
if (tsp != NULL)
timespecclear(tsp);
if (ticksp != NULL)
*ticksp = ticks;
vrefact(*vpp);
/*
* When we lookup "." we still can be asked to lock it
* differently...
*/
ltype = cnp->cn_lkflags & LK_TYPE_MASK;
if (ltype != VOP_ISLOCKED(*vpp)) {
if (ltype == LK_EXCLUSIVE) {
vn_lock(*vpp, LK_UPGRADE | LK_RETRY);
if (VN_IS_DOOMED((*vpp))) {
/* forced unmount */
vrele(*vpp);
*vpp = NULL;
return (ENOENT);
}
} else
vn_lock(*vpp, LK_DOWNGRADE | LK_RETRY);
}
return (-1);
}
static int __noinline
cache_lookup_dotdot(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
struct timespec *tsp, int *ticksp)
{
struct namecache_ts *ncp_ts;
struct namecache *ncp;
struct mtx *dvlp;
enum vgetstate vs;
int error, ltype;
bool whiteout;
MPASS((cnp->cn_flags & ISDOTDOT) != 0);
if ((cnp->cn_flags & MAKEENTRY) == 0) {
cache_remove_cnp(dvp, cnp);
return (0);
}
counter_u64_add(dotdothits, 1);
retry:
dvlp = VP2VNODELOCK(dvp);
mtx_lock(dvlp);
ncp = dvp->v_cache_dd;
if (ncp == NULL) {
SDT_PROBE3(vfs, namecache, lookup, miss, dvp, "..", NULL);
mtx_unlock(dvlp);
return (0);
}
if ((ncp->nc_flag & NCF_ISDOTDOT) != 0) {
if (ncp->nc_flag & NCF_NEGATIVE)
*vpp = NULL;
else
*vpp = ncp->nc_vp;
} else
*vpp = ncp->nc_dvp;
if (*vpp == NULL)
goto negative_success;
SDT_PROBE3(vfs, namecache, lookup, hit, dvp, "..", *vpp);
cache_out_ts(ncp, tsp, ticksp);
if ((ncp->nc_flag & (NCF_ISDOTDOT | NCF_DTS)) ==
NCF_DTS && tsp != NULL) {
ncp_ts = __containerof(ncp, struct namecache_ts, nc_nc);
*tsp = ncp_ts->nc_dotdottime;
}
MPASS(dvp != *vpp);
ltype = VOP_ISLOCKED(dvp);
VOP_UNLOCK(dvp);
vs = vget_prep(*vpp);
mtx_unlock(dvlp);
error = vget_finish(*vpp, cnp->cn_lkflags, vs);
vn_lock(dvp, ltype | LK_RETRY);
if (VN_IS_DOOMED(dvp)) {
if (error == 0)
vput(*vpp);
*vpp = NULL;
return (ENOENT);
}
if (error) {
*vpp = NULL;
goto retry;
}
return (-1);
negative_success:
if (__predict_false(cnp->cn_nameiop == CREATE)) {
if (cnp->cn_flags & ISLASTCN) {
counter_u64_add(numnegzaps, 1);
error = cache_zap_locked_vnode(ncp, dvp);
if (__predict_false(error != 0)) {
zap_and_exit_bucket_fail2++;
goto retry;
}
cache_free(ncp);
return (0);
}
}
SDT_PROBE2(vfs, namecache, lookup, hit__negative, dvp, ncp->nc_name);
cache_out_ts(ncp, tsp, ticksp);
counter_u64_add(numneghits, 1);
whiteout = (ncp->nc_flag & NCF_WHITE);
cache_negative_hit(ncp);
mtx_unlock(dvlp);
if (whiteout)
cnp->cn_flags |= ISWHITEOUT;
return (ENOENT);
}
/**
* Lookup a name in the name cache
*
@@ -1504,6 +1477,8 @@ out_no_entry:
* that was current when the cache entry was created, unless cnp
* was ".".
*
* Either both tsp and ticks have to be provided or neither of them.
*
* # Returns
*
* - -1: A positive cache hit. vpp will contain the desired vnode.
@@ -1543,7 +1518,6 @@ retry:
break;
}
/* We failed to find an entry */
if (__predict_false(ncp == NULL)) {
mtx_unlock(blp);
SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
@@ -1555,18 +1529,10 @@ retry:
if (ncp->nc_flag & NCF_NEGATIVE)
goto negative_success;
/* We found a "positive" match, return the vnode */
counter_u64_add(numposhits, 1);
*vpp = ncp->nc_vp;
CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
dvp, cnp->cn_nameptr, *vpp, ncp);
SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name,
*vpp);
SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name, *vpp);
cache_out_ts(ncp, tsp, ticksp);
/*
* On success we return a locked and ref'd vnode as per the lookup
* protocol.
*/
MPASS(dvp != *vpp);
vs = vget_prep(*vpp);
mtx_unlock(blp);
@@ -1575,14 +1541,8 @@ retry:
*vpp = NULL;
goto retry;
}
if ((cnp->cn_flags & ISLASTCN) &&
(cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
}
return (-1);
negative_success:
/* We found a negative match, and want to create it, so purge */
if (__predict_false(cnp->cn_nameiop == CREATE)) {
if (cnp->cn_flags & ISLASTCN) {
counter_u64_add(numnegzaps, 1);
@@ -1651,7 +1611,6 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
break;
}
/* We failed to find an entry */
if (__predict_false(ncp == NULL)) {
vfs_smr_exit();
SDT_PROBE3(vfs, namecache, lookup, miss, dvp, cnp->cn_nameptr,
@@ -1664,18 +1623,10 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
if (nc_flag & NCF_NEGATIVE)
goto negative_success;
/* We found a "positive" match, return the vnode */
counter_u64_add(numposhits, 1);
*vpp = ncp->nc_vp;
CTR4(KTR_VFS, "cache_lookup(%p, %s) found %p via ncp %p",
dvp, cnp->cn_nameptr, *vpp, ncp);
SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name,
*vpp);
SDT_PROBE3(vfs, namecache, lookup, hit, dvp, ncp->nc_name, *vpp);
cache_out_ts(ncp, tsp, ticksp);
/*
* On success we return a locked and ref'd vnode as per the lookup
* protocol.
*/
MPASS(dvp != *vpp);
if (!cache_ncp_canuse(ncp)) {
vfs_smr_exit();
@@ -1693,12 +1644,7 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
*vpp = NULL;
goto out_fallback;
}
if ((cnp->cn_flags & ISLASTCN) &&
(cnp->cn_lkflags & LK_TYPE_MASK) == LK_EXCLUSIVE) {
ASSERT_VOP_ELOCKED(*vpp, "cache_lookup");
}
return (-1);
negative_success:
if (__predict_false(cnp->cn_nameiop == CREATE)) {
if (cnp->cn_flags & ISLASTCN) {
@@ -1712,7 +1658,8 @@ negative_success:
counter_u64_add(numneghits, 1);
whiteout = (ncp->nc_flag & NCF_WHITE);
/*
* We need to take locks to promote an entry.
* TODO: We need to take locks to promote an entry. Code doing it
* in SMR lookup can be modified to be shared.
*/
negstate = NCP2NEGSTATE(ncp);
if ((negstate->neg_flag & NEG_HOT) == 0 ||
@@ -1850,8 +1797,7 @@ cache_unlock_buckets_cel(struct celockstate *cel)
*
* This means vnodelocks for dvp, vp and the relevant bucketlock.
* However, insertion can result in removal of an old entry. In this
* case we have an additional vnode and bucketlock pair to lock. If the
* entry is negative, ncelock is locked instead of the vnode.
* case we have an additional vnode and bucketlock pair to lock.
*
* That is, in the worst case we have to lock 3 vnodes and 2 bucketlocks, while
* preserving the locking order (smaller address first).
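
As an aside (not part of the diff): the rule stated in the comment above, "smaller address first", is the standard trick for taking several locks of the same kind without deadlocking. A minimal sketch under that assumption follows; the helper name lock_pair_ordered is hypothetical, and the real code in vfs_cache.c uses its own helpers.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * Hypothetical helper: lock two mutexes in address order so that any two
 * threads locking the same pair always agree on the order and therefore
 * cannot deadlock against each other.
 */
static void
lock_pair_ordered(struct mtx *a, struct mtx *b)
{
	struct mtx *tmp;

	if (a == b) {
		mtx_lock(a);
		return;
	}
	if ((uintptr_t)a > (uintptr_t)b) {
		tmp = a;
		a = b;
		b = tmp;
	}
	mtx_lock(a);
	mtx_lock(b);
}

The same ordering argument extends to the worst case described above of three vnode locks plus two bucket locks.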
@@ -1986,7 +1932,6 @@ cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp,
int len;
u_long lnumcache;
CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr);
VNPASS(!VN_IS_DOOMED(dvp), dvp);
VNPASS(dvp->v_type != VNON, dvp);
if (vp != NULL) {
@@ -2416,7 +2361,6 @@ cache_purge_negative(struct vnode *vp)
struct namecache *ncp, *nnp;
struct mtx *vlp;
CTR1(KTR_VFS, "cache_purge_negative(%p)", vp);
SDT_PROBE1(vfs, namecache, purge_negative, done, vp);
if (LIST_EMPTY(&vp->v_cache_src))
return;