vfs: add vfs_op_thread_enter/exit _crit variants

and employ them in the namecache. Eliminates all spurious checks for preemption.
Mateusz Guzik 2020-08-04 19:54:10 +00:00
parent 0311b05fec
commit 17a66c7087
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=363856
2 changed files with 27 additions and 14 deletions
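
Background for the change: vfs_op_thread_enter() opens its own critical section and vfs_op_thread_exit() closes it, and every critical_exit() ends with a check for deferred preemption. The lockless lookup already executes inside a critical section, so the new _crit variants let it reuse that section instead of opening a nested one per mount. A simplified before/after sketch of the hot-loop shape (illustrative only, not the literal namecache code):

	/* Before: every iteration pays critical_enter()/critical_exit(). */
	for (;;) {
		if (!vfs_op_thread_enter(mp))	/* critical_enter() inside */
			break;
		/* ... lock-free mount point checks ... */
		vfs_op_thread_exit(mp);		/* critical_exit(): preemption check */
	}

	/* After: the caller's existing critical section is reused. */
	for (;;) {
		if (!vfs_op_thread_enter_crit(mp))	/* asserts td_critnest > 0 */
			break;
		/* ... lock-free mount point checks ... */
		vfs_op_thread_exit_crit(mp);	/* no critical_exit(), no check */
	}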

sys/kern/vfs_cache.c

@@ -3484,29 +3484,29 @@ cache_fplookup_climb_mount(struct cache_fpl *fpl)
 	prev_mp = NULL;
 	for (;;) {
-		if (!vfs_op_thread_enter(mp)) {
+		if (!vfs_op_thread_enter_crit(mp)) {
 			if (prev_mp != NULL)
-				vfs_op_thread_exit(prev_mp);
+				vfs_op_thread_exit_crit(prev_mp);
 			return (cache_fpl_partial(fpl));
 		}
 		if (prev_mp != NULL)
-			vfs_op_thread_exit(prev_mp);
+			vfs_op_thread_exit_crit(prev_mp);
 		if (!vn_seqc_consistent(vp, vp_seqc)) {
-			vfs_op_thread_exit(mp);
+			vfs_op_thread_exit_crit(mp);
 			return (cache_fpl_partial(fpl));
 		}
 		if (!cache_fplookup_mp_supported(mp)) {
-			vfs_op_thread_exit(mp);
+			vfs_op_thread_exit_crit(mp);
 			return (cache_fpl_partial(fpl));
 		}
 		vp = atomic_load_ptr(&mp->mnt_rootvnode);
 		if (vp == NULL || VN_IS_DOOMED(vp)) {
-			vfs_op_thread_exit(mp);
+			vfs_op_thread_exit_crit(mp);
 			return (cache_fpl_partial(fpl));
 		}
 		vp_seqc = vn_seqc_read_any(vp);
 		if (seqc_in_modify(vp_seqc)) {
-			vfs_op_thread_exit(mp);
+			vfs_op_thread_exit_crit(mp);
 			return (cache_fpl_partial(fpl));
 		}
 		prev_mp = mp;
@@ -3515,7 +3515,7 @@ cache_fplookup_climb_mount(struct cache_fpl *fpl)
 		break;
 	}
-	vfs_op_thread_exit(prev_mp);
+	vfs_op_thread_exit_crit(prev_mp);
 	fpl->tvp = vp;
 	fpl->tvp_seqc = vp_seqc;
 	return (0);
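
The function above only runs on the lockless lookup path, which (per the MPASS(curthread->td_critnest > 0) assertion added below) is assumed to already hold a critical section, e.g. via vfs_smr. A hedged sketch of that assumed calling context (hypothetical caller, not part of this diff):

	vfs_smr_enter();			/* SMR entry holds a critical section */
	if (vfs_op_thread_enter_crit(mp)) {
		/* mnt_vfs_ops == 0: mp can be inspected lock-free */
		vfs_op_thread_exit_crit(mp);
	}
	/* on failure, the caller's critical section is still held */
	vfs_smr_exit();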

sys/sys/mount.h

@@ -1023,23 +1023,36 @@ int vfs_mount_fetch_counter(struct mount *, enum mount_counter);
 	*zpcpu_get(mp->mnt_thread_in_ops_pcpu) == 1;	\
 })
 
-#define vfs_op_thread_enter(mp) ({				\
-	bool _retval = true;					\
-	critical_enter();					\
+#define vfs_op_thread_enter_crit(mp) ({				\
+	bool _retval_crit = true;				\
+	MPASS(curthread->td_critnest > 0);			\
 	MPASS(!vfs_op_thread_entered(mp));			\
 	zpcpu_set_protected(mp->mnt_thread_in_ops_pcpu, 1);	\
 	__compiler_membar();					\
 	if (__predict_false(mp->mnt_vfs_ops > 0)) {		\
-		vfs_op_thread_exit(mp);				\
-		_retval = false;				\
+		vfs_op_thread_exit_crit(mp);			\
+		_retval_crit = false;				\
 	}							\
+	_retval_crit;						\
+})
+
+#define vfs_op_thread_enter(mp) ({				\
+	bool _retval;						\
+	critical_enter();					\
+	_retval = vfs_op_thread_enter_crit(mp);			\
+	if (__predict_false(!_retval))				\
+		critical_exit();				\
 	_retval;						\
 })
 
-#define vfs_op_thread_exit(mp) do {				\
+#define vfs_op_thread_exit_crit(mp) do {			\
 	MPASS(vfs_op_thread_entered(mp));			\
 	__compiler_membar();					\
 	zpcpu_set_protected(mp->mnt_thread_in_ops_pcpu, 0);	\
+} while (0)
+
+#define vfs_op_thread_exit(mp) do {				\
+	vfs_op_thread_exit_crit(mp);				\
 	critical_exit();					\
 } while (0)
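
Usage sketch for the resulting interface (illustrative, derived from the macros above): callers not in a critical section keep using the plain pair, which now wraps the _crit pair; callers already inside one use the _crit pair directly.

	/* Outside a critical section: the wrapper manages it. */
	if (vfs_op_thread_enter(mp)) {
		/* fast path */
		vfs_op_thread_exit(mp);		/* also does critical_exit() */
	}

	/* Already inside one: skip the nested enter/exit. */
	critical_enter();
	if (vfs_op_thread_enter_crit(mp)) {
		/* fast path */
		vfs_op_thread_exit_crit(mp);
	}
	critical_exit();

Note the asymmetry on failure: vfs_op_thread_enter() drops the critical section it opened, while vfs_op_thread_enter_crit() leaves the caller's section held.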