ZFS vn_rele_async: catch up with the use of refcount(9) for the vnode use count
It is neither sufficient nor required to hold the vnode interlock when checking whether we are about to drop the last use count: vputx() already uses refcount (atomic) operations for both checking and decrementing the use count. Apply the same method to vn_rele_async().

While here, remove vn_rele_inactive(), a wrapper around vrele() that did not add any value.

The change required making vfs_refcount_release_if_not_last() public; vfs_refcount_acquire_if_not_zero() is made public as well. Both now live in sys/refcount.h, and the vfs_ prefix was dropped as part of the move.

Reviewed by:	mjg
MFC after:	2 weeks
Sponsored by:	Panzura
Differential Revision:	https://reviews.freebsd.org/D14869
commit f4043145f2
parent 6472fa9a9d

Notes:
    svn2git  2020-12-20 02:59:44 +00:00
    svn path=/head/; revision=331666
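Before the diff itself, here is a minimal, self-contained userland sketch of the "release if not last" pattern the change adopts. It is not the kernel code: the helper name and the use of C11 atomics (in place of the kernel's atomic_fcmpset_int()) are illustrative assumptions, but the control flow mirrors what the new vn_rele_async() does below.

#include <stdatomic.h>
#include <stdio.h>

/*
 * Sketch of the lock-free "release if not last" pattern.  Returns 1 if
 * the count was decremented (it was not the last reference), 0 if the
 * caller holds the last reference and must take the slow, locked path.
 */
static int
release_if_not_last(_Atomic unsigned int *count)
{
	unsigned int old;

	old = atomic_load(count);
	for (;;) {
		if (old == 1)
			return (0);	/* last reference: caller handles it */
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return (1);	/* dropped one of several references */
	}
}

int
main(void)
{
	_Atomic unsigned int usecount;

	atomic_init(&usecount, 2);

	/* Common case: not the last reference, no lock needed. */
	printf("fast path: %d (count now %u)\n",
	    release_if_not_last(&usecount), atomic_load(&usecount));

	/* Last reference: fall back to the locked/inactive path. */
	printf("fast path: %d (count now %u)\n",
	    release_if_not_last(&usecount), atomic_load(&usecount));
	return (0);
}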
@@ -72,12 +72,6 @@ xva_getxoptattr(xvattr_t *xvap)
 	return (xoap);
 }
 
-static void
-vn_rele_inactive(vnode_t *vp)
-{
-	vrele(vp);
-}
-
 /*
  * Like vn_rele() except if we are going to call VOP_INACTIVE() then do it
  * asynchronously using a taskq. This can avoid deadlocks caused by re-entering
@@ -92,13 +86,10 @@ void
 vn_rele_async(vnode_t *vp, taskq_t *taskq)
 {
 	VERIFY(vp->v_count > 0);
-	VI_LOCK(vp);
-	if (vp->v_count == 1 && !(vp->v_iflag & VI_DOINGINACT)) {
-		VI_UNLOCK(vp);
-		VERIFY(taskq_dispatch((taskq_t *)taskq,
-		    (task_func_t *)vn_rele_inactive, vp, TQ_SLEEP) != 0);
+	if (refcount_release_if_not_last(&vp->v_usecount)) {
+		vdrop(vp);
 		return;
 	}
-	refcount_release(&vp->v_usecount);
-	vdropl(vp);
+	VERIFY(taskq_dispatch((taskq_t *)taskq,
+	    (task_func_t *)vrele, vp, TQ_SLEEP) != 0);
 }
@@ -2455,37 +2455,6 @@ reassignbuf(struct buf *bp)
 	BO_UNLOCK(bo);
 }
 
-/*
- * A temporary hack until refcount_* APIs are sorted out.
- */
-static __inline int
-vfs_refcount_acquire_if_not_zero(volatile u_int *count)
-{
-	u_int old;
-
-	old = *count;
-	for (;;) {
-		if (old == 0)
-			return (0);
-		if (atomic_fcmpset_int(count, &old, old + 1))
-			return (1);
-	}
-}
-
-static __inline int
-vfs_refcount_release_if_not_last(volatile u_int *count)
-{
-	u_int old;
-
-	old = *count;
-	for (;;) {
-		if (old == 1)
-			return (0);
-		if (atomic_fcmpset_int(count, &old, old - 1))
-			return (1);
-	}
-}
-
 static void
 v_init_counters(struct vnode *vp)
 {
@@ -2524,7 +2493,7 @@ v_incr_usecount(struct vnode *vp)
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
 
 	if (vp->v_type != VCHR &&
-	    vfs_refcount_acquire_if_not_zero(&vp->v_usecount)) {
+	    refcount_acquire_if_not_zero(&vp->v_usecount)) {
 		VNASSERT((vp->v_iflag & VI_OWEINACT) == 0, vp,
 		    ("vnode with usecount and VI_OWEINACT set"));
 	} else {
@@ -2616,7 +2585,7 @@ vget(struct vnode *vp, int flags, struct thread *td)
 	 * Upgrade our holdcnt to a usecount.
 	 */
 	if (vp->v_type == VCHR ||
-	    !vfs_refcount_acquire_if_not_zero(&vp->v_usecount)) {
+	    !refcount_acquire_if_not_zero(&vp->v_usecount)) {
 		VI_LOCK(vp);
 		if ((vp->v_iflag & VI_OWEINACT) == 0) {
 			oweinact = 0;
@@ -2720,7 +2689,7 @@ vputx(struct vnode *vp, int func)
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
 
 	if (vp->v_type != VCHR &&
-	    vfs_refcount_release_if_not_last(&vp->v_usecount)) {
+	    refcount_release_if_not_last(&vp->v_usecount)) {
 		if (func == VPUTX_VPUT)
 			VOP_UNLOCK(vp, 0);
 		vdrop(vp);
@@ -2836,7 +2805,7 @@ _vhold(struct vnode *vp, bool locked)
 	ASSERT_VI_UNLOCKED(vp, __func__);
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
 	if (!locked) {
-		if (vfs_refcount_acquire_if_not_zero(&vp->v_holdcnt)) {
+		if (refcount_acquire_if_not_zero(&vp->v_holdcnt)) {
 			VNASSERT((vp->v_iflag & VI_FREE) == 0, vp,
 			    ("_vhold: vnode with holdcnt is free"));
 			return;
@@ -2907,7 +2876,7 @@ _vdrop(struct vnode *vp, bool locked)
 	if ((int)vp->v_holdcnt <= 0)
 		panic("vdrop: holdcnt %d", vp->v_holdcnt);
 	if (!locked) {
-		if (vfs_refcount_release_if_not_last(&vp->v_holdcnt))
+		if (refcount_release_if_not_last(&vp->v_holdcnt))
 			return;
 		VI_LOCK(vp);
 	}
@@ -5438,12 +5407,12 @@ mnt_vnode_next_active_relock(struct vnode *mvp, struct mount *mp,
 	 * acquired with vhold(), but that might try to acquire the vnode
 	 * interlock, which would be a LOR with the mount vnode list lock.
 	 */
-	held = vfs_refcount_acquire_if_not_zero(&vp->v_holdcnt);
+	held = refcount_acquire_if_not_zero(&vp->v_holdcnt);
 	mtx_unlock(&mp->mnt_listmtx);
 	if (!held)
 		goto abort;
 	VI_LOCK(vp);
-	if (!vfs_refcount_release_if_not_last(&vp->v_holdcnt)) {
+	if (!refcount_release_if_not_last(&vp->v_holdcnt)) {
 		vdropl(vp);
 		goto abort;
 	}
@@ -76,4 +76,35 @@ refcount_release(volatile u_int *count)
 	return (1);
 }
 
+/*
+ * A temporary hack until refcount_* APIs are sorted out.
+ */
+static __inline int
+refcount_acquire_if_not_zero(volatile u_int *count)
+{
+	u_int old;
+
+	old = *count;
+	for (;;) {
+		if (old == 0)
+			return (0);
+		if (atomic_fcmpset_int(count, &old, old + 1))
+			return (1);
+	}
+}
+
+static __inline int
+refcount_release_if_not_last(volatile u_int *count)
+{
+	u_int old;
+
+	old = *count;
+	for (;;) {
+		if (old == 1)
+			return (0);
+		if (atomic_fcmpset_int(count, &old, old - 1))
+			return (1);
+	}
+}
+
 #endif /* ! __SYS_REFCOUNT_H__ */
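The other half of the new sys/refcount.h API, refcount_acquire_if_not_zero(), is the acquire-side counterpart: take a reference only if the object has not already dropped to zero (i.e. is not being torn down). The following is a hypothetical userland illustration of that caller pattern, assuming C11 atomics and invented names (struct object, try_ref); it is not taken from the commit.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
	_Atomic unsigned int refs;
};

/* Take a reference only while the count is still non-zero. */
static bool
try_ref(struct object *obj)
{
	unsigned int old = atomic_load(&obj->refs);

	for (;;) {
		if (old == 0)
			return (false);		/* object already dying */
		if (atomic_compare_exchange_weak(&obj->refs, &old, old + 1))
			return (true);		/* reference taken */
	}
}

int
main(void)
{
	struct object live, dying;

	atomic_init(&live.refs, 1);
	atomic_init(&dying.refs, 0);

	printf("live:  %s\n", try_ref(&live) ? "got ref" : "refused");
	printf("dying: %s\n", try_ref(&dying) ? "got ref" : "refused");
	return (0);
}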