Implement vn_lock_pair().

In collaboration with:	pho
Reviewed by:	mckusick (previous version), markj (previous version)
Tested by:	markj (syzkaller), pho
Sponsored by:	The FreeBSD Foundation
Differential revision:	https://reviews.freebsd.org/D26136
Konstantin Belousov	2020-11-13 09:31:57 +00:00
commit 7cde2ec4fd (parent 5dc463f9a5)
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=367631
2 changed files with 98 additions and 0 deletions


@@ -70,6 +70,7 @@ __FBSDID("$FreeBSD$");
#include <sys/filio.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/prng.h>
#include <sys/sx.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
@@ -275,6 +276,10 @@ vn_open_cred(struct nameidata *ndp, int *flagp, int cmode, u_int vn_open_flags,
			vn_finished_write(mp);
			if (error) {
				NDFREE(ndp, NDF_ONLY_PNBUF);
				if (error == ERELOOKUP) {
					NDREINIT(ndp);
					goto restart;
				}
				return (error);
			}
			fmode &= ~O_TRUNC;
@@ -1524,6 +1529,7 @@ vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
	vp = fp->f_vnode;

retry:
	/*
	 * Lock the whole range for truncation. Otherwise split i/o
	 * might happen partly before and partly after the truncation.
@@ -1550,6 +1556,8 @@ vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
	vn_finished_write(mp);
out1:
	vn_rangelock_unlock(vp, rl_cookie);
	if (error == ERELOOKUP)
		goto retry;
	return (error);
}
@@ -3318,3 +3326,91 @@ vn_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
	return (error);
}

static u_long vn_lock_pair_pause_cnt;
SYSCTL_ULONG(_debug, OID_AUTO, vn_lock_pair_pause, CTLFLAG_RD,
    &vn_lock_pair_pause_cnt, 0,
    "Count of vn_lock_pair deadlocks");

static void
vn_lock_pair_pause(const char *wmesg)
{
	atomic_add_long(&vn_lock_pair_pause_cnt, 1);
	pause(wmesg, prng32_bounded(hz / 10));
}

/*
 * Lock pair of vnodes vp1, vp2, avoiding lock order reversal.
 * vp1_locked indicates whether vp1 is exclusively locked; if not, vp1
 * must be unlocked. Same for vp2 and vp2_locked. One of the vnodes
 * can be NULL.
 *
 * The function returns with both vnodes exclusively locked, and
 * guarantees that it does not create lock order reversal with other
 * threads during its execution. Both vnodes could be unlocked
 * temporary (and reclaimed).
 */
void
vn_lock_pair(struct vnode *vp1, bool vp1_locked, struct vnode *vp2,
    bool vp2_locked)
{
	int error;

	if (vp1 == NULL && vp2 == NULL)
		return;
	if (vp1 != NULL) {
		if (vp1_locked)
			ASSERT_VOP_ELOCKED(vp1, "vp1");
		else
			ASSERT_VOP_UNLOCKED(vp1, "vp1");
	} else {
		vp1_locked = true;
	}
	if (vp2 != NULL) {
		if (vp2_locked)
			ASSERT_VOP_ELOCKED(vp2, "vp2");
		else
			ASSERT_VOP_UNLOCKED(vp2, "vp2");
	} else {
		vp2_locked = true;
	}

	if (!vp1_locked && !vp2_locked) {
		vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY);
		vp1_locked = true;
	}

	for (;;) {
		if (vp1_locked && vp2_locked)
			break;
		if (vp1_locked && vp2 != NULL) {
			if (vp1 != NULL) {
				error = VOP_LOCK1(vp2, LK_EXCLUSIVE | LK_NOWAIT,
				    __FILE__, __LINE__);
				if (error == 0)
					break;
				VOP_UNLOCK(vp1);
				vp1_locked = false;
				vn_lock_pair_pause("vlp1");
			}
			vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY);
			vp2_locked = true;
		}
		if (vp2_locked && vp1 != NULL) {
			if (vp2 != NULL) {
				error = VOP_LOCK1(vp1, LK_EXCLUSIVE | LK_NOWAIT,
				    __FILE__, __LINE__);
				if (error == 0)
					break;
				VOP_UNLOCK(vp2);
				vp2_locked = false;
				vn_lock_pair_pause("vlp2");
			}
			vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY);
			vp1_locked = true;
		}
	}
	if (vp1 != NULL)
		ASSERT_VOP_ELOCKED(vp1, "vp1 ret");
	if (vp2 != NULL)
		ASSERT_VOP_ELOCKED(vp2, "vp2 ret");
}
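
For orientation, here is a hedged, hypothetical caller-side sketch that is not part of this commit: it shows how a kernel code path that already holds one vnode lock might use the new vn_lock_pair() to acquire a second lock without risking a lock order reversal. The function name example_lock_both and the ERELOOKUP-based revalidation policy are illustrative assumptions only.

/*
 * Hypothetical illustration (not from the tree): dvp enters exclusively
 * locked, tvp enters unlocked.  vn_lock_pair() may transiently drop
 * either lock, so the caller must cope with reclaimed vnodes afterwards.
 */
static int
example_lock_both(struct vnode *dvp, struct vnode *tvp)
{
	ASSERT_VOP_ELOCKED(dvp, "dvp");
	ASSERT_VOP_UNLOCKED(tvp, "tvp");

	vn_lock_pair(dvp, true, tvp, false);

	/* Both locks are held now, but a vnode may have been doomed. */
	if (VN_IS_DOOMED(dvp) || VN_IS_DOOMED(tvp)) {
		VOP_UNLOCK(tvp);
		VOP_UNLOCK(dvp);
		return (ERELOOKUP);	/* let the caller restart its lookup */
	}
	return (0);
}

Because both vnodes can be unlocked temporarily during the call, any state derived from them beforehand has to be revalidated after vn_lock_pair() returns.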


@@ -729,6 +729,8 @@ bool vn_isdisk_error(struct vnode *vp, int *errp);
bool vn_isdisk(struct vnode *vp);
int _vn_lock(struct vnode *vp, int flags, const char *file, int line);
#define vn_lock(vp, flags) _vn_lock(vp, flags, __FILE__, __LINE__)
void vn_lock_pair(struct vnode *vp1, bool vp1_locked, struct vnode *vp2,
    bool vp2_locked);
int vn_open(struct nameidata *ndp, int *flagp, int cmode, struct file *fp);
int vn_open_cred(struct nameidata *ndp, int *flagp, int cmode,
    u_int vn_open_flags, struct ucred *cred, struct file *fp);
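
The deadlock-avoidance strategy itself, hold one lock, try-lock the other without sleeping, and on failure release the held lock and back off for a short randomized interval, is not kernel-specific. Below is a minimal user-space analogue in C, assuming POSIX threads and arc4random_uniform(3) from FreeBSD's libc; it illustrates the technique only and is not code from this commit.

#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Illustrative analogue of vn_lock_pair()'s approach: acquire two
 * mutexes without imposing a global lock order.  Hold the first,
 * try-lock the second; on contention release the held mutex and
 * sleep a short random interval before retrying, so two threads
 * locking the pair in opposite orders cannot deadlock.
 */
static void
lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	for (;;) {
		pthread_mutex_lock(a);
		if (pthread_mutex_trylock(b) == 0)
			return;		/* both mutexes held */
		pthread_mutex_unlock(a);
		/* Randomized backoff (up to ~10 ms) breaks retry symmetry. */
		usleep(arc4random_uniform(10000));
	}
}

The kernel version additionally counts every such backoff in the read-only debug.vn_lock_pair_pause sysctl, so frequent contention on vnode pairs is observable.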