Fix a BUF_TIMELOCK race against BUF_LOCK and fix a deadlock in vget()
against VM_WAIT in the pageout code. Both fixes involve adjusting the
lockmgr's timeout capability so that locks obtained with timeouts do not
interfere with locks obtained without a timeout.

Hopefully MFC: before the 4.5 release
parent e45e83304a
commit 23b590188f
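Both fixes hinge on the same mechanism, added in the acquire()/acquiredrain() hunks below: a new external lockmgr flag, LK_TIMELOCK, that gates the per-lock lk_timo field so a timeout is applied only for callers that explicitly request one. A minimal userland sketch of that gating, assuming nothing beyond the constants shown in the diff (the struct and helper names here are illustrative, not the kernel's):

#include <stdio.h>

#define LK_TIMELOCK	0x02000000	/* new external flag from this commit */

struct lock_sketch {
	int lk_timo;			/* per-lock timeout, in clock ticks */
};

/*
 * Mirrors the changed msleep() argument in acquire()/acquiredrain():
 * the stored timeout is used only when the caller passed LK_TIMELOCK.
 */
static int
sleep_timeout(struct lock_sketch *lkp, int extflags)
{
	return ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0);
}

int
main(void)
{
	struct lock_sketch lk = { 6 };	/* e.g. VLKTIMEOUT with hz = 100 */

	printf("untimed caller: timo %d (sleep until woken)\n",
	    sleep_timeout(&lk, 0));
	printf("timed caller:   timo %d ticks\n",
	    sleep_timeout(&lk, LK_TIMELOCK));
	return (0);
}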
@@ -511,7 +511,7 @@ hpfs_vget(
 	mtx_init(&hp->h_interlock, "hpfsnode interlock", MTX_DEF);
-	lockinit(&hp->h_lock, PINOD, "hpnode", 0, 0);
+	lockinit(&hp->h_lock, PINOD, "hpnode", VLKTIMEOUT, 0);
 
 	hp->h_flag = H_INVAL;
 	hp->h_vp = vp;
@@ -261,7 +261,7 @@ deget(pmp, dirclust, diroffset, depp)
 		return error;
 	}
 	bzero((caddr_t)ldep, sizeof *ldep);
-	lockinit(&nvp->v_lock, PINOD, "denode", 0, 0);
+	lockinit(&nvp->v_lock, PINOD, "denode", VLKTIMEOUT, 0);
 	nvp->v_vnlock = &nvp->v_lock;
 	nvp->v_data = ldep;
 	ldep->de_vnode = nvp;
@@ -741,7 +741,7 @@ ntfs_vgetex(
 	}
 	dprintf(("ntfs_vget: vnode: %p for ntnode: %d\n", vp,ino));
 
-	lockinit(&fp->f_lock, PINOD, "fnode", 0, 0);
+	lockinit(&fp->f_lock, PINOD, "fnode", VLKTIMEOUT, 0);
 	fp->f_vp = vp;
 	vp->v_data = fp;
 	vp->v_type = f_type;
@@ -185,7 +185,7 @@ rescan:
 	if (dvp) {
 		np->n_parent = VTONW(dvp)->n_fid;
 	}
-	lockinit(&vp->v_lock, PINOD, "nwnode", 0, LK_CANRECURSE);
+	lockinit(&vp->v_lock, PINOD, "nwnode", VLKTIMEOUT, LK_CANRECURSE);
 	lockmgr(&nwhashlock, LK_EXCLUSIVE, NULL, td);
 	/*
 	 * Another process can create vnode while we blocked in malloc() or
@@ -243,7 +243,7 @@ loop:
 	} else if (vp->v_type == VREG)
 		SMBERROR("new vnode '%s' born without parent ?\n", np->n_name);
 
-	lockinit(&vp->v_lock, PINOD, "smbnode", 0, LK_CANRECURSE);
+	lockinit(&vp->v_lock, PINOD, "smbnode", VLKTIMEOUT, LK_CANRECURSE);
 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
 
 	smbfs_hash_lock(smp, td);
@@ -572,7 +572,7 @@ loop:
 	un = VTOUNION(*vpp);
 	bzero(un, sizeof(*un));
 
-	lockinit(&un->un_lock, PVFS, "unlock", 0, 0);
+	lockinit(&un->un_lock, PVFS, "unlock", VLKTIMEOUT, 0);
 	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, td);
 
 	un->un_vnode = *vpp;
@@ -166,7 +166,8 @@ acquire(struct lock *lkp, int extflags, int wanted) {
 		lkp->lk_flags |= LK_WAIT_NONZERO;
 		lkp->lk_waitcount++;
 		error = msleep(lkp, lkp->lk_interlock, lkp->lk_prio,
-		    lkp->lk_wmesg, lkp->lk_timo);
+		    lkp->lk_wmesg,
+		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
 		if (lkp->lk_waitcount == 1) {
 			lkp->lk_flags &= ~LK_WAIT_NONZERO;
 			lkp->lk_waitcount = 0;
@@ -469,7 +470,8 @@ acquiredrain(struct lock *lkp, int extflags) {
 	while (lkp->lk_flags & LK_ALL) {
 		lkp->lk_flags |= LK_WAITDRAIN;
 		error = msleep(&lkp->lk_flags, lkp->lk_interlock, lkp->lk_prio,
-		    lkp->lk_wmesg, lkp->lk_timo);
+		    lkp->lk_wmesg,
+		    ((extflags & LK_TIMELOCK) ? lkp->lk_timo : 0));
 		if (error)
 			return error;
 		if (extflags & LK_SLEEPFAIL) {
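A note on the two hunks above: msleep() treats a timeout argument of 0 as "sleep until explicitly woken", so gating lk_timo behind LK_TIMELOCK is sufficient; an untimed acquisition can no longer pick up a timeout value that a timed caller happened to leave in the lock.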
@@ -793,7 +793,7 @@ getnewvnode(tag, mp, vops, vpp)
 	vp->v_type = VNON;
 	vp->v_tag = tag;
 	vp->v_op = vops;
-	lockinit(&vp->v_lock, PVFS, "vnlock", 0, LK_NOPAUSE);
+	lockinit(&vp->v_lock, PVFS, "vnlock", VLKTIMEOUT, LK_NOPAUSE);
 	insmntque(vp, mp);
 	*vpp = vp;
 	vp->v_usecount = 1;
@@ -279,7 +279,6 @@ BUF_LOCK(struct buf *bp, int locktype)
 	locktype |= LK_INTERLOCK;
 	bp->b_lock.lk_wmesg = buf_wmesg;
 	bp->b_lock.lk_prio = PRIBIO + 4;
-	bp->b_lock.lk_timo = 0;
 	ret = lockmgr(&(bp)->b_lock, locktype, &buftimelock, curthread);
 	splx(s);
 	return ret;
@@ -295,7 +294,7 @@ BUF_TIMELOCK(struct buf *bp, int locktype, char *wmesg, int catch, int timo)
 
 	s = splbio();
 	mtx_lock(&buftimelock);
-	locktype |= LK_INTERLOCK;
+	locktype |= LK_INTERLOCK | LK_TIMELOCK;
 	bp->b_lock.lk_wmesg = wmesg;
 	bp->b_lock.lk_prio = (PRIBIO + 4) | catch;
 	bp->b_lock.lk_timo = timo;
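This pair of hunks is the BUF_TIMELOCK race fix itself. Previously BUF_LOCK had to zero the shared bp->b_lock.lk_timo so an untimed buffer lock would not inherit a timeout set by BUF_TIMELOCK; but lockmgr drops its interlock while a waiter sleeps, so that zeroing could presumably clobber the timeout out from under a sleeping BUF_TIMELOCK waiter. With the timeout now requested per call via LK_TIMELOCK, BUF_LOCK no longer touches lk_timo at all. A compilable sketch of the resulting division of labor (the names here are illustrative, not the sys/buf.h macros):

#include <stdio.h>

#define LK_TIMELOCK	0x02000000

struct buf_lock_sketch {
	int lk_timo;
};

/* Untimed path: never writes lk_timo and never sets LK_TIMELOCK. */
static int
buf_lock_flags(int locktype)
{
	return (locktype);
}

/* Timed path: the only writer of lk_timo, and the only user of it. */
static int
buf_timelock_flags(struct buf_lock_sketch *lk, int locktype, int timo)
{
	lk->lk_timo = timo;
	return (locktype | LK_TIMELOCK);
}

int
main(void)
{
	struct buf_lock_sketch lk = { 0 };

	printf("untimed locktype: %#x\n", buf_lock_flags(0));
	printf("timed locktype:   %#x, lk_timo %d\n",
	    buf_timelock_flags(&lk, 0, 10), lk.lk_timo);
	return (0);
}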
@@ -115,12 +115,13 @@ struct lock {
  * or passed in as arguments to the lock manager. The LK_REENABLE flag may be
  * set only at the release of a lock obtained by drain.
  */
-#define LK_EXTFLG_MASK	0x01000070	/* mask of external flags */
+#define LK_EXTFLG_MASK	0x03000070	/* mask of external flags */
 #define LK_NOWAIT	0x00000010	/* do not sleep to await lock */
 #define LK_SLEEPFAIL	0x00000020	/* sleep, then return failure */
 #define LK_CANRECURSE	0x00000040	/* allow recursive exclusive lock */
 #define LK_REENABLE	0x00000080	/* lock is be reenabled after drain */
 #define LK_NOPAUSE	0x01000000	/* no spinloop */
+#define LK_TIMELOCK	0x02000000	/* use lk_timo, else no timeout */
 /*
  * Internal lock flags.
  *
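The widened LK_EXTFLG_MASK is exactly the old mask plus the new bit: 0x01000070 | 0x02000000 == 0x03000070. A one-line sanity check of the constants above:

#include <assert.h>

#define LK_EXTFLG_MASK_OLD	0x01000070
#define LK_EXTFLG_MASK_NEW	0x03000070
#define LK_TIMELOCK		0x02000000

int
main(void)
{
	assert((LK_EXTFLG_MASK_OLD | LK_TIMELOCK) == LK_EXTFLG_MASK_NEW);
	return (0);
}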
@@ -240,6 +240,11 @@ struct vattr {
  */
 #define	VNOVAL	(-1)
 
+/*
+ * LK_TIMELOCK timeout for vnode locks (used mainly by the pageout daemon)
+ */
+#define	VLKTIMEOUT	(hz / 20 + 1)
+
 #ifdef _KERNEL
 
 #ifdef MALLOC_DECLARE
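VLKTIMEOUT is roughly 50 ms expressed in clock ticks; the +1 keeps the value nonzero for any hz, since a tick count of 0 would mean no timeout at all. Worked values (the hz settings are illustrative; FreeBSD of this era defaulted to hz = 100):

#include <stdio.h>

static int
vlktimeout(int hz)
{
	return (hz / 20 + 1);
}

int
main(void)
{
	printf("hz = 100  -> %d ticks (~60 ms)\n", vlktimeout(100));
	printf("hz = 1000 -> %d ticks (~51 ms)\n", vlktimeout(1000));
	return (0);
}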
@@ -1186,7 +1186,7 @@ restart:
 	 * FFS supports lock sharing in the stack of vnodes
 	 */
 	vp->v_vnlock = &vp->v_lock;
-	lockinit(vp->v_vnlock, PINOD, "inode", 0, LK_CANRECURSE);
+	lockinit(vp->v_vnlock, PINOD, "inode", VLKTIMEOUT, LK_CANRECURSE);
 	vp->v_data = ip;
 	ip->i_vnode = vp;
 	ip->i_fs = fs = ump->um_fs;
@@ -221,7 +221,7 @@ restart:
 	 * IFS supports lock sharing in the stack of vnodes
 	 */
 	vp->v_vnlock = &vp->v_lock;
-	lockinit(vp->v_vnlock, PINOD, "inode", 0, LK_CANRECURSE);
+	lockinit(vp->v_vnlock, PINOD, "inode", VLKTIMEOUT, LK_CANRECURSE);
 	vp->v_data = ip;
 	ip->i_vnode = vp;
 	ip->i_fs = fs = ump->um_fs;
@@ -193,6 +193,10 @@ SYSCTL_INT(_vm, OID_AUTO, defer_swapspace_pageouts,
 SYSCTL_INT(_vm, OID_AUTO, disable_swapspace_pageouts,
 	CTLFLAG_RW, &disable_swap_pageouts, 0, "Disallow swapout of dirty pages");
 
+static int pageout_lock_miss;
+SYSCTL_INT(_vm, OID_AUTO, pageout_lock_miss,
+	CTLFLAG_RD, &pageout_lock_miss, 0, "vget() lock misses during pageout");
+
 #define VM_PAGEOUT_PAGE_COUNT 16
 int vm_pageout_page_count = VM_PAGEOUT_PAGE_COUNT;
 
@@ -860,17 +864,20 @@ rescan0:
 			 * way too large a weighting to defering the freeing
 			 * of dirty pages.
 			 *
-			 * XXX we need to be able to apply a timeout to the
-			 * vget() lock attempt.
+			 * We can't wait forever for the vnode lock, we might
+			 * deadlock due to a vn_read() getting stuck in
+			 * vm_wait while holding this vnode. We skip the
+			 * vnode if we can't get it in a reasonable amount
+			 * of time.
 			 */
 
 			if (object->type == OBJT_VNODE) {
 				vp = object->handle;
 
 				mp = NULL;
 				if (vp->v_type == VREG)
 					vn_start_write(vp, &mp, V_NOWAIT);
-				if (vget(vp, LK_EXCLUSIVE|LK_NOOBJ, curthread)) {
+				if (vget(vp, LK_EXCLUSIVE|LK_NOOBJ|LK_TIMELOCK, curthread)) {
+					++pageout_lock_miss;
 					vn_finished_write(mp);
 					if (object->flags & OBJ_MIGHTBEDIRTY)
 						vnodes_skipped++;
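The pageout change follows the pattern the new comment describes: take the vnode lock with a bounded wait, and on failure count a miss (now visible via the vm.pageout_lock_miss sysctl) and skip the vnode instead of deadlocking against VM_WAIT. A userland analogue of the same pattern, with pthread_mutex_timedlock() standing in for the timed vget(); all names in the sketch are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static int pageout_lock_miss;		/* counts skipped "vnodes" */

/* Try to take the lock, but give up after "ms" milliseconds. */
static int
try_lock_timed(pthread_mutex_t *m, long ms)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_nsec += ms * 1000000L;
	ts.tv_sec += ts.tv_nsec / 1000000000L;
	ts.tv_nsec %= 1000000000L;
	return (pthread_mutex_timedlock(m, &ts));	/* 0 on success */
}

int
main(void)
{
	pthread_mutex_t vnode_lock = PTHREAD_MUTEX_INITIALIZER;

	if (try_lock_timed(&vnode_lock, 50) != 0) {
		++pageout_lock_miss;	/* couldn't get it in time: skip */
	} else {
		/* ... flush the vnode's dirty pages ... */
		pthread_mutex_unlock(&vnode_lock);
	}
	printf("pageout_lock_miss = %d\n", pageout_lock_miss);
	return (0);
}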