Replace all mtx_lock()/mtx_unlock() on the iod lock with macros.

Since the NFS node mutex needs to become an sx lock so that it can be held
when vnode_pager_setsize() is called, and since the iod lock is held when
the NFS node lock is acquired, the iod mutex will need to become an sx lock
as well.
To simplify the future commit that changes both the NFS node lock and the
iod lock to sx locks, this commit replaces all mtx_lock()/mtx_unlock() calls
on the iod lock with macros.
There is no semantic change as a result of this commit.
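
As an illustration of the intent: after this commit, only the
NFSLOCKIOD()/NFSUNLOCKIOD() definitions shown in the first hunk below name
the locking primitive, so the planned lock-type conversion would only have
to touch those two lines. The sketch below shows the idea; the sx-based
redefinition is an assumption about the planned follow-up, not something
this commit does.

/* After this commit: the macros simply wrap the existing mutex. */
extern struct mtx ncl_iod_mutex;	/* defined in the NFS client code */
#define NFSLOCKIOD()	mtx_lock(&ncl_iod_mutex)
#define NFSUNLOCKIOD()	mtx_unlock(&ncl_iod_mutex)

/*
 * Hypothetical future change (not part of this commit): if the iod lock
 * becomes an sx lock, only these definitions would need to change, while
 * callers such as ncl_asyncio() and nfssvc_iod() stay untouched.
 *
 * extern struct sx ncl_iod_mutex;
 * #define NFSLOCKIOD()	sx_xlock(&ncl_iod_mutex)
 * #define NFSUNLOCKIOD()	sx_xunlock(&ncl_iod_mutex)
 */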

I don't know when the future commit will happen and be MFC'd, so I have
set the MFC on this commit to one week so that it can be MFC'd at the same
time as that future commit.

Suggested by:	kib
MFC after:	1 week
Rick Macklem 2019-09-24 23:38:10 +00:00
parent ec3ecd0471
commit b662b41e62
5 changed files with 25 additions and 23 deletions

View File

@@ -690,6 +690,8 @@ void nfsrvd_rcv(struct socket *, void *, int);
#define NFSUNLOCKNODE(n) mtx_unlock(&((n)->n_mtx))
#define NFSLOCKMNT(m) mtx_lock(&((m)->nm_mtx))
#define NFSUNLOCKMNT(m) mtx_unlock(&((m)->nm_mtx))
#define NFSLOCKIOD() mtx_lock(&ncl_iod_mutex)
#define NFSUNLOCKIOD() mtx_unlock(&ncl_iod_mutex)
#define NFSLOCKREQUEST(r) mtx_lock(&((r)->r_mtx))
#define NFSUNLOCKREQUEST(r) mtx_unlock(&((r)->r_mtx))
#define NFSLOCKSOCKREQ(r) mtx_lock(&((r)->nr_mtx))

View File

@@ -1410,11 +1410,11 @@ ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thr
* To avoid this deadlock, don't allow the async nfsiod threads to
* perform Readdirplus RPCs.
*/
mtx_lock(&ncl_iod_mutex);
NFSLOCKIOD();
if ((bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
(nmp->nm_bufqiods > ncl_numasync / 2)) ||
(bp->b_vp->v_type == VDIR && (nmp->nm_flag & NFSMNT_RDIRPLUS))) {
mtx_unlock(&ncl_iod_mutex);
NFSUNLOCKIOD();
return(EIO);
}
again:
@@ -1481,7 +1481,7 @@ ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thr
if (error) {
error2 = newnfs_sigintr(nmp, td);
if (error2) {
mtx_unlock(&ncl_iod_mutex);
NFSUNLOCKIOD();
return (error2);
}
if (slpflag == PCATCH) {
@@ -1522,11 +1522,11 @@ ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thr
VTONFS(bp->b_vp)->n_directio_asyncwr++;
NFSUNLOCKNODE(VTONFS(bp->b_vp));
}
mtx_unlock(&ncl_iod_mutex);
NFSUNLOCKIOD();
return (0);
}
mtx_unlock(&ncl_iod_mutex);
NFSUNLOCKIOD();
/*
* All the iods are busy on other mounts, so return EIO to

View File

@@ -106,7 +106,7 @@ sysctl_iodmin(SYSCTL_HANDLER_ARGS)
error = sysctl_handle_int(oidp, &newmin, 0, req);
if (error || (req->newptr == NULL))
return (error);
mtx_lock(&ncl_iod_mutex);
NFSLOCKIOD();
if (newmin > ncl_iodmax) {
error = EINVAL;
goto out;
@@ -121,7 +121,7 @@ sysctl_iodmin(SYSCTL_HANDLER_ARGS)
for (i = nfs_iodmin - ncl_numasync; i > 0; i--)
nfs_nfsiodnew_sync();
out:
mtx_unlock(&ncl_iod_mutex);
NFSUNLOCKIOD();
return (0);
}
SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmin, CTLTYPE_UINT | CTLFLAG_RW, 0,
@@ -140,7 +140,7 @@ sysctl_iodmax(SYSCTL_HANDLER_ARGS)
return (error);
if (newmax > NFS_MAXASYNCDAEMON)
return (EINVAL);
mtx_lock(&ncl_iod_mutex);
NFSLOCKIOD();
ncl_iodmax = newmax;
if (ncl_numasync <= ncl_iodmax)
goto out;
@@ -157,7 +157,7 @@ sysctl_iodmax(SYSCTL_HANDLER_ARGS)
iod--;
}
out:
mtx_unlock(&ncl_iod_mutex);
NFSUNLOCKIOD();
return (0);
}
SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmax, CTLTYPE_UINT | CTLFLAG_RW, 0,
@@ -178,10 +178,10 @@ nfs_nfsiodnew_sync(void)
}
if (i == ncl_iodmax)
return (0);
mtx_unlock(&ncl_iod_mutex);
NFSUNLOCKIOD();
error = kproc_create(nfssvc_iod, nfs_asyncdaemon + i, NULL,
RFHIGHPID, 0, "newnfs %d", i);
mtx_lock(&ncl_iod_mutex);
NFSLOCKIOD();
if (error == 0) {
ncl_numasync++;
ncl_iodwant[i] = NFSIOD_AVAILABLE;
@@ -194,12 +194,12 @@ void
ncl_nfsiodnew_tq(__unused void *arg, int pending)
{
mtx_lock(&ncl_iod_mutex);
NFSLOCKIOD();
while (pending > 0) {
pending--;
nfs_nfsiodnew_sync();
}
mtx_unlock(&ncl_iod_mutex);
NFSUNLOCKIOD();
}
void
@@ -217,7 +217,7 @@ nfsiod_setup(void *dummy)
TUNABLE_INT_FETCH("vfs.nfs.iodmin", &nfs_iodmin);
nfscl_init();
mtx_lock(&ncl_iod_mutex);
NFSLOCKIOD();
/* Silently limit the start number of nfsiod's */
if (nfs_iodmin > NFS_MAXASYNCDAEMON)
nfs_iodmin = NFS_MAXASYNCDAEMON;
@@ -227,7 +227,7 @@ nfsiod_setup(void *dummy)
if (error == -1)
panic("nfsiod_setup: nfs_nfsiodnew failed");
}
mtx_unlock(&ncl_iod_mutex);
NFSUNLOCKIOD();
}
SYSINIT(newnfsiod, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, nfsiod_setup, NULL);
@@ -248,7 +248,7 @@ nfssvc_iod(void *instance)
int myiod, timo;
int error = 0;
mtx_lock(&ncl_iod_mutex);
NFSLOCKIOD();
myiod = (int *)instance - nfs_asyncdaemon;
/*
* Main loop
@@ -291,7 +291,7 @@ nfssvc_iod(void *instance)
nmp->nm_bufqwant = 0;
wakeup(&nmp->nm_bufq);
}
mtx_unlock(&ncl_iod_mutex);
NFSUNLOCKIOD();
if (bp->b_flags & B_DIRECT) {
KASSERT((bp->b_iocmd == BIO_WRITE), ("nfscvs_iod: BIO_WRITE not set"));
(void)ncl_doio_directwrite(bp);
@@ -303,7 +303,7 @@ nfssvc_iod(void *instance)
(void) ncl_doio(bp->b_vp, bp, bp->b_wcred,
NULL, 0);
}
mtx_lock(&ncl_iod_mutex);
NFSLOCKIOD();
/*
* Make sure the nmp hasn't been dismounted as soon as
* ncl_doio() completes for the last buffer.
@@ -335,7 +335,7 @@ nfssvc_iod(void *instance)
/* Someone may be waiting for the last nfsiod to terminate. */
if (--ncl_numasync == 0)
wakeup(&ncl_numasync);
mtx_unlock(&ncl_iod_mutex);
NFSUNLOCKIOD();
if ((error == 0) || (error == EWOULDBLOCK))
kproc_exit(0);
/* Abnormal termination */

View File

@@ -102,7 +102,7 @@ ncl_uninit(struct vfsconf *vfsp)
* Tell all nfsiod processes to exit. Clear ncl_iodmax, and wakeup
* any sleeping nfsiods so they check ncl_iodmax and exit.
*/
mtx_lock(&ncl_iod_mutex);
NFSLOCKIOD();
ncl_iodmax = 0;
for (i = 0; i < ncl_numasync; i++)
if (ncl_iodwant[i] == NFSIOD_AVAILABLE)
@@ -110,7 +110,7 @@ ncl_uninit(struct vfsconf *vfsp)
/* The last nfsiod to exit will wake us up when ncl_numasync hits 0 */
while (ncl_numasync)
msleep(&ncl_numasync, &ncl_iod_mutex, PWAIT, "ioddie", 0);
mtx_unlock(&ncl_iod_mutex);
NFSUNLOCKIOD();
ncl_nhuninit();
return (0);
#else

View File

@@ -1713,13 +1713,13 @@ nfs_unmount(struct mount *mp, int mntflags)
mtx_unlock(&nmp->nm_mtx);
}
/* Make sure no nfsiods are assigned to this mount. */
mtx_lock(&ncl_iod_mutex);
NFSLOCKIOD();
for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
if (ncl_iodmount[i] == nmp) {
ncl_iodwant[i] = NFSIOD_AVAILABLE;
ncl_iodmount[i] = NULL;
}
mtx_unlock(&ncl_iod_mutex);
NFSUNLOCKIOD();
/*
* We can now set mnt_data to NULL and wait for