1) Fix up locking in nfs_up() and nfs_down().

2) Significantly reduce the number of acquisitions of the Giant lock in the nfs_socket.c paths.
- We no longer need to acquire Giant before tsleeping on lbolt,
  since jhb special-cased lbolt handling in msleep().
- nfs_up() needs to acquire Giant only if printing the "server up"
  message.
- nfs_timer() used to hold Giant for the entire NFS timer processing pass,
  only because the message printed from nfs_down() needed it (and we
  acquire other locks in nfs_timer()). The acquisition of Giant is now
  pushed down into nfs_down(), reducing the time Giant is held on that
  path (see the sketch below).
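
A minimal userland sketch of that last point, assuming hypothetical names
(srv_down(), STA_TIMEO, and pthread mutexes standing in for Giant and
nmp->nm_mtx); this is an analogue of the locking pattern, not the kernel code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t giant = PTHREAD_MUTEX_INITIALIZER;   /* stand-in for Giant */
static pthread_mutex_t nm_mtx = PTHREAD_MUTEX_INITIALIZER;  /* stand-in for nmp->nm_mtx */
static int nm_state;                                         /* stand-in for nmp->nm_state */
#define STA_TIMEO 0x01                                       /* stand-in for NFSSTA_TIMEO */

static void
srv_down(const char *msg)
{
        /* Flip the "server not responding" bit under the fine-grained lock only. */
        pthread_mutex_lock(&nm_mtx);
        nm_state |= STA_TIMEO;
        pthread_mutex_unlock(&nm_mtx);

        /* Hold the coarse lock only around the legacy printing call. */
        pthread_mutex_lock(&giant);
        printf("nfs server: %s\n", msg);
        pthread_mutex_unlock(&giant);
}

int
main(void)
{
        srv_down("not responding");
        return (0);
}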

Reported by: Kris Kennaway
Mohan Srinivasan 2006-11-20 04:14:23 +00:00
parent 276096bb3e
commit a18c4dc336
2 changed files with 40 additions and 31 deletions

View File

@@ -193,7 +193,7 @@ extern TAILQ_HEAD(nfs_reqq, nfsreq) nfs_reqq;
#define R_TPRINTFMSG 0x20 /* Did a tprintf msg. */
#define R_MUSTRESEND 0x40 /* Must resend request */
#define R_GETONEREP 0x80 /* Probe for one reply only */
#define R_REXMIT_INPROG 0x100 /* Re-transmit in progress */
#define R_PIN_REQ 0x100 /* Pin request down (rexmit in prog or other) */
struct buf;
struct socket;

View File

@@ -488,9 +488,7 @@ nfs_reconnect(struct nfsreq *rep)
error = EINTR;
if (error == EIO || error == EINTR)
return (error);
mtx_lock(&Giant);
(void) tsleep(&lbolt, PSOCK, "nfscon", 0);
mtx_unlock(&Giant);
}
/*
@@ -1209,7 +1207,7 @@ nfs_request(struct vnode *vp, struct mbuf *mrest, int procnum,
* comes back, it will be discarded (since the req struct for it no longer
* exists).
*/
while (rep->r_flags & R_REXMIT_INPROG) {
while (rep->r_flags & R_PIN_REQ) {
msleep((caddr_t)&rep->r_flags, &nfs_reqq_mtx,
(PZERO - 1), "nfsrxmt", 0);
}
@@ -1232,9 +1230,7 @@ nfs_request(struct vnode *vp, struct mbuf *mrest, int procnum,
* tprintf a response.
*/
if (!error) {
mtx_lock(&Giant);
nfs_up(rep, nmp, rep->r_td, "is alive again", NFSSTA_TIMEO);
mtx_unlock(&Giant);
}
mrep = rep->r_mrep;
md = rep->r_md;
@@ -1292,9 +1288,7 @@ nfs_request(struct vnode *vp, struct mbuf *mrest, int procnum,
error = 0;
waituntil = time_second + nfs3_jukebox_delay;
while (time_second < waituntil) {
mtx_lock(&Giant);
(void) tsleep(&lbolt, PSOCK, "nqnfstry", 0);
mtx_unlock(&Giant);
}
mtx_lock(&nfs_reqq_mtx);
if (++nfs_xid == 0)
@@ -1350,7 +1344,7 @@ nfs_request(struct vnode *vp, struct mbuf *mrest, int procnum,
* lock ordering violation. The NFS client socket callback acquires
* inp_lock->nfsreq mutex and pru_send acquires inp_lock. So we drop the
* reqq mutex (and reacquire it after the pru_send()). The req structure
* (for the rexmit) is prevented from being removed by the R_REXMIT_INPROG flag.
* (for the rexmit) is prevented from being removed by the R_PIN_REQ flag.
*/
void
nfs_timer(void *arg)
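
The comment above is the rationale for the R_PIN_REQ flag: the request queue
mutex has to be dropped around pru_send() to avoid a lock order reversal, so
the request is instead "pinned" with a flag, and the waiter in nfs_request()
sleeps until the flag clears. A rough userland sketch of that pinning idea,
using hypothetical names (struct req, req_pin()/req_unpin()) and a pthread
mutex/condition variable in place of the kernel mutex and msleep()/wakeup():

#include <pthread.h>
#include <stdbool.h>

/* Hypothetical request object, reduced to the pieces needed for pinning. */
struct req {
        pthread_mutex_t lock;   /* stand-in for nfs_reqq_mtx / rep->r_mtx */
        pthread_cond_t  cv;     /* stand-in for msleep()/wakeup() on r_flags */
        bool            pinned; /* stand-in for R_PIN_REQ */
};

/* Pin the request, then drop the lock so other locks may be taken safely. */
static void
req_pin(struct req *r)
{
        pthread_mutex_lock(&r->lock);
        r->pinned = true;
        pthread_mutex_unlock(&r->lock);
        /* ... call into code that takes other locks (the pru_send() step) ... */
}

/* Unpin the request and wake anyone waiting to tear it down. */
static void
req_unpin(struct req *r)
{
        pthread_mutex_lock(&r->lock);
        r->pinned = false;
        pthread_cond_broadcast(&r->cv);
        pthread_mutex_unlock(&r->lock);
}

/* Teardown path: wait until nobody has the request pinned before freeing it. */
static void
req_wait_unpinned(struct req *r)
{
        pthread_mutex_lock(&r->lock);
        while (r->pinned)
                pthread_cond_wait(&r->cv, &r->lock);
        pthread_mutex_unlock(&r->lock);
}

int
main(void)
{
        static struct req r = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .cv = PTHREAD_COND_INITIALIZER,
        };

        req_pin(&r);
        req_unpin(&r);
        req_wait_unpinned(&r);
        return (0);
}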
@@ -1364,7 +1358,6 @@ nfs_timer(void *arg)
struct timeval now;
getmicrouptime(&now);
mtx_lock(&Giant); /* nfs_down -> tprintf */
mtx_lock(&nfs_reqq_mtx);
TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
nmp = rep->r_nmp;
@@ -1393,13 +1386,18 @@ nfs_timer(void *arg)
if (nmp->nm_tprintf_initial_delay != 0 &&
(rep->r_rexmit > 2 || (rep->r_flags & R_RESENDERR)) &&
rep->r_lastmsg + nmp->nm_tprintf_delay < now.tv_sec) {
rep->r_lastmsg = now.tv_sec;
rep->r_flags |= R_PIN_REQ;
mtx_unlock(&rep->r_mtx);
mtx_unlock(&nmp->nm_mtx);
rep->r_lastmsg = now.tv_sec;
mtx_unlock(&nfs_reqq_mtx);
nfs_down(rep, nmp, rep->r_td, "not responding",
0, NFSSTA_TIMEO);
mtx_lock(&nfs_reqq_mtx);
mtx_lock(&nmp->nm_mtx);
mtx_lock(&rep->r_mtx);
rep->r_flags &= ~R_PIN_REQ;
wakeup((caddr_t)&rep->r_flags);
}
if (rep->r_rtt >= 0) {
rep->r_rtt++;
@@ -1463,7 +1461,7 @@ nfs_timer(void *arg)
* removed in nfs_request().
*/
mtx_lock(&rep->r_mtx);
rep->r_flags |= R_REXMIT_INPROG;
rep->r_flags |= R_PIN_REQ;
mtx_unlock(&rep->r_mtx);
mtx_unlock(&nfs_reqq_mtx);
NET_LOCK_GIANT();
@@ -1478,7 +1476,7 @@ nfs_timer(void *arg)
mtx_lock(&nfs_reqq_mtx);
mtx_lock(&nmp->nm_mtx);
mtx_lock(&rep->r_mtx);
rep->r_flags &= ~R_REXMIT_INPROG;
rep->r_flags &= ~R_PIN_REQ;
wakeup((caddr_t)&rep->r_flags);
if (error) {
if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
@@ -1514,7 +1512,6 @@ nfs_timer(void *arg)
}
}
mtx_unlock(&nfs_reqq_mtx);
mtx_unlock(&Giant); /* nfs_down -> tprintf */
callout_reset(&nfs_callout, nfs_ticks, nfs_timer, NULL);
}
@@ -1552,9 +1549,7 @@ nfs_nmcancelreqs(nmp)
mtx_unlock(&nfs_reqq_mtx);
if (req == NULL)
return (0);
mtx_lock(&Giant);
tsleep(&lbolt, PSOCK, "nfscancel", 0);
mtx_unlock(&Giant);
}
return (EBUSY);
}
@@ -1864,28 +1859,33 @@ nfs_down(rep, nmp, td, msg, error, flags)
const char *msg;
int error, flags;
{
GIANT_REQUIRED; /* nfs_msg */
if (nmp == NULL)
return;
mtx_lock(&nmp->nm_mtx);
if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
nmp->nm_state |= NFSSTA_TIMEO;
mtx_unlock(&nmp->nm_mtx);
vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
VQ_NOTRESP, 0);
nmp->nm_state |= NFSSTA_TIMEO;
}
} else
mtx_unlock(&nmp->nm_mtx);
#ifdef NFSSTA_LOCKTIMEO
mtx_lock(&nmp->nm_mtx);
if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
nmp->nm_state |= NFSSTA_LOCKTIMEO;
mtx_unlock(&nmp->nm_mtx);
vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
VQ_NOTRESPLOCK, 0);
nmp->nm_state |= NFSSTA_LOCKTIMEO;
}
} else
mtx_unlock(&nmp->nm_mtx);
#endif
mtx_lock(&rep->r_mtx);
if (rep)
rep->r_flags |= R_TPRINTFMSG;
mtx_unlock(&rep->r_mtx);
mtx_lock(&Giant);
nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, error);
mtx_unlock(&Giant);
}
void
@@ -1896,25 +1896,34 @@ nfs_up(rep, nmp, td, msg, flags)
const char *msg;
int flags;
{
GIANT_REQUIRED; /* nfs_msg */
if (nmp == NULL)
if (nmp == NULL || rep == NULL)
return;
mtx_lock(&rep->r_mtx);
if ((rep == NULL) || (rep->r_flags & R_TPRINTFMSG) != 0)
if ((rep->r_flags & R_TPRINTFMSG) != 0) {
mtx_unlock(&rep->r_mtx);
mtx_lock(&Giant);
nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);
mtx_unlock(&rep->r_mtx);
mtx_unlock(&Giant);
} else
mtx_unlock(&rep->r_mtx);
mtx_lock(&nmp->nm_mtx);
if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
nmp->nm_state &= ~NFSSTA_TIMEO;
mtx_unlock(&nmp->nm_mtx);
vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
VQ_NOTRESP, 1);
}
} else
mtx_unlock(&nmp->nm_mtx);
#ifdef NFSSTA_LOCKTIMEO
mtx_lock(&nmp->nm_mtx);
if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
mtx_unlock(&nmp->nm_mtx);
vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
VQ_NOTRESPLOCK, 1);
}
} else
mtx_unlock(&nmp->nm_mtx);
#endif
}