Some style(9) fixes

Marius Strobl 2010-02-09 23:40:07 +00:00
parent 3e0490b3fe
commit be03f0b907
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=203731
3 changed files with 75 additions and 74 deletions
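Most of the churn below applies two style(9) rules: return values are parenthesized ("return (x);" rather than "return x;"), and if/else bodies consisting of a single statement drop their braces. The helper below is a minimal hypothetical sketch of both rules; the function and identifiers are illustrative only and do not appear in the changed files.

#include <errno.h>
#include <stddef.h>

/*
 * Hypothetical helper (not from the changed files) showing the style(9)
 * rules applied in this commit: parenthesized return values and no
 * braces around single-statement if/else bodies.
 */
static int
example_clamp(int value, int minimum, int maximum, int *resultp)
{

	if (resultp == NULL)
		return (EINVAL);	/* style(9): "return (x);" */
	if (value < minimum)		/* single statement: no braces */
		*resultp = minimum;
	else if (value > maximum)
		*resultp = maximum;
	else
		*resultp = value;
	return (0);
}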

View File

@@ -37,7 +37,7 @@ __FBSDID("$FreeBSD$");
/*
* These functions support the macros and help fiddle mbuf chains for
* the nfs op functions. They do things like create the rpc header and
* the nfs op functions. They do things like create the rpc header and
* copy data between mbuf chains and uio lists.
*/
@@ -75,10 +75,11 @@ nfstype nfsv3_type[9] = {
NFNON, NFREG, NFDIR, NFBLK, NFCHR, NFLNK, NFSOCK, NFFIFO, NFNON
};
static void *nfsm_dissect_xx_sub(int s, struct mbuf **md, caddr_t *dpos, int how);
static void *nfsm_dissect_xx_sub(int s, struct mbuf **md, caddr_t *dpos,
int how);
u_quad_t
nfs_curusec(void)
nfs_curusec(void)
{
struct timeval tv;
@@ -176,7 +177,7 @@ nfsm_disct(struct mbuf **mdp, caddr_t *dposp, int siz, int left, int how)
while (left == 0) {
*mdp = mp = mp->m_next;
if (mp == NULL)
return NULL;
return (NULL);
left = mp->m_len;
*dposp = mtod(mp, caddr_t);
}
@@ -184,13 +185,13 @@ nfsm_disct(struct mbuf **mdp, caddr_t *dposp, int siz, int left, int how)
ret = *dposp;
*dposp += siz;
} else if (mp->m_next == NULL) {
return NULL;
return (NULL);
} else if (siz > MHLEN) {
panic("nfs S too big");
} else {
MGET(mp2, how, MT_DATA);
if (mp2 == NULL)
return NULL;
return (NULL);
mp2->m_len = siz;
mp2->m_next = mp->m_next;
mp->m_next = mp2;
@@ -206,7 +207,7 @@ nfsm_disct(struct mbuf **mdp, caddr_t *dposp, int siz, int left, int how)
/* Loop around copying up the siz2 bytes */
while (siz2 > 0) {
if (mp2 == NULL)
return NULL;
return (NULL);
xfer = (siz2 > mp2->m_len) ? mp2->m_len : siz2;
if (xfer > 0) {
bcopy(mtod(mp2, caddr_t), ptr, xfer);
@@ -229,7 +230,7 @@ nfsm_disct(struct mbuf **mdp, caddr_t *dposp, int siz, int left, int how)
*dposp = npos;
}
}
return ret;
return (ret);
}
/*
@@ -273,19 +274,21 @@ nfsm_build_xx(int s, struct mbuf **mb, caddr_t *bpos)
ret = *bpos;
(*mb)->m_len += s;
*bpos += s;
return ret;
return (ret);
}
void *
nfsm_dissect_xx(int s, struct mbuf **md, caddr_t *dpos)
{
return nfsm_dissect_xx_sub(s, md, dpos, M_WAIT);
return (nfsm_dissect_xx_sub(s, md, dpos, M_WAIT));
}
void *
nfsm_dissect_xx_nonblock(int s, struct mbuf **md, caddr_t *dpos)
{
return nfsm_dissect_xx_sub(s, md, dpos, M_DONTWAIT);
return (nfsm_dissect_xx_sub(s, md, dpos, M_DONTWAIT));
}
static void *
@@ -299,10 +302,10 @@ nfsm_dissect_xx_sub(int s, struct mbuf **md, caddr_t *dpos, int how)
if (t1 >= s) {
ret = *dpos;
*dpos += s;
return ret;
return (ret);
}
cp2 = nfsm_disct(md, dpos, s, t1, how);
return cp2;
cp2 = nfsm_disct(md, dpos, s, t1, how);
return (cp2);
}
int
@@ -312,11 +315,11 @@ nfsm_strsiz_xx(int *s, int m, struct mbuf **mb, caddr_t *bpos)
tl = nfsm_dissect_xx(NFSX_UNSIGNED, mb, bpos);
if (tl == NULL)
return EBADRPC;
return (EBADRPC);
*s = fxdr_unsigned(int32_t, *tl);
if (*s > m)
return EBADRPC;
return 0;
return (EBADRPC);
return (0);
}
int
@@ -327,10 +330,10 @@ nfsm_adv_xx(int s, struct mbuf **md, caddr_t *dpos)
t1 = mtod(*md, caddr_t) + (*md)->m_len - *dpos;
if (t1 >= s) {
*dpos += s;
return 0;
return (0);
}
t1 = nfs_adv(md, dpos, s, t1);
if (t1)
return t1;
return 0;
return (t1);
return (0);
}
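The remaining mechanical changes in this commit are long-line cleanup: prototypes and statements that ran past 80 columns are broken before an argument, with the continuation line indented four extra spaces, as style(9) prescribes. The declaration and definition below are hypothetical, sketched only to show the wrapping pattern; they are not taken from the changed files.

#include <stddef.h>

struct example_args {
	int	procnum;
	size_t	len;
};

/*
 * Hypothetical declaration and definition (not from the changed files)
 * illustrating the style(9) long-line treatment: break before an
 * argument and indent the continuation line by four spaces.
 */
static int	example_request(const struct example_args *args,
		    const char *name, size_t namelen);

static int
example_request(const struct example_args *args, const char *name,
    size_t namelen)
{

	if (args == NULL || name == NULL || namelen == 0)
		return (-1);
	return (args->procnum >= 0 ? 0 : -1);
}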

View File

@@ -33,7 +33,6 @@
* $FreeBSD$
*/
#ifndef _NFS_NFS_COMMON_H_
#define _NFS_NFS_COMMON_H_
@@ -86,7 +85,7 @@ do { \
goto nfsmout; \
} \
} while (0)
#define nfsm_dissect(c, s) \
({ \
void *ret; \

View File

@@ -105,9 +105,11 @@ SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0,
"Buffer reservation size 2 < x < 64");
SYSCTL_INT(_vfs_nfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
"Number of times the nfs client has had to reconnect");
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW, &nfs3_jukebox_delay, 0,
SYSCTL_INT(_vfs_nfs, OID_AUTO, nfs3_jukebox_delay, CTLFLAG_RW,
&nfs3_jukebox_delay, 0,
"Number of seconds to delay a retry after receiving EJUKEBOX");
SYSCTL_INT(_vfs_nfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW, &nfs_skip_wcc_data_onerr, 0,
SYSCTL_INT(_vfs_nfs, OID_AUTO, skip_wcc_data_onerr, CTLFLAG_RW,
&nfs_skip_wcc_data_onerr, 0,
"Disable weak cache consistency checking when server returns an error");
static void nfs_down(struct nfsmount *, struct thread *, const char *,
@@ -160,7 +162,8 @@ static enum nfs_rto_timer_t nfs_proct[NFS_NPROCS] = {
static inline enum nfs_rto_timer_t
nfs_rto_timer(u_int32_t procnum)
{
return nfs_proct[procnum];
return (nfs_proct[procnum]);
}
/*
@@ -225,7 +228,7 @@ nfs_connect(struct nfsmount *nmp)
nconf = getnetconfigent("udp6");
else
nconf = getnetconfigent("tcp6");
/*
* Get buffer reservation size from sysctl, but impose reasonable
* limits.
@@ -273,9 +276,8 @@ nfs_connect(struct nfsmount *nmp)
* Someone else already connected.
*/
CLNT_RELEASE(client);
} else {
} else
nmp->nm_client = client;
}
/*
* Protocols that do not require connections may be optionally left
@@ -284,22 +286,21 @@ nfs_connect(struct nfsmount *nmp)
if (!(nmp->nm_flag & NFSMNT_NOCONN)) {
mtx_unlock(&nmp->nm_mtx);
CLNT_CONTROL(client, CLSET_CONNECT, &one);
} else {
} else
mtx_unlock(&nmp->nm_mtx);
}
/* Restore current thread's credentials. */
td->td_ucred = origcred;
mtx_lock(&nmp->nm_mtx);
/* Initialize other non-zero congestion variables */
/* Initialize other non-zero congestion variables. */
nfs_init_rtt(nmp);
mtx_unlock(&nmp->nm_mtx);
return (0);
}
/*
* NFS disconnect. Clean up and unlink.
* NFS disconnect. Clean up and unlink.
*/
void
nfs_disconnect(struct nfsmount *nmp)
@@ -316,9 +317,8 @@ nfs_disconnect(struct nfsmount *nmp)
#endif
CLNT_CLOSE(client);
CLNT_RELEASE(client);
} else {
} else
mtx_unlock(&nmp->nm_mtx);
}
}
void
@@ -341,11 +341,10 @@ nfs_getauth(struct nfsmount *nmp, struct ucred *cred)
case RPCSEC_GSS_KRB5:
case RPCSEC_GSS_KRB5I:
case RPCSEC_GSS_KRB5P:
if (!nmp->nm_mech_oid) {
if (!nmp->nm_mech_oid)
if (!rpc_gss_mech_to_oid("kerberosv5",
&nmp->nm_mech_oid))
&nmp->nm_mech_oid))
return (NULL);
}
if (nmp->nm_secflavor == RPCSEC_GSS_KRB5)
svc = rpc_gss_svc_none;
else if (nmp->nm_secflavor == RPCSEC_GSS_KRB5I)
@@ -503,10 +502,10 @@ nfs_request(struct vnode *vp, struct mbuf *mreq, int procnum,
nf.nf_td = td;
getmicrouptime(&now);
nf.nf_lastmsg = now.tv_sec -
((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
/*
* XXX if not already connected call nfs_connect now. Longer
* XXX if not already connected call nfs_connect now. Longer
* term, change nfs_mount to call nfs_connect unconditionally
* and let clnt_reconnect_create handle reconnects.
*/
@@ -526,18 +525,17 @@ nfs_request(struct vnode *vp, struct mbuf *mreq, int procnum,
/*
* Use a conservative timeout for RPCs other than getattr,
* lookup, read or write. The justification for doing "other"
* lookup, read or write. The justification for doing "other"
* this way is that these RPCs happen so infrequently that
* timer est. would probably be stale. Also, since many of
* these RPCs are non-idempotent, a conservative timeout is
* desired.
*/
timer = nfs_rto_timer(procnum);
if (timer != NFS_DEFAULT_TIMER) {
if (timer != NFS_DEFAULT_TIMER)
ext.rc_timers = &nmp->nm_timers[timer - 1];
} else {
else
ext.rc_timers = NULL;
}
#ifdef KDTRACE_HOOKS
if (dtrace_nfsclient_nfs23_start_probe != NULL) {
@@ -570,17 +568,16 @@ nfs_request(struct vnode *vp, struct mbuf *mreq, int procnum,
* If there was a successful reply and a tprintf msg.
* tprintf a response.
*/
if (stat == RPC_SUCCESS) {
if (stat == RPC_SUCCESS)
error = 0;
} else if (stat == RPC_TIMEDOUT) {
else if (stat == RPC_TIMEDOUT)
error = ETIMEDOUT;
} else if (stat == RPC_VERSMISMATCH) {
else if (stat == RPC_VERSMISMATCH)
error = EOPNOTSUPP;
} else if (stat == RPC_PROGVERSMISMATCH) {
else if (stat == RPC_PROGVERSMISMATCH)
error = EPROTONOSUPPORT;
} else {
else
error = EACCES;
}
if (error)
goto nfsmout;
@@ -609,9 +606,9 @@ nfs_request(struct vnode *vp, struct mbuf *mreq, int procnum,
m_freem(mrep);
error = 0;
waituntil = time_second + nfs3_jukebox_delay;
while (time_second < waituntil) {
(void) tsleep(&fake_wchan, PSOCK, "nqnfstry", hz);
}
while (time_second < waituntil)
(void)tsleep(&fake_wchan, PSOCK, "nqnfstry",
hz);
goto tryagain;
}
@@ -622,12 +619,13 @@ nfs_request(struct vnode *vp, struct mbuf *mreq, int procnum,
if (error == ESTALE)
nfs_purgecache(vp);
/*
* Skip wcc data on NFS errors for now. NetApp filers
* Skip wcc data on NFS errors for now. NetApp filers
* return corrupt postop attrs in the wcc data for NFS
* err EROFS. Not sure if they could return corrupt
* err EROFS. Not sure if they could return corrupt
* postop attrs for others errors.
*/
if ((nmp->nm_flag & NFSMNT_NFSV3) && !nfs_skip_wcc_data_onerr) {
if ((nmp->nm_flag & NFSMNT_NFSV3) &&
!nfs_skip_wcc_data_onerr) {
*mrp = mrep;
*mdp = md;
*dposp = dpos;
@@ -689,7 +687,7 @@ nfs_request(struct vnode *vp, struct mbuf *mreq, int procnum,
/*
* Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
* wait for all requests to complete. This is used by forced unmounts
* wait for all requests to complete. This is used by forced unmounts
* to terminate any outstanding RPCs.
*/
int
@@ -703,7 +701,7 @@ nfs_nmcancelreqs(struct nfsmount *nmp)
/*
* Any signal that can interrupt an NFS operation in an intr mount
* should be added to this set. SIGSTOP and SIGKILL cannot be masked.
* should be added to this set. SIGSTOP and SIGKILL cannot be masked.
*/
int nfs_sig_set[] = {
SIGINT,
@@ -722,16 +720,16 @@ static int
nfs_sig_pending(sigset_t set)
{
int i;
for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++)
if (SIGISMEMBER(set, nfs_sig_set[i]))
return (1);
return (0);
}
/*
* The set/restore sigmask functions are used to (temporarily) overwrite
* the process p_sigmask during an RPC call (for example). These are also
* the process p_sigmask during an RPC call (for example). These are also
* used in other places in the NFS client that might tsleep().
*/
void
@@ -740,18 +738,18 @@ nfs_set_sigmask(struct thread *td, sigset_t *oldset)
sigset_t newset;
int i;
struct proc *p;
SIGFILLSET(newset);
if (td == NULL)
td = curthread; /* XXX */
p = td->td_proc;
/* Remove the NFS set of signals from newset */
/* Remove the NFS set of signals from newset. */
PROC_LOCK(p);
mtx_lock(&p->p_sigacts->ps_mtx);
for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++) {
/*
* But make sure we leave the ones already masked
* by the process, ie. remove the signal from the
* by the process, i.e. remove the signal from the
* temporary signalmask only if it wasn't already
* in p_sigmask.
*/
@@ -777,12 +775,13 @@ nfs_restore_sigmask(struct thread *td, sigset_t *set)
* old one after msleep() returns.
*/
int
nfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo)
nfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority,
char *wmesg, int timo)
{
sigset_t oldset;
int error;
struct proc *p;
if ((priority & PCATCH) == 0)
return msleep(ident, mtx, priority, wmesg, timo);
if (td == NULL)
@@ -803,7 +802,7 @@ nfs_sigintr(struct nfsmount *nmp, struct thread *td)
{
struct proc *p;
sigset_t tmpset;
/* Terminate all requests while attempting a forced unmount. */
if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
return (EIO);
@@ -834,12 +833,11 @@ nfs_msg(struct thread *td, const char *server, const char *msg, int error)
struct proc *p;
p = td ? td->td_proc : NULL;
if (error) {
if (error)
tprintf(p, LOG_INFO, "nfs server %s: %s, error %d\n", server,
msg, error);
} else {
else
tprintf(p, LOG_INFO, "nfs server %s: %s\n", server, msg);
}
return (0);
}
@@ -858,7 +856,8 @@ nfs_down(struct nfsmount *nmp, struct thread *td, const char *msg,
} else
mtx_unlock(&nmp->nm_mtx);
mtx_lock(&nmp->nm_mtx);
if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
if ((flags & NFSSTA_LOCKTIMEO) &&
!(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
nmp->nm_state |= NFSSTA_LOCKTIMEO;
mtx_unlock(&nmp->nm_mtx);
vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
@@ -874,9 +873,8 @@ nfs_up(struct nfsmount *nmp, struct thread *td, const char *msg,
{
if (nmp == NULL)
return;
if (tprintfmsg) {
if (tprintfmsg)
nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);
}
mtx_lock(&nmp->nm_mtx);
if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
@@ -886,9 +884,10 @@ nfs_up(struct nfsmount *nmp, struct thread *td, const char *msg,
VQ_NOTRESP, 1);
} else
mtx_unlock(&nmp->nm_mtx);
mtx_lock(&nmp->nm_mtx);
if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
if ((flags & NFSSTA_LOCKTIMEO) &&
(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
mtx_unlock(&nmp->nm_mtx);
vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,