1994-05-24 10:09:53 +00:00
|
|
|
/*
|
1997-02-10 02:22:35 +00:00
|
|
|
* Copyright (c) 1989, 1991, 1993, 1995
|
1994-05-24 10:09:53 +00:00
|
|
|
* The Regents of the University of California. All rights reserved.
|
|
|
|
*
|
|
|
|
* This code is derived from software contributed to Berkeley by
|
|
|
|
* Rick Macklem at The University of Guelph.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
1997-02-10 02:22:35 +00:00
|
|
|
* @(#)nfs_socket.c 8.5 (Berkeley) 3/30/95
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
|
2001-09-18 23:32:09 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Socket operations for use by nfs
|
|
|
|
*/
|
|
|
|
|
2002-07-15 19:40:23 +00:00
|
|
|
#include "opt_inet6.h"
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
|
|
|
#include <sys/kernel.h>
|
2001-05-01 08:13:21 +00:00
|
|
|
#include <sys/lock.h>
|
|
|
|
#include <sys/malloc.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/mbuf.h>
|
2001-05-01 08:13:21 +00:00
|
|
|
#include <sys/mount.h>
|
|
|
|
#include <sys/mutex.h>
|
|
|
|
#include <sys/proc.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/protosw.h>
|
2001-05-01 08:13:21 +00:00
|
|
|
#include <sys/signalvar.h>
|
2004-12-06 21:11:15 +00:00
|
|
|
#include <sys/syscallsubr.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <sys/socketvar.h>
|
The VFS/BIO subsystem contained a number of hacks in order to optimize
piecemeal, middle-of-file writes for NFS. These hacks have caused no
end of trouble, especially when combined with mmap(). I've removed
them. Instead, NFS will issue a read-before-write to fully
instantiate the struct buf containing the write. NFS does, however,
optimize piecemeal appends to files. For most common file operations,
you will not notice the difference. The sole remaining fragment in
the VFS/BIO system is b_dirtyoff/end, which NFS uses to avoid cache
coherency issues with read-merge-write style operations. NFS also
optimizes the write-covers-entire-buffer case by avoiding the
read-before-write. There is quite a bit of room for further
optimization in these areas.
The VM system marks pages fully-valid (AKA vm_page_t->valid =
VM_PAGE_BITS_ALL) in several places, most notably in vm_fault. This
is not correct operation. The vm_pager_get_pages() code is now
responsible for marking VM pages all-valid. A number of VM helper
routines have been added to aid in zeroing-out the invalid portions of
a VM page prior to the page being marked all-valid. This operation is
necessary to properly support mmap(). The zeroing occurs most often
when dealing with file-EOF situations. Several bugs have been fixed
in the NFS subsystem, including bits handling file and directory EOF
situations and buf->b_flags consistency issues relating to clearing
B_ERROR & B_INVAL, and handling B_DONE.
getblk() and allocbuf() have been rewritten. B_CACHE operation is now
formally defined in comments and more straightforward in
implementation. B_CACHE for VMIO buffers is based on the validity of
the backing store. B_CACHE for non-VMIO buffers is based simply on
whether the buffer is B_INVAL or not (B_CACHE set if B_INVAL clear,
and vice versa). biodone() is now responsible for setting B_CACHE
when a successful read completes. B_CACHE is also set when a bdwrite()
is initiated and when a bwrite() is initiated. VFS VOP_BWRITE
routines (there are only two - nfs_bwrite() and bwrite()) are now
expected to set B_CACHE. This means that bowrite() and bawrite() also
set B_CACHE indirectly.
There are a number of places in the code which were previously using
buf->b_bufsize (which is DEV_BSIZE aligned) when they should have
been using buf->b_bcount. These have been fixed. getblk() now clears
B_DONE on return because the rest of the system is so bad about
dealing with B_DONE.
Major fixes to NFS/TCP have been made. A server-side bug could cause
requests to be lost by the server due to nfs_realign() overwriting
other rpc's in the same TCP mbuf chain. The server's kernel must be
recompiled to get the benefit of the fixes.
Submitted by: Matthew Dillon <dillon@apollo.backplane.com>
1999-05-02 23:57:16 +00:00
|
|
|
#include <sys/sysctl.h>
|
2001-05-01 08:13:21 +00:00
|
|
|
#include <sys/syslog.h>
|
|
|
|
#include <sys/vnode.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
#include <netinet/in.h>
|
|
|
|
#include <netinet/tcp.h>
|
1994-10-17 17:47:45 +00:00
|
|
|
|
2003-11-14 20:54:10 +00:00
|
|
|
#include <rpc/rpcclnt.h>
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <nfs/rpcv2.h>
|
1995-06-27 11:07:30 +00:00
|
|
|
#include <nfs/nfsproto.h>
|
2001-09-18 23:32:09 +00:00
|
|
|
#include <nfsclient/nfs.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <nfs/xdr_subs.h>
|
2001-09-18 23:32:09 +00:00
|
|
|
#include <nfsclient/nfsm_subs.h>
|
|
|
|
#include <nfsclient/nfsmount.h>
|
|
|
|
#include <nfsclient/nfsnode.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2003-11-14 20:54:10 +00:00
|
|
|
#include <nfs4client/nfs4.h>
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#define TRUE 1
|
|
|
|
#define FALSE 0
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Estimate rto for an nfs rpc sent via. an unreliable datagram.
|
|
|
|
* Use the mean and mean deviation of rtt for the appropriate type of rpc
|
|
|
|
* for the frequent rpcs and a default for the others.
|
|
|
|
* The justification for doing "other" this way is that these rpcs
|
|
|
|
* happen so infrequently that timer est. would probably be stale.
|
|
|
|
* Also, since many of these rpcs are
|
|
|
|
* non-idempotent, a conservative timeout is desired.
|
|
|
|
* getattr, lookup - A+2D
|
|
|
|
* read, write - A+4D
|
|
|
|
* other - nm_timeo
|
|
|
|
*/
|
|
|
|
#define NFS_RTO(n, t) \
|
|
|
|
((t) == 0 ? (n)->nm_timeo : \
|
|
|
|
((t) < 3 ? \
|
|
|
|
(((((n)->nm_srtt[t-1] + 3) >> 2) + (n)->nm_sdrtt[t-1] + 1) >> 1) : \
|
|
|
|
((((n)->nm_srtt[t-1] + 7) >> 3) + (n)->nm_sdrtt[t-1] + 1)))
|
|
|
|
#define NFS_SRTT(r) (r)->r_nmp->nm_srtt[proct[(r)->r_procnum] - 1]
|
|
|
|
#define NFS_SDRTT(r) (r)->r_nmp->nm_sdrtt[proct[(r)->r_procnum] - 1]
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Defines which timer to use for the procnum.
|
|
|
|
* 0 - default
|
|
|
|
* 1 - getattr
|
|
|
|
* 2 - lookup
|
|
|
|
* 3 - read
|
|
|
|
* 4 - write
|
|
|
|
*/
|
|
|
|
static int proct[NFS_NPROCS] = {
|
1995-06-27 11:07:30 +00:00
|
|
|
0, 1, 0, 2, 1, 3, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 0, 0, 0, 0, 0,
|
1994-05-24 10:09:53 +00:00
|
|
|
};
|
|
|
|
|
2001-09-18 23:32:09 +00:00
|
|
|
static int nfs_realign_test;
|
|
|
|
static int nfs_realign_count;
|
|
|
|
static int nfs_bufpackets = 4;
|
2004-07-12 06:22:42 +00:00
|
|
|
static int nfs_reconnects;
|
The VFS/BIO subsystem contained a number of hacks in order to optimize
piecemeal, middle-of-file writes for NFS. These hacks have caused no
end of trouble, especially when combined with mmap(). I've removed
them. Instead, NFS will issue a read-before-write to fully
instantiate the struct buf containing the write. NFS does, however,
optimize piecemeal appends to files. For most common file operations,
you will not notice the difference. The sole remaining fragment in
the VFS/BIO system is b_dirtyoff/end, which NFS uses to avoid cache
coherency issues with read-merge-write style operations. NFS also
optimizes the write-covers-entire-buffer case by avoiding the
read-before-write. There is quite a bit of room for further
optimization in these areas.
The VM system marks pages fully-valid (AKA vm_page_t->valid =
VM_PAGE_BITS_ALL) in several places, most noteably in vm_fault. This
is not correct operation. The vm_pager_get_pages() code is now
responsible for marking VM pages all-valid. A number of VM helper
routines have been added to aid in zeroing-out the invalid portions of
a VM page prior to the page being marked all-valid. This operation is
necessary to properly support mmap(). The zeroing occurs most often
when dealing with file-EOF situations. Several bugs have been fixed
in the NFS subsystem, including bits handling file and directory EOF
situations and buf->b_flags consistancy issues relating to clearing
B_ERROR & B_INVAL, and handling B_DONE.
getblk() and allocbuf() have been rewritten. B_CACHE operation is now
formally defined in comments and more straightforward in
implementation. B_CACHE for VMIO buffers is based on the validity of
the backing store. B_CACHE for non-VMIO buffers is based simply on
whether the buffer is B_INVAL or not (B_CACHE set if B_INVAL clear,
and vise-versa). biodone() is now responsible for setting B_CACHE
when a successful read completes. B_CACHE is also set when a bdwrite()
is initiated and when a bwrite() is initiated. VFS VOP_BWRITE
routines (there are only two - nfs_bwrite() and bwrite()) are now
expected to set B_CACHE. This means that bowrite() and bawrite() also
set B_CACHE indirectly.
There are a number of places in the code which were previously using
buf->b_bufsize (which is DEV_BSIZE aligned) when they should have
been using buf->b_bcount. These have been fixed. getblk() now clears
B_DONE on return because the rest of the system is so bad about
dealing with B_DONE.
Major fixes to NFS/TCP have been made. A server-side bug could cause
requests to be lost by the server due to nfs_realign() overwriting
other rpc's in the same TCP mbuf chain. The server's kernel must be
recompiled to get the benefit of the fixes.
Submitted by: Matthew Dillon <dillon@apollo.backplane.com>
1999-05-02 23:57:16 +00:00
|
|
|
|
|
|
|
SYSCTL_DECL(_vfs_nfs);
|
|
|
|
|
2000-03-27 21:38:35 +00:00
|
|
|
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_test, CTLFLAG_RW, &nfs_realign_test, 0, "");
|
|
|
|
SYSCTL_INT(_vfs_nfs, OID_AUTO, realign_count, CTLFLAG_RW, &nfs_realign_count, 0, "");
|
|
|
|
SYSCTL_INT(_vfs_nfs, OID_AUTO, bufpackets, CTLFLAG_RW, &nfs_bufpackets, 0, "");
|
2004-07-12 06:22:42 +00:00
|
|
|
SYSCTL_INT(_vfs_nfs, OID_AUTO, reconnects, CTLFLAG_RD, &nfs_reconnects, 0,
|
|
|
|
"number of times the nfs client has had to reconnect");
|
The VFS/BIO subsystem contained a number of hacks in order to optimize
piecemeal, middle-of-file writes for NFS. These hacks have caused no
end of trouble, especially when combined with mmap(). I've removed
them. Instead, NFS will issue a read-before-write to fully
instantiate the struct buf containing the write. NFS does, however,
optimize piecemeal appends to files. For most common file operations,
you will not notice the difference. The sole remaining fragment in
the VFS/BIO system is b_dirtyoff/end, which NFS uses to avoid cache
coherency issues with read-merge-write style operations. NFS also
optimizes the write-covers-entire-buffer case by avoiding the
read-before-write. There is quite a bit of room for further
optimization in these areas.
The VM system marks pages fully-valid (AKA vm_page_t->valid =
VM_PAGE_BITS_ALL) in several places, most noteably in vm_fault. This
is not correct operation. The vm_pager_get_pages() code is now
responsible for marking VM pages all-valid. A number of VM helper
routines have been added to aid in zeroing-out the invalid portions of
a VM page prior to the page being marked all-valid. This operation is
necessary to properly support mmap(). The zeroing occurs most often
when dealing with file-EOF situations. Several bugs have been fixed
in the NFS subsystem, including bits handling file and directory EOF
situations and buf->b_flags consistancy issues relating to clearing
B_ERROR & B_INVAL, and handling B_DONE.
getblk() and allocbuf() have been rewritten. B_CACHE operation is now
formally defined in comments and more straightforward in
implementation. B_CACHE for VMIO buffers is based on the validity of
the backing store. B_CACHE for non-VMIO buffers is based simply on
whether the buffer is B_INVAL or not (B_CACHE set if B_INVAL clear,
and vise-versa). biodone() is now responsible for setting B_CACHE
when a successful read completes. B_CACHE is also set when a bdwrite()
is initiated and when a bwrite() is initiated. VFS VOP_BWRITE
routines (there are only two - nfs_bwrite() and bwrite()) are now
expected to set B_CACHE. This means that bowrite() and bawrite() also
set B_CACHE indirectly.
There are a number of places in the code which were previously using
buf->b_bufsize (which is DEV_BSIZE aligned) when they should have
been using buf->b_bcount. These have been fixed. getblk() now clears
B_DONE on return because the rest of the system is so bad about
dealing with B_DONE.
Major fixes to NFS/TCP have been made. A server-side bug could cause
requests to be lost by the server due to nfs_realign() overwriting
other rpc's in the same TCP mbuf chain. The server's kernel must be
recompiled to get the benefit of the fixes.
Submitted by: Matthew Dillon <dillon@apollo.backplane.com>
1999-05-02 23:57:16 +00:00
|
|
|
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* There is a congestion window for outstanding rpcs maintained per mount
|
|
|
|
* point. The cwnd size is adjusted in roughly the way that:
|
|
|
|
* Van Jacobson, Congestion avoidance and Control, In "Proceedings of
|
|
|
|
* SIGCOMM '88". ACM, August 1988.
|
|
|
|
* describes for TCP. The cwnd size is chopped in half on a retransmit timeout
|
|
|
|
* and incremented by 1/cwnd when each rpc reply is received and a full cwnd
|
|
|
|
* of rpcs is in progress.
|
|
|
|
* (The sent count and cwnd are scaled for integer arith.)
|
|
|
|
* Variants of "slow start" were tried and were found to be too much of a
|
|
|
|
* performance hit (ave. rtt 3 times larger),
|
|
|
|
* I suspect due to the large rtt that nfs rpcs have.
|
|
|
|
*/
|
|
|
|
#define NFS_CWNDSCALE 256
|
|
|
|
#define NFS_MAXCWND (NFS_CWNDSCALE * 32)
|
2001-12-30 18:41:52 +00:00
|
|
|
#define NFS_NBACKOFF 8
|
|
|
|
static int nfs_backoff[NFS_NBACKOFF] = { 2, 4, 8, 16, 32, 64, 128, 256, };
|
2004-03-25 21:48:09 +00:00
|
|
|
struct callout nfs_callout;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2004-07-06 09:12:03 +00:00
|
|
|
static int nfs_msg(struct thread *, const char *, const char *, int);
|
2004-12-06 21:11:15 +00:00
|
|
|
static int nfs_realign(struct mbuf **pm, int hsiz);
|
2001-09-18 23:32:09 +00:00
|
|
|
static int nfs_reply(struct nfsreq *);
|
|
|
|
static void nfs_softterm(struct nfsreq *rep);
|
|
|
|
static int nfs_reconnect(struct nfsreq *rep);
|
2004-12-06 21:11:15 +00:00
|
|
|
static void nfs_clnt_tcp_soupcall(struct socket *so, void *arg, int waitflag);
|
|
|
|
static void nfs_clnt_udp_soupcall(struct socket *so, void *arg, int waitflag);
|
2004-12-07 03:39:52 +00:00
|
|
|
static void wakeup_nfsreq(struct nfsreq *req);
|
2004-12-06 21:11:15 +00:00
|
|
|
|
|
|
|
extern struct mtx nfs_reqq_mtx;
|
|
|
|
extern struct mtx nfs_reply_mtx;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Initialize sockets and congestion for a new NFS connection.
|
|
|
|
* We do not free the sockaddr if error.
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-18 23:32:09 +00:00
|
|
|
nfs_connect(struct nfsmount *nmp, struct nfsreq *rep)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-18 23:32:09 +00:00
|
|
|
struct socket *so;
|
2004-07-06 16:55:41 +00:00
|
|
|
int error, rcvreserve, sndreserve;
|
2004-07-13 05:42:59 +00:00
|
|
|
int pktscale;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct sockaddr *saddr;
|
2002-02-07 20:58:47 +00:00
|
|
|
struct thread *td = &thread0; /* only used for socreate and sobind */
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2004-06-16 03:12:50 +00:00
|
|
|
NET_ASSERT_GIANT();
|
2003-11-07 22:57:09 +00:00
|
|
|
|
2004-12-06 21:11:15 +00:00
|
|
|
if (nmp->nm_sotype == SOCK_STREAM) {
|
|
|
|
mtx_lock(&nmp->nm_nfstcpstate.mtx);
|
|
|
|
nmp->nm_nfstcpstate.flags |= NFS_TCP_EXPECT_RPCMARKER;
|
|
|
|
nmp->nm_nfstcpstate.rpcresid = 0;
|
|
|
|
mtx_unlock(&nmp->nm_nfstcpstate.mtx);
|
|
|
|
}
|
2002-07-11 17:54:58 +00:00
|
|
|
nmp->nm_so = NULL;
|
1997-08-16 19:16:27 +00:00
|
|
|
saddr = nmp->nm_nam;
|
1995-05-30 08:16:23 +00:00
|
|
|
error = socreate(saddr->sa_family, &nmp->nm_so, nmp->nm_sotype,
|
2002-09-08 15:11:18 +00:00
|
|
|
nmp->nm_soproto, nmp->nm_mountp->mnt_cred, td);
|
1994-10-02 17:27:07 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto bad;
|
|
|
|
so = nmp->nm_so;
|
|
|
|
nmp->nm_soflags = so->so_proto->pr_flags;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Some servers require that the client port be a reserved port number.
|
|
|
|
*/
|
2002-07-15 19:40:23 +00:00
|
|
|
if (nmp->nm_flag & NFSMNT_RESVPORT) {
|
1999-06-05 05:35:03 +00:00
|
|
|
struct sockopt sopt;
|
2002-07-15 19:40:23 +00:00
|
|
|
int ip, ip2, len;
|
|
|
|
struct sockaddr_in6 ssin;
|
|
|
|
struct sockaddr *sa;
|
1999-06-05 05:35:03 +00:00
|
|
|
|
|
|
|
bzero(&sopt, sizeof sopt);
|
2002-07-15 19:40:23 +00:00
|
|
|
switch(saddr->sa_family) {
|
|
|
|
case AF_INET:
|
|
|
|
sopt.sopt_level = IPPROTO_IP;
|
|
|
|
sopt.sopt_name = IP_PORTRANGE;
|
|
|
|
ip = IP_PORTRANGE_LOW;
|
|
|
|
ip2 = IP_PORTRANGE_DEFAULT;
|
|
|
|
len = sizeof (struct sockaddr_in);
|
|
|
|
break;
|
|
|
|
#ifdef INET6
|
|
|
|
case AF_INET6:
|
|
|
|
sopt.sopt_level = IPPROTO_IPV6;
|
|
|
|
sopt.sopt_name = IPV6_PORTRANGE;
|
|
|
|
ip = IPV6_PORTRANGE_LOW;
|
|
|
|
ip2 = IPV6_PORTRANGE_DEFAULT;
|
|
|
|
len = sizeof (struct sockaddr_in6);
|
|
|
|
break;
|
|
|
|
#endif
|
|
|
|
default:
|
|
|
|
goto noresvport;
|
|
|
|
}
|
|
|
|
sa = (struct sockaddr *)&ssin;
|
|
|
|
bzero(sa, len);
|
|
|
|
sa->sa_len = len;
|
|
|
|
sa->sa_family = saddr->sa_family;
|
1999-06-05 05:35:03 +00:00
|
|
|
sopt.sopt_dir = SOPT_SET;
|
|
|
|
sopt.sopt_val = (void *)&ip;
|
|
|
|
sopt.sopt_valsize = sizeof(ip);
|
|
|
|
error = sosetopt(so, &sopt);
|
|
|
|
if (error)
|
|
|
|
goto bad;
|
2002-07-15 19:40:23 +00:00
|
|
|
error = sobind(so, sa, td);
|
1999-06-05 05:35:03 +00:00
|
|
|
if (error)
|
|
|
|
goto bad;
|
2002-07-15 19:40:23 +00:00
|
|
|
ip = ip2;
|
1999-06-05 05:35:03 +00:00
|
|
|
error = sosetopt(so, &sopt);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error)
|
|
|
|
goto bad;
|
2002-07-15 19:40:23 +00:00
|
|
|
noresvport: ;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Protocols that do not require connections may be optionally left
|
|
|
|
* unconnected for servers that reply from a port other than NFS_PORT.
|
|
|
|
*/
|
|
|
|
if (nmp->nm_flag & NFSMNT_NOCONN) {
|
|
|
|
if (nmp->nm_soflags & PR_CONNREQUIRED) {
|
|
|
|
error = ENOTCONN;
|
|
|
|
goto bad;
|
|
|
|
}
|
|
|
|
} else {
|
2001-09-12 08:38:13 +00:00
|
|
|
error = soconnect(so, nmp->nm_nam, td);
|
1994-10-02 17:27:07 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto bad;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Wait for the connection to complete. Cribbed from the
|
|
|
|
* connect system call but with the wait timing out so
|
|
|
|
* that interruptible mounts don't hang here for a long time.
|
|
|
|
*/
|
2004-07-06 16:55:41 +00:00
|
|
|
SOCK_LOCK(so);
|
1994-05-24 10:09:53 +00:00
|
|
|
while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
|
2004-07-06 16:55:41 +00:00
|
|
|
(void) msleep(&so->so_timeo, SOCK_MTX(so),
|
2002-05-27 05:20:15 +00:00
|
|
|
PSOCK, "nfscon", 2 * hz);
|
1994-05-24 10:09:53 +00:00
|
|
|
if ((so->so_state & SS_ISCONNECTING) &&
|
|
|
|
so->so_error == 0 && rep &&
|
2002-06-28 21:53:08 +00:00
|
|
|
(error = nfs_sigintr(nmp, rep, rep->r_td)) != 0) {
|
1994-05-24 10:09:53 +00:00
|
|
|
so->so_state &= ~SS_ISCONNECTING;
|
2004-07-06 16:55:41 +00:00
|
|
|
SOCK_UNLOCK(so);
|
1994-05-24 10:09:53 +00:00
|
|
|
goto bad;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (so->so_error) {
|
|
|
|
error = so->so_error;
|
|
|
|
so->so_error = 0;
|
2004-07-06 16:55:41 +00:00
|
|
|
SOCK_UNLOCK(so);
|
1994-05-24 10:09:53 +00:00
|
|
|
goto bad;
|
|
|
|
}
|
2004-07-06 16:55:41 +00:00
|
|
|
SOCK_UNLOCK(so);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2004-07-12 06:22:42 +00:00
|
|
|
so->so_rcv.sb_timeo = 12 * hz;
|
2002-01-02 00:41:26 +00:00
|
|
|
so->so_snd.sb_timeo = 5 * hz;
|
2000-03-27 21:38:35 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Get buffer reservation size from sysctl, but impose reasonable
|
|
|
|
* limits.
|
|
|
|
*/
|
|
|
|
pktscale = nfs_bufpackets;
|
|
|
|
if (pktscale < 2)
|
|
|
|
pktscale = 2;
|
|
|
|
if (pktscale > 64)
|
|
|
|
pktscale = 64;
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
if (nmp->nm_sotype == SOCK_DGRAM) {
|
2000-03-27 21:38:35 +00:00
|
|
|
sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
|
1998-05-31 17:57:43 +00:00
|
|
|
rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
|
2000-03-27 21:38:35 +00:00
|
|
|
NFS_MAXPKTHDR) * pktscale;
|
1994-05-24 10:09:53 +00:00
|
|
|
} else if (nmp->nm_sotype == SOCK_SEQPACKET) {
|
2000-03-27 21:38:35 +00:00
|
|
|
sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR) * pktscale;
|
1998-05-31 17:57:43 +00:00
|
|
|
rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize) +
|
2000-03-27 21:38:35 +00:00
|
|
|
NFS_MAXPKTHDR) * pktscale;
|
1994-05-24 10:09:53 +00:00
|
|
|
} else {
|
|
|
|
if (nmp->nm_sotype != SOCK_STREAM)
|
|
|
|
panic("nfscon sotype");
|
|
|
|
if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
|
1998-08-23 03:07:17 +00:00
|
|
|
struct sockopt sopt;
|
|
|
|
int val;
|
|
|
|
|
|
|
|
bzero(&sopt, sizeof sopt);
|
2003-10-04 17:41:59 +00:00
|
|
|
sopt.sopt_dir = SOPT_SET;
|
1998-08-23 03:07:17 +00:00
|
|
|
sopt.sopt_level = SOL_SOCKET;
|
|
|
|
sopt.sopt_name = SO_KEEPALIVE;
|
|
|
|
sopt.sopt_val = &val;
|
|
|
|
sopt.sopt_valsize = sizeof val;
|
|
|
|
val = 1;
|
|
|
|
sosetopt(so, &sopt);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
if (so->so_proto->pr_protocol == IPPROTO_TCP) {
|
1998-08-23 03:07:17 +00:00
|
|
|
struct sockopt sopt;
|
|
|
|
int val;
|
|
|
|
|
|
|
|
bzero(&sopt, sizeof sopt);
|
2003-10-04 17:41:59 +00:00
|
|
|
sopt.sopt_dir = SOPT_SET;
|
1998-08-23 03:07:17 +00:00
|
|
|
sopt.sopt_level = IPPROTO_TCP;
|
|
|
|
sopt.sopt_name = TCP_NODELAY;
|
|
|
|
sopt.sopt_val = &val;
|
|
|
|
sopt.sopt_valsize = sizeof val;
|
|
|
|
val = 1;
|
|
|
|
sosetopt(so, &sopt);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
1998-05-31 20:09:01 +00:00
|
|
|
sndreserve = (nmp->nm_wsize + NFS_MAXPKTHDR +
|
2000-03-27 21:38:35 +00:00
|
|
|
sizeof (u_int32_t)) * pktscale;
|
1998-05-31 20:09:01 +00:00
|
|
|
rcvreserve = (nmp->nm_rsize + NFS_MAXPKTHDR +
|
2000-03-27 21:38:35 +00:00
|
|
|
sizeof (u_int32_t)) * pktscale;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
1994-10-02 17:27:07 +00:00
|
|
|
error = soreserve(so, sndreserve, rcvreserve);
|
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto bad;
|
2004-06-24 03:12:13 +00:00
|
|
|
SOCKBUF_LOCK(&so->so_rcv);
|
1994-05-24 10:09:53 +00:00
|
|
|
so->so_rcv.sb_flags |= SB_NOINTR;
|
2004-12-06 21:11:15 +00:00
|
|
|
so->so_upcallarg = (caddr_t)nmp;
|
|
|
|
if (so->so_type == SOCK_STREAM)
|
|
|
|
so->so_upcall = nfs_clnt_tcp_soupcall;
|
|
|
|
else
|
|
|
|
so->so_upcall = nfs_clnt_udp_soupcall;
|
|
|
|
so->so_rcv.sb_flags |= SB_UPCALL;
|
2004-06-24 03:12:13 +00:00
|
|
|
SOCKBUF_UNLOCK(&so->so_rcv);
|
|
|
|
SOCKBUF_LOCK(&so->so_snd);
|
1994-05-24 10:09:53 +00:00
|
|
|
so->so_snd.sb_flags |= SB_NOINTR;
|
2004-06-24 03:12:13 +00:00
|
|
|
SOCKBUF_UNLOCK(&so->so_snd);
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/* Initialize other non-zero congestion variables */
|
2001-09-18 23:32:09 +00:00
|
|
|
nmp->nm_srtt[0] = nmp->nm_srtt[1] = nmp->nm_srtt[2] =
|
1999-11-22 04:50:09 +00:00
|
|
|
nmp->nm_srtt[3] = (NFS_TIMEO << 3);
|
1994-05-24 10:09:53 +00:00
|
|
|
nmp->nm_sdrtt[0] = nmp->nm_sdrtt[1] = nmp->nm_sdrtt[2] =
|
1999-11-22 04:50:09 +00:00
|
|
|
nmp->nm_sdrtt[3] = 0;
|
1994-05-24 10:09:53 +00:00
|
|
|
nmp->nm_cwnd = NFS_MAXCWND / 2; /* Initial send window */
|
|
|
|
nmp->nm_sent = 0;
|
|
|
|
nmp->nm_timeouts = 0;
|
|
|
|
return (0);
|
|
|
|
|
|
|
|
bad:
|
|
|
|
nfs_disconnect(nmp);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Reconnect routine:
|
|
|
|
* Called when a connection is broken on a reliable protocol.
|
|
|
|
* - clean up the old socket
|
|
|
|
* - nfs_connect() again
|
|
|
|
* - set R_MUSTRESEND for all outstanding requests on mount point
|
|
|
|
* If this fails the mount point is DEAD!
|
|
|
|
* nb: Must be called with the nfs_sndlock() set on the mount point.
|
|
|
|
*/
|
1995-12-17 21:14:36 +00:00
|
|
|
static int
|
2001-09-18 23:32:09 +00:00
|
|
|
nfs_reconnect(struct nfsreq *rep)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-18 23:32:09 +00:00
|
|
|
struct nfsreq *rp;
|
|
|
|
struct nfsmount *nmp = rep->r_nmp;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
|
2004-07-12 06:22:42 +00:00
|
|
|
nfs_reconnects++;
|
1994-05-24 10:09:53 +00:00
|
|
|
nfs_disconnect(nmp);
|
1998-05-31 17:27:58 +00:00
|
|
|
while ((error = nfs_connect(nmp, rep)) != 0) {
|
2004-07-06 09:12:03 +00:00
|
|
|
if (error == ERESTART)
|
|
|
|
error = EINTR;
|
|
|
|
if (error == EIO || error == EINTR)
|
|
|
|
return (error);
|
2003-03-02 16:54:40 +00:00
|
|
|
(void) tsleep(&lbolt, PSOCK, "nfscon", 0);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
2004-12-06 21:11:15 +00:00
|
|
|
/*
|
|
|
|
* Clear the FORCE_RECONNECT flag only after the connect
|
|
|
|
* succeeds. To prevent races between multiple processes
|
|
|
|
* waiting on the mountpoint where the connection is being
|
|
|
|
* torn down. The first one to acquire the sndlock will
|
|
|
|
* retry the connection. The others block on the sndlock
|
|
|
|
* until the connection is established successfully, and
|
|
|
|
* the re-transmit the request.
|
|
|
|
*/
|
|
|
|
mtx_lock(&nmp->nm_nfstcpstate.mtx);
|
|
|
|
nmp->nm_nfstcpstate.flags &= ~NFS_TCP_FORCE_RECONNECT;
|
|
|
|
mtx_unlock(&nmp->nm_nfstcpstate.mtx);
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* Loop through outstanding request list and fix up all requests
|
|
|
|
* on old socket.
|
|
|
|
*/
|
2004-12-06 21:11:15 +00:00
|
|
|
mtx_lock(&nfs_reqq_mtx);
|
2001-09-18 23:32:09 +00:00
|
|
|
TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
|
1994-05-24 10:09:53 +00:00
|
|
|
if (rp->r_nmp == nmp)
|
|
|
|
rp->r_flags |= R_MUSTRESEND;
|
|
|
|
}
|
2004-12-06 21:11:15 +00:00
|
|
|
mtx_unlock(&nfs_reqq_mtx);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* NFS disconnect. Clean up and unlink.
|
|
|
|
*/
|
|
|
|
void
|
2001-09-18 23:32:09 +00:00
|
|
|
nfs_disconnect(struct nfsmount *nmp)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-18 23:32:09 +00:00
|
|
|
struct socket *so;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2004-06-16 03:12:50 +00:00
|
|
|
NET_ASSERT_GIANT();
|
2003-11-07 22:57:09 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
if (nmp->nm_so) {
|
|
|
|
so = nmp->nm_so;
|
2002-07-11 17:54:58 +00:00
|
|
|
nmp->nm_so = NULL;
|
2004-12-06 21:11:15 +00:00
|
|
|
SOCKBUF_LOCK(&so->so_rcv);
|
|
|
|
so->so_upcallarg = NULL;
|
|
|
|
so->so_upcall = NULL;
|
|
|
|
so->so_rcv.sb_flags &= ~SB_UPCALL;
|
|
|
|
SOCKBUF_UNLOCK(&so->so_rcv);
|
|
|
|
soshutdown(so, SHUT_WR);
|
1994-05-24 10:09:53 +00:00
|
|
|
soclose(so);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
1998-05-31 20:09:01 +00:00
|
|
|
void
|
2001-09-18 23:32:09 +00:00
|
|
|
nfs_safedisconnect(struct nfsmount *nmp)
|
1998-05-31 19:49:31 +00:00
|
|
|
{
|
1998-05-31 20:09:01 +00:00
|
|
|
struct nfsreq dummyreq;
|
1998-05-31 19:49:31 +00:00
|
|
|
|
|
|
|
bzero(&dummyreq, sizeof(dummyreq));
|
|
|
|
dummyreq.r_nmp = nmp;
|
|
|
|
nfs_disconnect(nmp);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* This is the nfs send routine. For connection based socket types, it
|
|
|
|
* must be called with an nfs_sndlock() on the socket.
|
|
|
|
* - return EINTR if the RPC is terminated, 0 otherwise
|
|
|
|
* - set R_MUSTRESEND if the send fails for any reason
|
1998-12-30 00:37:44 +00:00
|
|
|
* - do any cleanup required by recoverable socket errors (?)
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-18 23:32:09 +00:00
|
|
|
nfs_send(struct socket *so, struct sockaddr *nam, struct mbuf *top,
|
|
|
|
struct nfsreq *rep)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1997-08-16 19:16:27 +00:00
|
|
|
struct sockaddr *sendnam;
|
2004-07-06 09:12:03 +00:00
|
|
|
int error, error2, soflags, flags;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2004-06-16 03:12:50 +00:00
|
|
|
NET_ASSERT_GIANT();
|
2003-11-07 22:57:09 +00:00
|
|
|
|
2001-09-18 23:32:09 +00:00
|
|
|
KASSERT(rep, ("nfs_send: called with rep == NULL"));
|
|
|
|
|
2004-07-06 09:12:03 +00:00
|
|
|
error = nfs_sigintr(rep->r_nmp, rep, rep->r_td);
|
|
|
|
if (error) {
|
2001-09-18 23:32:09 +00:00
|
|
|
m_freem(top);
|
2004-07-06 09:12:03 +00:00
|
|
|
return (error);
|
2001-09-18 23:32:09 +00:00
|
|
|
}
|
|
|
|
if ((so = rep->r_nmp->nm_so) == NULL) {
|
|
|
|
rep->r_flags |= R_MUSTRESEND;
|
|
|
|
m_freem(top);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
rep->r_flags &= ~R_MUSTRESEND;
|
|
|
|
soflags = rep->r_nmp->nm_soflags;
|
|
|
|
|
2002-05-31 11:52:35 +00:00
|
|
|
if ((soflags & PR_CONNREQUIRED) || (so->so_state & SS_ISCONNECTED))
|
2002-07-11 17:54:58 +00:00
|
|
|
sendnam = NULL;
|
2002-05-31 11:52:35 +00:00
|
|
|
else
|
1994-05-24 10:09:53 +00:00
|
|
|
sendnam = nam;
|
|
|
|
if (so->so_type == SOCK_SEQPACKET)
|
|
|
|
flags = MSG_EOR;
|
|
|
|
else
|
|
|
|
flags = 0;
|
|
|
|
|
1997-04-27 20:01:29 +00:00
|
|
|
error = so->so_proto->pr_usrreqs->pru_sosend(so, sendnam, 0, top, 0,
|
2001-09-12 08:38:13 +00:00
|
|
|
flags, curthread /*XXX*/);
|
1998-08-01 09:04:02 +00:00
|
|
|
if (error == ENOBUFS && so->so_type == SOCK_DGRAM) {
|
|
|
|
error = 0;
|
2001-09-18 23:32:09 +00:00
|
|
|
rep->r_flags |= R_MUSTRESEND;
|
1998-08-01 09:04:02 +00:00
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error) {
|
2004-03-17 21:10:20 +00:00
|
|
|
/*
|
|
|
|
* Don't report EPIPE errors on nfs sockets.
|
|
|
|
* These can be due to idle tcp mounts which will be closed by
|
|
|
|
* netapp, solaris, etc. if left idle too long.
|
|
|
|
*/
|
2004-03-17 18:10:38 +00:00
|
|
|
if (error != EPIPE) {
|
|
|
|
log(LOG_INFO, "nfs send error %d for server %s\n",
|
|
|
|
error,
|
|
|
|
rep->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
|
|
|
|
}
|
2001-09-18 23:32:09 +00:00
|
|
|
/*
|
|
|
|
* Deal with errors for the client side.
|
|
|
|
*/
|
2004-07-06 09:12:03 +00:00
|
|
|
error2 = NFS_SIGREP(rep);
|
|
|
|
if (error2)
|
|
|
|
error = error2;
|
2001-09-18 23:32:09 +00:00
|
|
|
else
|
|
|
|
rep->r_flags |= R_MUSTRESEND;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
/*
|
1998-12-30 00:37:44 +00:00
|
|
|
* Handle any recoverable (soft) socket errors here. (?)
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
2004-07-06 09:12:03 +00:00
|
|
|
if (error != EINTR && error != ERESTART && error != EIO &&
|
1994-05-24 10:09:53 +00:00
|
|
|
error != EWOULDBLOCK && error != EPIPE)
|
|
|
|
error = 0;
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2004-12-06 21:11:15 +00:00
|
|
|
/*
 * Debug/statistics counter: number of times a reply arrived before the
 * requesting process reached msleep() in nfs_reply() (the "early reply"
 * race described below).
 */
int nfs_mrep_before_tsleep = 0;

/*
 * Wait for the reply to an outstanding NFS request.
 *
 * For connection-oriented transports (not SOCK_DGRAM), this also takes
 * the per-request send lock and drives reconnects/resends until the
 * request is on the wire.  Returns 0 when rep->r_mrep holds the reply,
 * EINTR/ETIMEDOUT on interruption or soft-mount timeout, or another
 * errno from send/reconnect.
 */
int
nfs_reply(struct nfsreq *rep)
{
	register struct socket *so;
	register struct mbuf *m;
	int error, sotype, slpflag;

	NET_ASSERT_GIANT();

	sotype = rep->r_nmp->nm_sotype;
	/*
	 * For reliable protocols, lock against other senders/receivers
	 * in case a reconnect is necessary.
	 */
	if (sotype != SOCK_DGRAM) {
		error = nfs_sndlock(rep);
		if (error)
			return (error);
tryagain:
		/* Reply may already have arrived while we slept for the lock. */
		if (rep->r_mrep) {
			nfs_sndunlock(rep);
			return (0);
		}
		if (rep->r_flags & R_SOFTTERM) {
			nfs_sndunlock(rep);
			return (EINTR);
		}
		so = rep->r_nmp->nm_so;
		mtx_lock(&rep->r_nmp->nm_nfstcpstate.mtx);
		if (!so ||
		    (rep->r_nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT)) {
			/* No socket, or the upcall asked for a reconnect. */
			mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
			error = nfs_reconnect(rep);
			if (error) {
				nfs_sndunlock(rep);
				return (error);
			}
			goto tryagain;
		} else
			mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
		/* Retransmit until the request no longer needs resending. */
		while (rep->r_flags & R_MUSTRESEND) {
			m = m_copym(rep->r_mreq, 0, M_COPYALL, M_WAIT);
			nfsstats.rpcretries++;
			error = nfs_send(so, rep->r_nmp->nm_nam, m, rep);
			if (error) {
				/*
				 * EINTR/ERESTART abort outright; any other
				 * send failure triggers a reconnect attempt.
				 */
				if (error == EINTR || error == ERESTART ||
				    (error = nfs_reconnect(rep)) != 0) {
					nfs_sndunlock(rep);
					return (error);
				}
				goto tryagain;
			}
		}
		nfs_sndunlock(rep);
	}
	slpflag = 0;
	if (rep->r_nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;	/* interruptible mount: allow signals */
	mtx_lock(&nfs_reply_mtx);
	if (rep->r_mrep != NULL) {
		/*
		 * This is a very rare race, but it does occur. The reply
		 * could come in and the wakeup could happen before the
		 * process tsleeps(). Blocking here without checking for
		 * this results in a missed wakeup(), blocking this request
		 * forever. The 2 reasons why this could happen are a context
		 * switch in the stack after the request is sent out, or heavy
		 * interrupt activity pinning down the process within the window.
		 * (after the request is sent).
		 */
		mtx_unlock(&nfs_reply_mtx);
		nfs_mrep_before_tsleep++;
		return (0);
	}
	error = msleep((caddr_t)rep, &nfs_reply_mtx,
		       slpflag | (PZERO - 1), "nfsreq", 0);
	mtx_unlock(&nfs_reply_mtx);
	if (error == EINTR || error == ERESTART)
		/* NFS operations aren't restartable. Map ERESTART to EINTR */
		return (EINTR);
	if (rep->r_flags & R_SOFTTERM)
		/* Request was terminated because we exceeded the retries (soft mount) */
		return (ETIMEDOUT);
	if (sotype == SOCK_STREAM) {
		/*
		 * Woken for a reconnect/resend rather than a reply: retake
		 * the send lock and loop back to the resend logic above.
		 */
		mtx_lock(&rep->r_nmp->nm_nfstcpstate.mtx);
		if (((rep->r_nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT) ||
		     (rep->r_flags & R_MUSTRESEND))) {
			mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
			error = nfs_sndlock(rep);
			if (error)
				return (error);
			goto tryagain;
		} else
			mtx_unlock(&rep->r_nmp->nm_nfstcpstate.mtx);
	}
	return (error);
}
|
|
|
|
|
|
|
|
/*
 * XXX TO DO
 * Make nfs_realign() non-blocking. Also make nfsm_dissect() nonblocking.
 */
/*
 * Match a complete RPC reply mbuf chain against the global outstanding
 * request queue by xid.  On a match, attach the reply to the request,
 * update the congestion window and RTT estimators, and wake the waiting
 * process; otherwise the datagram is dropped.  Called from the socket
 * upcalls (TCP and UDP paths).
 */
static void
nfs_clnt_match_xid(struct socket *so,
		   struct nfsmount *nmp,
		   struct mbuf *mrep)
{
	struct mbuf *md;
	caddr_t dpos;
	u_int32_t rxid, *tl;
	struct nfsreq *rep;
	register int32_t t1;
	int error;	/* used implicitly by the nfsm_dissect_nonblock macro */

	/*
	 * Search for any mbufs that are not a multiple of 4 bytes long
	 * or with m_data not longword aligned.
	 * These could cause pointer alignment problems, so copy them to
	 * well aligned mbufs.
	 */
	if (nfs_realign(&mrep, 5 * NFSX_UNSIGNED) == ENOMEM) {
		m_freem(mrep);
		nfsstats.rpcinvalid++;
		return;
	}

	/*
	 * Get the xid and check that it is an rpc reply
	 */
	md = mrep;
	dpos = mtod(md, caddr_t);
	tl = nfsm_dissect_nonblock(u_int32_t *, 2*NFSX_UNSIGNED);
	rxid = *tl++;
	if (*tl != rpc_reply) {
		m_freem(mrep);
nfsmout:	/* error target for the nfsm_dissect_nonblock macro above */
		nfsstats.rpcinvalid++;
		return;
	}

	mtx_lock(&nfs_reqq_mtx);
	/*
	 * Loop through the request list to match up the reply
	 * Iff no match, just drop the datagram
	 */
	TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
		if (rep->r_mrep == NULL && rxid == rep->r_xid) {
			/* Found it.. */
			rep->r_mrep = mrep;
			rep->r_md = md;
			rep->r_dpos = dpos;
			/*
			 * Update congestion window.
			 * Do the additive increase of
			 * one rpc/rtt.
			 */
			if (nmp->nm_cwnd <= nmp->nm_sent) {
				nmp->nm_cwnd +=
				   (NFS_CWNDSCALE * NFS_CWNDSCALE +
				    (nmp->nm_cwnd >> 1)) / nmp->nm_cwnd;
				if (nmp->nm_cwnd > NFS_MAXCWND)
					nmp->nm_cwnd = NFS_MAXCWND;
			}
			if (rep->r_flags & R_SENT) {
				rep->r_flags &= ~R_SENT;
				nmp->nm_sent -= NFS_CWNDSCALE;
			}
			/*
			 * Update rtt using a gain of 0.125 on the mean
			 * and a gain of 0.25 on the deviation.
			 */
			if (rep->r_flags & R_TIMING) {
				/*
				 * Since the timer resolution of
				 * NFS_HZ is so course, it can often
				 * result in r_rtt == 0. Since
				 * r_rtt == N means that the actual
				 * rtt is between N+dt and N+2-dt ticks,
				 * add 1.
				 */
				t1 = rep->r_rtt + 1;
				t1 -= (NFS_SRTT(rep) >> 3);
				NFS_SRTT(rep) += t1;
				if (t1 < 0)
					t1 = -t1;
				t1 -= (NFS_SDRTT(rep) >> 2);
				NFS_SDRTT(rep) += t1;
			}
			nmp->nm_timeouts = 0;
			break;
		}
	}
	/*
	 * If not matched to a request, drop it.
	 * If it's mine, wake up requestor.
	 */
	if (rep == 0) {
		nfsstats.rpcunexpected++;
		m_freem(mrep);
	} else
		wakeup_nfsreq(rep);
	mtx_unlock(&nfs_reqq_mtx);
}
|
1995-05-30 08:16:23 +00:00
|
|
|
|
2004-12-07 03:39:52 +00:00
|
|
|
/*
 * The wakeup of the requestor should be done under the mutex
 * to avoid potential missed wakeups.
 */
static void
wakeup_nfsreq(struct nfsreq *req)
{
	/*
	 * nfs_reply() checks req->r_mrep and then msleep()s on 'req' while
	 * holding nfs_reply_mtx; taking the same mutex around the wakeup
	 * closes the window between that check and the sleep.
	 */
	mtx_lock(&nfs_reply_mtx);
	wakeup((caddr_t)req);
	mtx_unlock(&nfs_reply_mtx);
}
|
|
|
|
|
2004-12-06 21:11:15 +00:00
|
|
|
/*
 * Flag a mount point as needing a TCP reconnect and wake every process
 * blocked in nfs_reply() for that mount; one of the woken processes will
 * perform the actual reconnect (see the tryagain path in nfs_reply()).
 */
static void
nfs_mark_for_reconnect(struct nfsmount *nmp)
{
	struct nfsreq *rp;

	/* Set the flag under its own mutex before waking anyone. */
	mtx_lock(&nmp->nm_nfstcpstate.mtx);
	nmp->nm_nfstcpstate.flags |= NFS_TCP_FORCE_RECONNECT;
	mtx_unlock(&nmp->nm_nfstcpstate.mtx);
	/*
	 * Wakeup all processes that are waiting for replies
	 * on this mount point. One of them does the reconnect.
	 */
	mtx_lock(&nfs_reqq_mtx);
	TAILQ_FOREACH(rp, &nfs_reqq, r_chain) {
		if (rp->r_nmp == nmp)
			wakeup_nfsreq(rp);
	}
	mtx_unlock(&nfs_reqq_mtx);
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
nfstcp_readable(struct socket *so, int bytes)
|
|
|
|
{
|
|
|
|
int retval;
|
|
|
|
|
|
|
|
SOCKBUF_LOCK(&so->so_rcv);
|
|
|
|
retval = (so->so_rcv.sb_cc >= (bytes) ||
|
|
|
|
(so->so_state & SBS_CANTRCVMORE) ||
|
|
|
|
so->so_error);
|
|
|
|
SOCKBUF_UNLOCK(&so->so_rcv);
|
|
|
|
return (retval);
|
|
|
|
}
|
|
|
|
|
|
|
|
#define nfstcp_marker_readable(so) nfstcp_readable(so, sizeof(u_int32_t))
|
|
|
|
|
|
|
|
/*
 * TCP receive-socket upcall for the NFS client.
 *
 * Runs the RPC record-marker state machine: alternately reads the 4-byte
 * Sun RPC record mark (when NFS_TCP_EXPECT_RPCMARKER is set) and then the
 * full RPC reply body of the length the marker announced.  Returns early
 * whenever the socket does not yet hold enough data; any read error,
 * EOF, or impossible record length marks the mount for reconnect.
 */
static void
nfs_clnt_tcp_soupcall(struct socket *so, void *arg, int waitflag)
{
	struct nfsmount *nmp = (struct nfsmount *)arg;
	struct mbuf *mp = NULL;
	struct uio auio;
	int error;
	u_int32_t len;
	int rcvflg;

	/*
	 * Don't pick any more data from the socket if we've marked the
	 * mountpoint for reconnect.
	 */
	mtx_lock(&nmp->nm_nfstcpstate.mtx);
	if (nmp->nm_nfstcpstate.flags & NFS_TCP_FORCE_RECONNECT) {
		mtx_unlock(&nmp->nm_nfstcpstate.mtx);
		return;
	} else
		mtx_unlock(&nmp->nm_nfstcpstate.mtx);
	auio.uio_td = curthread;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_READ;
	for ( ; ; ) {
		if (nmp->nm_nfstcpstate.flags & NFS_TCP_EXPECT_RPCMARKER) {
			if (!nfstcp_marker_readable(so)) {
				/* Marker is not readable */
				return;
			}
			/* Pull exactly the 4-byte record mark off the socket. */
			auio.uio_resid = sizeof(u_int32_t);
			auio.uio_iov = NULL;
			auio.uio_iovcnt = 0;
			mp = NULL;
			rcvflg = (MSG_DONTWAIT | MSG_SOCALLBCK);
			error = so->so_proto->pr_usrreqs->pru_soreceive
				(so, (struct sockaddr **)0,
				 &auio, &mp, (struct mbuf **)0, &rcvflg);
			/*
			 * We've already tested that the socket is readable. 2 cases
			 * here, we either read 0 bytes (client closed connection),
			 * or got some other error. In both cases, we tear down the
			 * connection.
			 */
			if (error || auio.uio_resid > 0) {
				if (auio.uio_resid > 0) {
					log(LOG_ERR,
					    "nfs/tcp clnt: Peer closed connection, tearing down TCP connection\n");
				} else {
					log(LOG_ERR,
					    "nfs/tcp clnt: Error %d reading socket, tearing down TCP connection\n",
					    error);
				}
				goto mark_reconnect;
			}
			if (mp == NULL)
				panic("nfs_clnt_tcp_soupcall: Got empty mbuf chain from sorecv\n");
			/* Strip the last-fragment bit; the rest is the length. */
			len = ntohl(*mtod(mp, u_int32_t *)) & ~0x80000000;
			m_freem(mp);
			/*
			 * This is SERIOUS! We are out of sync with the sender
			 * and forcing a disconnect/reconnect is all I can do.
			 */
			if (len > NFS_MAXPACKET || len == 0) {
				log(LOG_ERR, "%s (%d) from nfs server %s\n",
				    "impossible packet length",
				    len,
				    nmp->nm_mountp->mnt_stat.f_mntfromname);
				goto mark_reconnect;
			}
			nmp->nm_nfstcpstate.rpcresid = len;
			nmp->nm_nfstcpstate.flags &= ~(NFS_TCP_EXPECT_RPCMARKER);
		}
		/*
		 * Processed RPC marker or no RPC marker to process.
		 * Pull in and process data.
		 */
		if (nmp->nm_nfstcpstate.rpcresid > 0) {
			if (!nfstcp_readable(so, nmp->nm_nfstcpstate.rpcresid)) {
				/* All data not readable */
				return;
			}
			auio.uio_resid = nmp->nm_nfstcpstate.rpcresid;
			auio.uio_iov = NULL;
			auio.uio_iovcnt = 0;
			mp = NULL;
			rcvflg = (MSG_DONTWAIT | MSG_SOCALLBCK);
			error = so->so_proto->pr_usrreqs->pru_soreceive
				(so, (struct sockaddr **)0,
				 &auio, &mp, (struct mbuf **)0, &rcvflg);
			if (error || auio.uio_resid > 0) {
				if (auio.uio_resid > 0) {
					log(LOG_ERR,
					    "nfs/tcp clnt: Peer closed connection, tearing down TCP connection\n");
				} else {
					log(LOG_ERR,
					    "nfs/tcp clnt: Error %d reading socket, tearing down TCP connection\n",
					    error);
				}
				goto mark_reconnect;
			}
			if (mp == NULL)
				panic("nfs_clnt_tcp_soupcall: Got empty mbuf chain from sorecv\n");
			nmp->nm_nfstcpstate.rpcresid = 0;
			nmp->nm_nfstcpstate.flags |= NFS_TCP_EXPECT_RPCMARKER;
			/* We got the entire RPC reply. Match XIDs and wake up requestor */
			nfs_clnt_match_xid(so, nmp, mp);
		}
	}

mark_reconnect:
	nfs_mark_for_reconnect(nmp);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
nfs_clnt_udp_soupcall(struct socket *so, void *arg, int waitflag)
|
|
|
|
{
|
|
|
|
struct nfsmount *nmp = (struct nfsmount *)arg;
|
|
|
|
struct uio auio;
|
|
|
|
struct mbuf *mp = NULL;
|
|
|
|
struct mbuf *control = NULL;
|
|
|
|
int error, rcvflag;
|
|
|
|
|
|
|
|
auio.uio_resid = 1000000;
|
|
|
|
auio.uio_td = curthread;
|
|
|
|
rcvflag = MSG_DONTWAIT;
|
|
|
|
auio.uio_resid = 1000000000;
|
|
|
|
do {
|
|
|
|
mp = control = NULL;
|
|
|
|
error = so->so_proto->pr_usrreqs->pru_soreceive(so,
|
|
|
|
NULL, &auio, &mp,
|
|
|
|
&control, &rcvflag);
|
|
|
|
if (control)
|
|
|
|
m_freem(control);
|
|
|
|
if (mp)
|
|
|
|
nfs_clnt_match_xid(so, nmp, mp);
|
|
|
|
} while (mp && !error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* nfs_request - goes something like this
|
|
|
|
* - fill in request struct
|
|
|
|
* - links it into list
|
|
|
|
* - calls nfs_send() for first transmit
|
|
|
|
* - calls nfs_receive() to get reply
|
|
|
|
* - break down rpc header and return with nfs reply pointed to
|
|
|
|
* by mrep or error
|
|
|
|
* nb: always frees up mreq mbuf list
|
|
|
|
*/
|
2001-09-18 23:32:09 +00:00
|
|
|
/* XXX overloaded before */
|
|
|
|
#define NQ_TRYLATERDEL 15 /* Initial try later delay (sec) */
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
 * Perform one NFS RPC: build the RPC header around 'mrest', queue the
 * request, transmit it (unless congestion control defers to nfs_timer),
 * wait for the reply in nfs_reply(), then validate the RPC-level status.
 * On success *mrp/*mdp/*dposp describe the reply; 'mrest' is always
 * consumed.  NFSv4 mounts are handed off to nfs4_request().
 */
int
nfs_request(struct vnode *vp, struct mbuf *mrest, int procnum,
    struct thread *td, struct ucred *cred, struct mbuf **mrp,
    struct mbuf **mdp, caddr_t *dposp)
{
	struct mbuf *mrep, *m2;
	struct nfsreq *rep;
	u_int32_t *tl;
	int i;
	struct nfsmount *nmp;
	struct mbuf *m, *md, *mheadend;
	time_t waituntil;
	caddr_t dpos;
	int s, error = 0, mrest_len, auth_len, auth_type;
	int trylater_delay = NQ_TRYLATERDEL, trylater_cnt = 0;
	struct timeval now;
	u_int32_t xid;

	/* Reject requests while attempting a forced unmount. */
	if (vp->v_mount->mnt_kern_flag & MNTK_UNMOUNTF) {
		m_freem(mrest);
		return (ESTALE);
	}
	nmp = VFSTONFS(vp->v_mount);
	if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
		return nfs4_request(vp, mrest, procnum, td, cred, mrp, mdp, dposp);
	MALLOC(rep, struct nfsreq *, sizeof(struct nfsreq), M_NFSREQ, M_WAITOK);
	rep->r_mrep = rep->r_md = NULL;
	rep->r_nmp = nmp;
	rep->r_vp = vp;
	rep->r_td = td;
	rep->r_procnum = procnum;

	/* Arrange the first "not responding" message after the initial delay. */
	getmicrouptime(&now);
	rep->r_lastmsg = now.tv_sec -
	    ((nmp->nm_tprintf_delay) - (nmp->nm_tprintf_initial_delay));
	mrest_len = m_length(mrest, NULL);

	/*
	 * Get the RPC header with authorization.
	 */
	auth_type = RPCAUTH_UNIX;
	if (cred->cr_ngroups < 1)
		panic("nfsreq nogrps");
	auth_len = ((((cred->cr_ngroups - 1) > nmp->nm_numgrps) ?
		nmp->nm_numgrps : (cred->cr_ngroups - 1)) << 2) +
		5 * NFSX_UNSIGNED;
	m = nfsm_rpchead(cred, nmp->nm_flag, procnum, auth_type, auth_len,
	     mrest, mrest_len, &mheadend, &xid);

	/*
	 * For stream protocols, insert a Sun RPC Record Mark.
	 */
	if (nmp->nm_sotype == SOCK_STREAM) {
		M_PREPEND(m, NFSX_UNSIGNED, M_TRYWAIT);
		*mtod(m, u_int32_t *) = htonl(0x80000000 |
			 (m->m_pkthdr.len - NFSX_UNSIGNED));
	}
	rep->r_mreq = m;
	rep->r_xid = xid;
tryagain:
	if (nmp->nm_flag & NFSMNT_SOFT)
		rep->r_retry = nmp->nm_retry;
	else
		rep->r_retry = NFS_MAXREXMIT + 1;	/* past clip limit */
	rep->r_rtt = rep->r_rexmit = 0;
	if (proct[procnum] > 0)
		rep->r_flags = R_TIMING;
	else
		rep->r_flags = 0;
	rep->r_mrep = NULL;

	/*
	 * Do the client side RPC.
	 */
	nfsstats.rpcrequests++;
	/*
	 * Chain request into list of outstanding requests. Be sure
	 * to put it LAST so timer finds oldest requests first.
	 */
	s = splsoftclock();
	mtx_lock(&nfs_reqq_mtx);
	/* First outstanding request (re)arms the retransmit timer. */
	if (TAILQ_EMPTY(&nfs_reqq))
		callout_reset(&nfs_callout, nfs_ticks, nfs_timer, NULL);
	TAILQ_INSERT_TAIL(&nfs_reqq, rep, r_chain);
	mtx_unlock(&nfs_reqq_mtx);

	/*
	 * If backing off another request or avoiding congestion, don't
	 * send this one now but let timer do it. If not timing a request,
	 * do it now.
	 */
	if (nmp->nm_so && (nmp->nm_sotype != SOCK_DGRAM ||
		(nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		nmp->nm_sent < nmp->nm_cwnd)) {
		splx(s);
		error = nfs_sndlock(rep);
		if (!error) {
			m2 = m_copym(m, 0, M_COPYALL, M_TRYWAIT);
			error = nfs_send(nmp->nm_so, nmp->nm_nam, m2, rep);
			nfs_sndunlock(rep);
		}
		if (!error && (rep->r_flags & R_MUSTRESEND) == 0) {
			nmp->nm_sent += NFS_CWNDSCALE;
			rep->r_flags |= R_SENT;
		}
	} else {
		/* Congestion window closed: nfs_timer() will send it. */
		splx(s);
		rep->r_rtt = -1;
	}

	/*
	 * Wait for the reply from our send or the timer's.
	 */
	if (!error || error == EPIPE)
		error = nfs_reply(rep);

	/*
	 * RPC done, unlink the request.
	 */
	s = splsoftclock();
	mtx_lock(&nfs_reqq_mtx);
	TAILQ_REMOVE(&nfs_reqq, rep, r_chain);
	/* Last outstanding request disarms the retransmit timer. */
	if (TAILQ_EMPTY(&nfs_reqq))
		callout_stop(&nfs_callout);
	mtx_unlock(&nfs_reqq_mtx);
	splx(s);

	/*
	 * Decrement the outstanding request count.
	 */
	if (rep->r_flags & R_SENT) {
		rep->r_flags &= ~R_SENT;	/* paranoia */
		nmp->nm_sent -= NFS_CWNDSCALE;
	}

	/*
	 * If there was a successful reply and a tprintf msg.
	 * tprintf a response.
	 */
	if (!error)
		nfs_up(rep, nmp, rep->r_td, "is alive again", NFSSTA_TIMEO);
	mrep = rep->r_mrep;
	md = rep->r_md;
	dpos = rep->r_dpos;
	if (error) {
		/*
		 * If we got interrupted by a signal in nfs_reply(), there's
		 * a very small window where the reply could've come in before
		 * this process got scheduled in. To handle that case, we need
		 * to free the reply if it was delivered.
		 */
		if (rep->r_mrep != NULL)
			m_freem(rep->r_mrep);
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	if (rep->r_mrep == NULL)
		panic("nfs_request: rep->r_mrep shouldn't be NULL if no error\n");

	/*
	 * break down the rpc header and check if ok
	 */
	tl = nfsm_dissect(u_int32_t *, 3 * NFSX_UNSIGNED);
	if (*tl++ == rpc_msgdenied) {
		if (*tl == rpc_mismatch)
			error = EOPNOTSUPP;
		else
			error = EACCES;
		m_freem(mrep);
		m_freem(rep->r_mreq);
		free((caddr_t)rep, M_NFSREQ);
		return (error);
	}

	/*
	 * Just throw away any verifyer (ie: kerberos etc).
	 */
	i = fxdr_unsigned(int, *tl++);		/* verf type */
	i = fxdr_unsigned(int32_t, *tl);	/* len */
	if (i > 0)
		nfsm_adv(nfsm_rndup(i));
	tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
	/* 0 == ok */
	if (*tl == 0) {
		tl = nfsm_dissect(u_int32_t *, NFSX_UNSIGNED);
		if (*tl != 0) {
			error = fxdr_unsigned(int, *tl);
			/*
			 * NFSv3 JUKEBOX-style backoff: sleep with
			 * exponentially increasing delay and reissue.
			 */
			if ((nmp->nm_flag & NFSMNT_NFSV3) &&
				error == NFSERR_TRYLATER) {
				m_freem(mrep);
				error = 0;
				waituntil = time_second + trylater_delay;
				while (time_second < waituntil)
					(void) tsleep(&lbolt,
						PSOCK, "nqnfstry", 0);
				trylater_delay *= nfs_backoff[trylater_cnt];
				if (trylater_cnt < NFS_NBACKOFF - 1)
					trylater_cnt++;
				goto tryagain;
			}

			/*
			 * If the File Handle was stale, invalidate the
			 * lookup cache, just in case.
			 */
			if (error == ESTALE)
				cache_purge(vp);
			if (nmp->nm_flag & NFSMNT_NFSV3) {
				*mrp = mrep;
				*mdp = md;
				*dposp = dpos;
				error |= NFSERR_RETERR;
			} else
				m_freem(mrep);
			m_freem(rep->r_mreq);
			free((caddr_t)rep, M_NFSREQ);
			return (error);
		}

		*mrp = mrep;
		*mdp = md;
		*dposp = dpos;
		m_freem(rep->r_mreq);
		FREE((caddr_t)rep, M_NFSREQ);
		return (0);
	}
	m_freem(mrep);
	error = EPROTONOSUPPORT;
nfsmout:	/* error target for the nfsm_dissect/nfsm_adv macros */
	m_freem(rep->r_mreq);
	free((caddr_t)rep, M_NFSREQ);
	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Nfs timer routine
|
|
|
|
* Scan the nfsreq list and retranmit any requests that have timed out
|
|
|
|
* To avoid retransmission attempts on STREAM sockets (in the future) make
|
|
|
|
* sure to set the r_retry field to 0 (implies nm_retry == 0).
|
2004-12-06 21:11:15 +00:00
|
|
|
*
|
|
|
|
* XXX -
|
|
|
|
* For now, since we don't register MPSAFE callouts for the NFS client -
|
|
|
|
* softclock() acquires Giant before calling us. That prevents req entries
|
|
|
|
* from being removed from the list (from nfs_request()). But we still
|
|
|
|
* acquire the nfs reqq mutex to make sure the state of individual req
|
|
|
|
* entries is not modified from RPC reply handling (from socket callback)
|
|
|
|
* while nfs_timer is walking the list of reqs.
|
|
|
|
* The nfs reqq lock cannot be held while we do the pru_send() because of a
|
|
|
|
* lock ordering violation. The NFS client socket callback acquires
|
|
|
|
* inp_lock->nfsreq mutex and pru_send acquires inp_lock. So we drop the
|
|
|
|
* reqq mutex (and reacquire it after the pru_send()). This won't work
|
|
|
|
* when we move to fine grained locking for NFS. When we get to that point,
|
|
|
|
* a rewrite of nfs_timer() will be needed.
|
1994-05-24 10:09:53 +00:00
|
|
|
*/
|
|
|
|
void
nfs_timer(void *arg)
{
	struct nfsreq *rep;
	struct mbuf *m;
	struct socket *so;
	struct nfsmount *nmp;
	int timeo;
	int s, error;
	struct timeval now;

	getmicrouptime(&now);
	s = splnet();
	mtx_lock(&nfs_reqq_mtx);
	TAILQ_FOREACH(rep, &nfs_reqq, r_chain) {
		nmp = rep->r_nmp;
		/* Skip requests that are answered or already terminated. */
		if (rep->r_mrep || (rep->r_flags & R_SOFTTERM))
			continue;
		if (nfs_sigintr(nmp, rep, rep->r_td))
			continue;
		/* Rate-limited "server not responding" console message. */
		if (nmp->nm_tprintf_initial_delay != 0 &&
		    (rep->r_rexmit > 2 || (rep->r_flags & R_RESENDERR)) &&
		    rep->r_lastmsg + nmp->nm_tprintf_delay < now.tv_sec) {
			rep->r_lastmsg = now.tv_sec;
			nfs_down(rep, nmp, rep->r_td, "not responding",
			    0, NFSSTA_TIMEO);
#if 0
			if (!(nmp->nm_state & NFSSTA_MOUNTED)) {
				/* we're not yet completely mounted and */
				/* we can't complete an RPC, so we fail */
				nfsstats.rpctimeouts++;
				nfs_softterm(rep);
				continue;
			}
#endif
		}
		if (rep->r_rtt >= 0) {
			/* Request is being timed: check for RTO expiry. */
			rep->r_rtt++;
			if (nmp->nm_flag & NFSMNT_DUMBTIMR)
				timeo = nmp->nm_timeo;
			else
				timeo = NFS_RTO(nmp, proct[rep->r_procnum]);
			if (nmp->nm_timeouts > 0)
				timeo *= nfs_backoff[nmp->nm_timeouts - 1];
			if (rep->r_rtt <= timeo)
				continue;
			if (nmp->nm_timeouts < NFS_NBACKOFF)
				nmp->nm_timeouts++;
		}
		if (rep->r_rexmit >= rep->r_retry) {	/* too many */
			nfsstats.rpctimeouts++;
			nfs_softterm(rep);
			continue;
		}
		/* Stream transports never retransmit; just clip the count. */
		if (nmp->nm_sotype != SOCK_DGRAM) {
			if (++rep->r_rexmit > NFS_MAXREXMIT)
				rep->r_rexmit = NFS_MAXREXMIT;
			continue;
		}
		if ((so = nmp->nm_so) == NULL)
			continue;

		/*
		 * If there is enough space and the window allows..
		 *	Resend it
		 * Set r_rtt to -1 in case we fail to send it now.
		 */
		rep->r_rtt = -1;
		if (sbspace(&so->so_snd) >= rep->r_mreq->m_pkthdr.len &&
		   ((nmp->nm_flag & NFSMNT_DUMBTIMR) ||
		    (rep->r_flags & R_SENT) ||
		    nmp->nm_sent < nmp->nm_cwnd) &&
		   (m = m_copym(rep->r_mreq, 0, M_COPYALL, M_DONTWAIT))){
			/*
			 * Drop the reqq mutex across pru_send() to avoid the
			 * inp_lock -> nfs_reqq_mtx lock-order reversal (see
			 * the XXX block comment above this function).
			 */
			mtx_unlock(&nfs_reqq_mtx);
			if ((nmp->nm_flag & NFSMNT_NOCONN) == 0)
			    error = (*so->so_proto->pr_usrreqs->pru_send)
				    (so, 0, m, NULL, NULL, curthread);
			else
			    error = (*so->so_proto->pr_usrreqs->pru_send)
				    (so, 0, m, nmp->nm_nam, NULL, curthread);
			mtx_lock(&nfs_reqq_mtx);
			if (error) {
				if (NFSIGNORE_SOERROR(nmp->nm_soflags, error))
					so->so_error = 0;
				rep->r_flags |= R_RESENDERR;
			} else {
				/*
				 * Iff first send, start timing
				 * else turn timing off, backoff timer
				 * and divide congestion window by 2.
				 */
				rep->r_flags &= ~R_RESENDERR;
				if (rep->r_flags & R_SENT) {
					rep->r_flags &= ~R_TIMING;
					if (++rep->r_rexmit > NFS_MAXREXMIT)
						rep->r_rexmit = NFS_MAXREXMIT;
					nmp->nm_cwnd >>= 1;
					if (nmp->nm_cwnd < NFS_CWNDSCALE)
						nmp->nm_cwnd = NFS_CWNDSCALE;
					nfsstats.rpcretries++;
				} else {
					rep->r_flags |= R_SENT;
					nmp->nm_sent += NFS_CWNDSCALE;
				}
				rep->r_rtt = 0;
			}
		}
	}
	mtx_unlock(&nfs_reqq_mtx);
	splx(s);
	/* Rearm ourselves for the next tick. */
	callout_reset(&nfs_callout, nfs_ticks, nfs_timer, NULL);
}
|
|
|
|
|
2002-01-02 00:41:26 +00:00
|
|
|
/*
|
|
|
|
* Mark all of an nfs mount's outstanding requests with R_SOFTTERM and
|
|
|
|
* wait for all requests to complete. This is used by forced unmounts
|
|
|
|
* to terminate any outstanding RPCs.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
nfs_nmcancelreqs(nmp)
|
|
|
|
struct nfsmount *nmp;
|
|
|
|
{
|
|
|
|
struct nfsreq *req;
|
|
|
|
int i, s;
|
|
|
|
|
|
|
|
s = splnet();
|
2004-12-06 21:11:15 +00:00
|
|
|
mtx_lock(&nfs_reqq_mtx);
|
2002-01-02 00:41:26 +00:00
|
|
|
TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
|
|
|
|
if (nmp != req->r_nmp || req->r_mrep != NULL ||
|
|
|
|
(req->r_flags & R_SOFTTERM))
|
|
|
|
continue;
|
|
|
|
nfs_softterm(req);
|
|
|
|
}
|
2004-12-06 21:11:15 +00:00
|
|
|
mtx_unlock(&nfs_reqq_mtx);
|
2002-01-02 00:41:26 +00:00
|
|
|
splx(s);
|
|
|
|
|
|
|
|
for (i = 0; i < 30; i++) {
|
|
|
|
s = splnet();
|
2004-12-06 21:11:15 +00:00
|
|
|
mtx_lock(&nfs_reqq_mtx);
|
2002-01-02 00:41:26 +00:00
|
|
|
TAILQ_FOREACH(req, &nfs_reqq, r_chain) {
|
|
|
|
if (nmp == req->r_nmp)
|
|
|
|
break;
|
|
|
|
}
|
2004-12-06 21:11:15 +00:00
|
|
|
mtx_unlock(&nfs_reqq_mtx);
|
2002-01-02 00:41:26 +00:00
|
|
|
splx(s);
|
|
|
|
if (req == NULL)
|
|
|
|
return (0);
|
2002-01-10 02:15:35 +00:00
|
|
|
tsleep(&lbolt, PSOCK, "nfscancel", 0);
|
2002-01-02 00:41:26 +00:00
|
|
|
}
|
|
|
|
return (EBUSY);
|
|
|
|
}
|
|
|
|
|
1999-12-13 04:24:55 +00:00
|
|
|
/*
|
|
|
|
* Flag a request as being about to terminate (due to NFSMNT_INT/NFSMNT_SOFT).
|
|
|
|
* The nm_send count is decremented now to avoid deadlocks when the process in
|
|
|
|
* soreceive() hasn't yet managed to send its own request.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static void
|
2001-09-18 23:32:09 +00:00
|
|
|
nfs_softterm(struct nfsreq *rep)
|
1999-12-13 04:24:55 +00:00
|
|
|
{
|
|
|
|
|
2001-09-18 23:32:09 +00:00
|
|
|
rep->r_flags |= R_SOFTTERM;
|
1999-12-13 04:24:55 +00:00
|
|
|
if (rep->r_flags & R_SENT) {
|
|
|
|
rep->r_nmp->nm_sent -= NFS_CWNDSCALE;
|
|
|
|
rep->r_flags &= ~R_SENT;
|
|
|
|
}
|
2004-12-06 21:11:15 +00:00
|
|
|
/*
|
|
|
|
* Request terminated, wakeup the blocked process, so that we
|
|
|
|
* can return EINTR back.
|
|
|
|
*/
|
2004-12-07 03:39:52 +00:00
|
|
|
wakeup_nfsreq(rep);
|
2004-12-06 21:11:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Any signal that can interrupt an NFS operation in an intr mount
 * should be added to this set.
 *
 * Iterated by nfs_sig_pending() and nfs_set_sigmask() below.
 */
int nfs_sig_set[] = {
	SIGINT,
	SIGTERM,
	SIGHUP,
	SIGKILL,
	SIGQUIT
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check to see if one of the signals in our subset is pending on
|
|
|
|
* the process (in an intr mount).
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
nfs_sig_pending(sigset_t set)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++)
|
|
|
|
if (SIGISMEMBER(set, nfs_sig_set[i]))
|
|
|
|
return (1);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The set/restore sigmask functions are used to (temporarily) overwrite
|
|
|
|
* the process p_sigmask during an RPC call (for example). These are also
|
|
|
|
* used in other places in the NFS client that might tsleep().
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
nfs_set_sigmask(struct thread *td, sigset_t *oldset)
|
|
|
|
{
|
|
|
|
sigset_t newset;
|
|
|
|
int i;
|
|
|
|
struct proc *p;
|
|
|
|
|
|
|
|
SIGFILLSET(newset);
|
|
|
|
if (td == NULL)
|
|
|
|
td = curthread; /* XXX */
|
|
|
|
p = td->td_proc;
|
|
|
|
/* Remove the NFS set of signals from newset */
|
|
|
|
PROC_LOCK(p);
|
|
|
|
mtx_lock(&p->p_sigacts->ps_mtx);
|
|
|
|
for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++) {
|
|
|
|
/*
|
|
|
|
* But make sure we leave the ones already masked
|
|
|
|
* by the process, ie. remove the signal from the
|
|
|
|
* temporary signalmask only if it wasn't already
|
|
|
|
* in p_sigmask.
|
|
|
|
*/
|
|
|
|
if (!SIGISMEMBER(td->td_sigmask, nfs_sig_set[i]) &&
|
|
|
|
!SIGISMEMBER(p->p_sigacts->ps_sigignore, nfs_sig_set[i]))
|
|
|
|
SIGDELSET(newset, nfs_sig_set[i]);
|
|
|
|
}
|
|
|
|
mtx_unlock(&p->p_sigacts->ps_mtx);
|
|
|
|
PROC_UNLOCK(p);
|
|
|
|
kern_sigprocmask(td, SIG_SETMASK, &newset, oldset, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
nfs_restore_sigmask(struct thread *td, sigset_t *set)
|
|
|
|
{
|
|
|
|
if (td == NULL)
|
|
|
|
td = curthread; /* XXX */
|
|
|
|
kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* NFS wrapper to msleep(), that shoves a new p_sigmask and restores the
|
|
|
|
* old one after msleep() returns.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
nfs_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo)
|
|
|
|
{
|
|
|
|
sigset_t oldset;
|
|
|
|
int error;
|
|
|
|
struct proc *p;
|
|
|
|
|
|
|
|
if ((priority & PCATCH) == 0)
|
|
|
|
return msleep(ident, mtx, priority, wmesg, timo);
|
|
|
|
if (td == NULL)
|
|
|
|
td = curthread; /* XXX */
|
|
|
|
nfs_set_sigmask(td, &oldset);
|
|
|
|
error = msleep(ident, mtx, priority, wmesg, timo);
|
|
|
|
nfs_restore_sigmask(td, &oldset);
|
|
|
|
p = td->td_proc;
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* NFS wrapper to tsleep(), that shoves a new p_sigmask and restores the
|
|
|
|
* old one after tsleep() returns.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
nfs_tsleep(struct thread *td, void *ident, int priority, char *wmesg, int timo)
|
|
|
|
{
|
|
|
|
sigset_t oldset;
|
|
|
|
int error;
|
|
|
|
struct proc *p;
|
|
|
|
|
|
|
|
if ((priority & PCATCH) == 0)
|
|
|
|
return tsleep(ident, priority, wmesg, timo);
|
|
|
|
if (td == NULL)
|
|
|
|
td = curthread; /* XXX */
|
|
|
|
nfs_set_sigmask(td, &oldset);
|
|
|
|
error = tsleep(ident, priority, wmesg, timo);
|
|
|
|
nfs_restore_sigmask(td, &oldset);
|
|
|
|
p = td->td_proc;
|
|
|
|
return (error);
|
1999-12-13 04:24:55 +00:00
|
|
|
}
|
1996-01-13 23:27:58 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
 * Test for a termination condition pending on the process.
 * This is used for NFSMNT_INT mounts.
 *
 * Returns EIO if the request was soft-terminated or the mount is being
 * forcibly unmounted, EINTR if an interruptible signal is pending on an
 * intr mount, and 0 otherwise.
 */
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *rep, struct thread *td)
{
	struct proc *p;
	sigset_t tmpset;

	/* NFSv4 mounts have their own interrupt handling. */
	if ((nmp->nm_flag & NFSMNT_NFSV4) != 0)
		return nfs4_sigintr(nmp, rep, td);
	/* The request was already flagged for termination. */
	if (rep && (rep->r_flags & R_SOFTTERM))
		return (EIO);
	/* Terminate all requests while attempting a forced unmount. */
	if (nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF)
		return (EIO);
	/* Non-intr mounts are never interrupted by signals. */
	if (!(nmp->nm_flag & NFSMNT_INT))
		return (0);
	if (td == NULL)
		return (0);

	p = td->td_proc;
	PROC_LOCK(p);
	/*
	 * Compute the pending signals that are neither blocked by the
	 * thread's mask nor ignored by the process; ps_sigignore is
	 * protected by ps_mtx, hence the inner lock.
	 */
	tmpset = p->p_siglist;
	SIGSETNAND(tmpset, td->td_sigmask);
	mtx_lock(&p->p_sigacts->ps_mtx);
	SIGSETNAND(tmpset, p->p_sigacts->ps_sigignore);
	mtx_unlock(&p->p_sigacts->ps_mtx);
	if (SIGNOTEMPTY(p->p_siglist) && nfs_sig_pending(tmpset)) {
		PROC_UNLOCK(p);
		return (EINTR);
	}
	PROC_UNLOCK(p);

	return (0);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Lock a socket against others.
|
|
|
|
* Necessary for STREAM sockets to ensure you get an entire rpc request/reply
|
|
|
|
* and also to avoid race conditions between the processes with nfs requests
|
|
|
|
* in progress when a reconnect is necessary.
|
|
|
|
*/
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-18 23:32:09 +00:00
|
|
|
nfs_sndlock(struct nfsreq *rep)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-18 23:32:09 +00:00
|
|
|
int *statep = &rep->r_nmp->nm_state;
|
2001-09-12 08:38:13 +00:00
|
|
|
struct thread *td;
|
2004-07-06 09:12:03 +00:00
|
|
|
int error, slpflag = 0, slptimeo = 0;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2003-03-26 01:46:11 +00:00
|
|
|
td = rep->r_td;
|
|
|
|
if (rep->r_nmp->nm_flag & NFSMNT_INT)
|
|
|
|
slpflag = PCATCH;
|
1998-05-19 07:11:27 +00:00
|
|
|
while (*statep & NFSSTA_SNDLOCK) {
|
2004-07-06 09:12:03 +00:00
|
|
|
error = nfs_sigintr(rep->r_nmp, rep, td);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
1998-05-19 07:11:27 +00:00
|
|
|
*statep |= NFSSTA_WANTSND;
|
2003-03-02 16:54:40 +00:00
|
|
|
(void) tsleep(statep, slpflag | (PZERO - 1),
|
1998-05-19 07:11:27 +00:00
|
|
|
"nfsndlck", slptimeo);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (slpflag == PCATCH) {
|
|
|
|
slpflag = 0;
|
|
|
|
slptimeo = 2 * hz;
|
|
|
|
}
|
|
|
|
}
|
1998-05-19 07:11:27 +00:00
|
|
|
*statep |= NFSSTA_SNDLOCK;
|
1994-05-24 10:09:53 +00:00
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Unlock the stream socket for others.
|
|
|
|
*/
|
|
|
|
void
|
2001-09-18 23:32:09 +00:00
|
|
|
nfs_sndunlock(struct nfsreq *rep)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2001-09-18 23:32:09 +00:00
|
|
|
int *statep = &rep->r_nmp->nm_state;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1998-05-19 07:11:27 +00:00
|
|
|
if ((*statep & NFSSTA_SNDLOCK) == 0)
|
1994-05-24 10:09:53 +00:00
|
|
|
panic("nfs sndunlock");
|
1998-05-19 07:11:27 +00:00
|
|
|
*statep &= ~NFSSTA_SNDLOCK;
|
|
|
|
if (*statep & NFSSTA_WANTSND) {
|
|
|
|
*statep &= ~NFSSTA_WANTSND;
|
2003-03-02 16:54:40 +00:00
|
|
|
wakeup(statep);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * nfs_realign:
 *
 * Check for badly aligned mbuf data and realign by copying the unaligned
 * portion of the data into a new mbuf chain and freeing the portions
 * of the old chain that were replaced.
 *
 * We cannot simply realign the data within the existing mbuf chain
 * because the underlying buffers may contain other rpc commands and
 * we cannot afford to overwrite them.
 *
 * We would prefer to avoid this situation entirely. The situation does
 * not occur with NFS/UDP and is supposed to only occassionally occur
 * with TCP. Use vfs.nfs.realign_count and realign_test to check this.
 *
 * XXX - This still looks buggy. If there are multiple mbufs in the mbuf chain
 * passed in that are unaligned, the first loop will allocate multiple new
 * mbufs. But then, it doesn't seem to chain these together. So, if there are
 * multiple unaligned mbufs, we're looking at a pretty serious mbuf leak.
 * But, this has been how it is, perhaps the misalignment only happens in the head
 * of the chain.
 *
 * Returns 0 on success or ENOMEM if a replacement mbuf/cluster could not
 * be allocated.  'hsiz' is currently unused here; presumably kept for
 * interface compatibility with callers -- TODO confirm.
 */
static int
nfs_realign(struct mbuf **pm, int hsiz)
{
	struct mbuf *m;
	struct mbuf *n = NULL;
	int off = 0;

	++nfs_realign_test;
	/*
	 * Walk the chain until the first mbuf whose length or data
	 * pointer is not 4-byte aligned; allocate a replacement mbuf
	 * (with a cluster if the data won't fit in a plain mbuf).
	 */
	while ((m = *pm) != NULL) {
		if ((m->m_len & 0x3) || (mtod(m, intptr_t) & 0x3)) {
			MGET(n, M_DONTWAIT, MT_DATA);
			if (n == NULL)
				return (ENOMEM);
			if (m->m_len >= MINCLSIZE) {
				MCLGET(n, M_DONTWAIT);
				if (n->m_ext.ext_buf == NULL) {
					m_freem(n);
					return (ENOMEM);
				}
			}
			n->m_len = 0;
			break;
		}
		pm = &m->m_next;
	}
	/*
	 * If n is non-NULL, loop on m copying data, then replace the
	 * portion of the chain that had to be realigned.
	 * (m_copyback extends n as needed; *pm still points at the
	 * first misaligned mbuf, so the old tail is freed as a unit.)
	 */
	if (n != NULL) {
		++nfs_realign_count;
		while (m) {
			m_copyback(n, off, m->m_len, mtod(m, caddr_t));
			off += m->m_len;
			m = m->m_next;
		}
		m_freem(*pm);
		*pm = n;
	}
	return (0);
}
|
|
|
|
|
1998-05-31 17:27:58 +00:00
|
|
|
|
|
|
|
static int
|
2004-07-06 09:12:03 +00:00
|
|
|
nfs_msg(struct thread *td, const char *server, const char *msg, int error)
|
1998-05-31 17:27:58 +00:00
|
|
|
{
|
2004-07-06 09:12:03 +00:00
|
|
|
struct proc *p;
|
1998-05-31 17:27:58 +00:00
|
|
|
|
2004-07-06 09:12:03 +00:00
|
|
|
p = td ? td->td_proc : NULL;
|
|
|
|
if (error) {
|
|
|
|
tprintf(p, LOG_INFO, "nfs server %s: %s, error %d\n", server,
|
|
|
|
msg, error);
|
|
|
|
} else {
|
|
|
|
tprintf(p, LOG_INFO, "nfs server %s: %s\n", server, msg);
|
|
|
|
}
|
1998-05-31 17:27:58 +00:00
|
|
|
return (0);
|
|
|
|
}
|
2004-07-06 09:12:03 +00:00
|
|
|
|
|
|
|
void
|
|
|
|
nfs_down(rep, nmp, td, msg, error, flags)
|
|
|
|
struct nfsreq *rep;
|
|
|
|
struct nfsmount *nmp;
|
|
|
|
struct thread *td;
|
|
|
|
const char *msg;
|
|
|
|
int error, flags;
|
|
|
|
{
|
|
|
|
|
|
|
|
if (nmp == NULL)
|
|
|
|
return;
|
|
|
|
if ((flags & NFSSTA_TIMEO) && !(nmp->nm_state & NFSSTA_TIMEO)) {
|
|
|
|
vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
|
|
|
|
VQ_NOTRESP, 0);
|
|
|
|
nmp->nm_state |= NFSSTA_TIMEO;
|
|
|
|
}
|
|
|
|
#ifdef NFSSTA_LOCKTIMEO
|
|
|
|
if ((flags & NFSSTA_LOCKTIMEO) && !(nmp->nm_state & NFSSTA_LOCKTIMEO)) {
|
|
|
|
vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
|
|
|
|
VQ_NOTRESPLOCK, 0);
|
|
|
|
nmp->nm_state |= NFSSTA_LOCKTIMEO;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
if (rep)
|
|
|
|
rep->r_flags |= R_TPRINTFMSG;
|
|
|
|
nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, error);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
nfs_up(rep, nmp, td, msg, flags)
|
|
|
|
struct nfsreq *rep;
|
|
|
|
struct nfsmount *nmp;
|
|
|
|
struct thread *td;
|
|
|
|
const char *msg;
|
|
|
|
int flags;
|
|
|
|
{
|
|
|
|
if (nmp == NULL)
|
|
|
|
return;
|
|
|
|
if ((rep == NULL) || (rep->r_flags & R_TPRINTFMSG) != 0)
|
|
|
|
nfs_msg(td, nmp->nm_mountp->mnt_stat.f_mntfromname, msg, 0);
|
|
|
|
if ((flags & NFSSTA_TIMEO) && (nmp->nm_state & NFSSTA_TIMEO)) {
|
|
|
|
nmp->nm_state &= ~NFSSTA_TIMEO;
|
|
|
|
vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
|
|
|
|
VQ_NOTRESP, 1);
|
|
|
|
}
|
|
|
|
#ifdef NFSSTA_LOCKTIMEO
|
|
|
|
if ((flags & NFSSTA_LOCKTIMEO) && (nmp->nm_state & NFSSTA_LOCKTIMEO)) {
|
|
|
|
nmp->nm_state &= ~NFSSTA_LOCKTIMEO;
|
|
|
|
vfs_event_signal(&nmp->nm_mountp->mnt_stat.f_fsid,
|
|
|
|
VQ_NOTRESPLOCK, 1);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|