/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_subs.c	8.8 (Berkeley) 5/22/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * These functions support the macros and help fiddle mbuf chains for
 * the nfs op functions.  They do things like create the rpc header and
 * copy data between mbuf chains and uio lists.
 */

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/sysent.h>
#include <sys/syscall.h>
#include <sys/sysproto.h>
#include <sys/taskqueue.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

#include <nfs/nfsproto.h>
#include <nfsclient/nfs.h>
#include <nfsclient/nfsnode.h>
#include <nfs/nfs_kdtrace.h>
#include <nfs/xdr_subs.h>
#include <nfsclient/nfsm_subs.h>
#include <nfsclient/nfsmount.h>

#include <netinet/in.h>

/*
 * Note that stdarg.h and the ANSI style va_start macro are used for both
 * ANSI and traditional C compilers.
 */
#include <machine/stdarg.h>

#ifdef KDTRACE_HOOKS
dtrace_nfsclient_attrcache_flush_probe_func_t
		dtrace_nfsclient_attrcache_flush_done_probe;
uint32_t	nfsclient_attrcache_flush_done_id;

dtrace_nfsclient_attrcache_get_hit_probe_func_t
		dtrace_nfsclient_attrcache_get_hit_probe;
uint32_t	nfsclient_attrcache_get_hit_id;

dtrace_nfsclient_attrcache_get_miss_probe_func_t
		dtrace_nfsclient_attrcache_get_miss_probe;
uint32_t	nfsclient_attrcache_get_miss_id;

dtrace_nfsclient_attrcache_load_probe_func_t
		dtrace_nfsclient_attrcache_load_done_probe;
uint32_t	nfsclient_attrcache_load_done_id;
#endif /* !KDTRACE_HOOKS */

/*
 * Data items converted to xdr at startup, since they are constant.
 * This is kinda hokey, but may save a little time doing byte swaps.
 */
u_int32_t	nfs_xdrneg1;
u_int32_t	nfs_true, nfs_false;

/* And other global data */
static u_int32_t nfs_xid = 0;
static enum vtype nv2tov_type[8] = {
	VNON, VREG, VDIR, VBLK, VCHR, VLNK, VNON, VNON
};

int		nfs_ticks;
int		nfs_pbuf_freecnt = -1;	/* start out unlimited */

struct nfs_bufq	nfs_bufq;
static struct mtx nfs_xid_mtx;
struct task	nfs_nfsiodnew_task;

/*
 * The reverse mapping from generic to Version 2 procedure numbers.
 */
int nfsv2_procid[NFS_NPROCS] = {
	NFSV2PROC_NULL,
	NFSV2PROC_GETATTR,
	NFSV2PROC_SETATTR,
	NFSV2PROC_LOOKUP,
	NFSV2PROC_NOOP,
	NFSV2PROC_READLINK,
	NFSV2PROC_READ,
	NFSV2PROC_WRITE,
	NFSV2PROC_CREATE,
	NFSV2PROC_MKDIR,
	NFSV2PROC_SYMLINK,
	NFSV2PROC_CREATE,
	NFSV2PROC_REMOVE,
	NFSV2PROC_RMDIR,
	NFSV2PROC_RENAME,
	NFSV2PROC_LINK,
	NFSV2PROC_READDIR,
	NFSV2PROC_NOOP,
	NFSV2PROC_STATFS,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
	NFSV2PROC_NOOP,
};

LIST_HEAD(nfsnodehashhead, nfsnode);

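/*
 * Generate the xid for a new RPC request.  The counter is seeded from
 * random() on first use, serialized by nfs_xid_mtx, and never returns zero.
 */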
u_int32_t
nfs_xid_gen(void)
{
	uint32_t xid;

	mtx_lock(&nfs_xid_mtx);

	/* Get a pretty random xid to start with */
	if (!nfs_xid)
		nfs_xid = random();
	/*
	 * Skip zero xid if it should ever happen.
	 */
	if (++nfs_xid == 0)
		nfs_xid++;
	xid = nfs_xid;
	mtx_unlock(&nfs_xid_mtx);
	return xid;
}

/*
 * Create the header for an rpc request packet.
 * The hsiz is the size of the rest of the nfs request header
 * (just used to decide if a cluster is a good idea).
 */
struct mbuf *
nfsm_reqhead(struct vnode *vp, u_long procid, int hsiz)
{
	struct mbuf *mb;

	MGET(mb, M_WAIT, MT_DATA);
	if (hsiz >= MINCLSIZE)
		MCLGET(mb, M_WAIT);
	mb->m_len = 0;
	return (mb);
}

/*
 * Copies a uio scatter/gather list to an mbuf chain.
 * NOTE: can only handle iovcnt == 1
 */
int
nfsm_uiotombuf(struct uio *uiop, struct mbuf **mq, int siz, caddr_t *bpos)
{
	char *uiocp;
	struct mbuf *mp, *mp2;
	int xfer, left, mlen;
	int uiosiz, clflg, rem;
	char *cp;

	KASSERT(uiop->uio_iovcnt == 1, ("nfsm_uiotombuf: iovcnt != 1"));

	if (siz > MLEN)		/* or should it >= MCLBYTES ?? */
		clflg = 1;
	else
		clflg = 0;
	rem = nfsm_rndup(siz) - siz;
	mp = mp2 = *mq;
	while (siz > 0) {
		left = uiop->uio_iov->iov_len;
		uiocp = uiop->uio_iov->iov_base;
		if (left > siz)
			left = siz;
		uiosiz = left;
		while (left > 0) {
			mlen = M_TRAILINGSPACE(mp);
			if (mlen == 0) {
				MGET(mp, M_WAIT, MT_DATA);
				if (clflg)
					MCLGET(mp, M_WAIT);
				mp->m_len = 0;
				mp2->m_next = mp;
				mp2 = mp;
				mlen = M_TRAILINGSPACE(mp);
			}
			xfer = (left > mlen) ? mlen : left;
#ifdef notdef
			/* Not Yet.. */
			if (uiop->uio_iov->iov_op != NULL)
				(*(uiop->uio_iov->iov_op))
				    (uiocp, mtod(mp, caddr_t) + mp->m_len, xfer);
			else
#endif
			if (uiop->uio_segflg == UIO_SYSSPACE)
				bcopy(uiocp, mtod(mp, caddr_t) + mp->m_len, xfer);
			else
				copyin(uiocp, mtod(mp, caddr_t) + mp->m_len, xfer);
			mp->m_len += xfer;
			left -= xfer;
			uiocp += xfer;
			uiop->uio_offset += xfer;
			uiop->uio_resid -= xfer;
		}
		uiop->uio_iov->iov_base =
		    (char *)uiop->uio_iov->iov_base + uiosiz;
		uiop->uio_iov->iov_len -= uiosiz;
		siz -= uiosiz;
	}
	if (rem > 0) {
		if (rem > M_TRAILINGSPACE(mp)) {
			MGET(mp, M_WAIT, MT_DATA);
			mp->m_len = 0;
			mp2->m_next = mp;
		}
		cp = mtod(mp, caddr_t) + mp->m_len;
		for (left = 0; left < rem; left++)
			*cp++ = '\0';
		mp->m_len += rem;
		*bpos = cp;
	} else
		*bpos = mtod(mp, caddr_t) + mp->m_len;
	*mq = mp;
	return (0);
}

/*
 * Copy a string into mbufs for the hard cases...
 */
int
nfsm_strtmbuf(struct mbuf **mb, char **bpos, const char *cp, long siz)
{
	struct mbuf *m1 = NULL, *m2;
	long left, xfer, len, tlen;
	u_int32_t *tl;
	int putsize;

	putsize = 1;
	m2 = *mb;
	left = M_TRAILINGSPACE(m2);
	if (left > 0) {
		tl = ((u_int32_t *)(*bpos));
		*tl++ = txdr_unsigned(siz);
		putsize = 0;
		left -= NFSX_UNSIGNED;
		m2->m_len += NFSX_UNSIGNED;
		if (left > 0) {
			bcopy(cp, (caddr_t) tl, left);
			siz -= left;
			cp += left;
			m2->m_len += left;
			left = 0;
		}
	}
	/* Loop around adding mbufs */
	while (siz > 0) {
		MGET(m1, M_WAIT, MT_DATA);
		if (siz > MLEN)
			MCLGET(m1, M_WAIT);
		m1->m_len = NFSMSIZ(m1);
		m2->m_next = m1;
		m2 = m1;
		tl = mtod(m1, u_int32_t *);
		tlen = 0;
		if (putsize) {
			*tl++ = txdr_unsigned(siz);
			m1->m_len -= NFSX_UNSIGNED;
			tlen = NFSX_UNSIGNED;
			putsize = 0;
		}
		if (siz < m1->m_len) {
			len = nfsm_rndup(siz);
			xfer = siz;
			if (xfer < len)
				*(tl + (xfer >> 2)) = 0;
		} else {
			xfer = len = m1->m_len;
		}
		bcopy(cp, (caddr_t) tl, xfer);
		m1->m_len = len + tlen;
		siz -= xfer;
		cp += xfer;
	}
	*mb = m1;
	*bpos = mtod(m1, caddr_t) + m1->m_len;
	return (0);
}

/*
 * Called once to initialize data structures...
 */
int
nfs_init(struct vfsconf *vfsp)
{
	int i;

	nfsmount_zone = uma_zcreate("NFSMOUNT", sizeof(struct nfsmount),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	nfs_true = txdr_unsigned(TRUE);
	nfs_false = txdr_unsigned(FALSE);
	nfs_xdrneg1 = txdr_unsigned(-1);
	nfs_ticks = (hz * NFS_TICKINTVL + 500) / 1000;
	if (nfs_ticks < 1)
		nfs_ticks = 1;
	/* Ensure async daemons disabled */
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++) {
		nfs_iodwant[i] = NFSIOD_NOT_AVAILABLE;
		nfs_iodmount[i] = NULL;
	}
	nfs_nhinit();			/* Init the nfsnode table */

	/*
	 * Initialize reply list and start timer
	 */
	mtx_init(&nfs_iod_mtx, "NFS iod lock", NULL, MTX_DEF);
	mtx_init(&nfs_xid_mtx, "NFS xid lock", NULL, MTX_DEF);
	TASK_INIT(&nfs_nfsiodnew_task, 0, nfs_nfsiodnew_tq, NULL);

	nfs_pbuf_freecnt = nswbuf / 2 + 1;

	return (0);
}

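/*
 * Undo nfs_init(): shut down the nfsiod threads, drain the nfsiodnew
 * task, and release the nfsnode table and the nfsmount zone.
 */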
int
nfs_uninit(struct vfsconf *vfsp)
{
	int i;

	/*
	 * Tell all nfsiod processes to exit.  Clear nfs_iodmax, and wakeup
	 * any sleeping nfsiods so they check nfs_iodmax and exit.
	 * Drain nfsiodnew task before we wait for them to finish.
	 */
	mtx_lock(&nfs_iod_mtx);
	nfs_iodmax = 0;
	mtx_unlock(&nfs_iod_mtx);
	taskqueue_drain(taskqueue_thread, &nfs_nfsiodnew_task);
	mtx_lock(&nfs_iod_mtx);
	for (i = 0; i < nfs_numasync; i++)
		if (nfs_iodwant[i] == NFSIOD_AVAILABLE)
			wakeup(&nfs_iodwant[i]);
	/* The last nfsiod to exit will wake us up when nfs_numasync hits 0 */
	while (nfs_numasync)
		msleep(&nfs_numasync, &nfs_iod_mtx, PWAIT, "ioddie", 0);
	mtx_unlock(&nfs_iod_mtx);
	nfs_nhuninit();
	uma_zdestroy(nfsmount_zone);
	return (0);
}

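/*
 * Serialize access to an nfsnode's directory cookie list, using the
 * NDIRCOOKIELK flag in n_flag as a sleepable lock bit.
 */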
void
nfs_dircookie_lock(struct nfsnode *np)
{
	mtx_lock(&np->n_mtx);
	while (np->n_flag & NDIRCOOKIELK)
		(void) msleep(&np->n_flag, &np->n_mtx, PZERO, "nfsdirlk", 0);
	np->n_flag |= NDIRCOOKIELK;
	mtx_unlock(&np->n_mtx);
}

void
nfs_dircookie_unlock(struct nfsnode *np)
{
	mtx_lock(&np->n_mtx);
	np->n_flag &= ~NDIRCOOKIELK;
	wakeup(&np->n_flag);
	mtx_unlock(&np->n_mtx);
}

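/*
 * Upgrade a shared vnode lock to exclusive if needed, returning the
 * previous lock state so nfs_downgrade_vnlock() can restore it.
 */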
int
nfs_upgrade_vnlock(struct vnode *vp)
{
	int old_lock;

	ASSERT_VOP_LOCKED(vp, "nfs_upgrade_vnlock");
	old_lock = VOP_ISLOCKED(vp);
	if (old_lock != LK_EXCLUSIVE) {
		KASSERT(old_lock == LK_SHARED,
		    ("nfs_upgrade_vnlock: wrong old_lock %d", old_lock));
		/* Upgrade to exclusive lock, this might block */
		vn_lock(vp, LK_UPGRADE | LK_RETRY);
	}
	return (old_lock);
}

void
nfs_downgrade_vnlock(struct vnode *vp, int old_lock)
{
	if (old_lock != LK_EXCLUSIVE) {
		KASSERT(old_lock == LK_SHARED, ("wrong old_lock %d", old_lock));
		/* Downgrade from exclusive lock. */
		vn_lock(vp, LK_DOWNGRADE | LK_RETRY);
	}
}

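/*
 * Print a formatted message, taking Giant around vprintf().
 */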
void
nfs_printf(const char *fmt, ...)
{
	va_list ap;

	mtx_lock(&Giant);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
	mtx_unlock(&Giant);
}

/*
 * Attribute cache routines.
 * nfs_loadattrcache() - loads or updates the cache contents from attributes
 *	that are on the mbuf list
 * nfs_getattrcache() - returns valid attributes if found in cache, returns
 *	error otherwise
 */

/*
 * Load the attribute cache (that lives in the nfsnode entry) with
 * the values on the mbuf list, and iff vaper is not NULL, copy the
 * attributes to *vaper as well.
 */
int
nfs_loadattrcache(struct vnode **vpp, struct mbuf **mdp, caddr_t *dposp,
    struct vattr *vaper, int dontshrink)
{
	struct vnode *vp = *vpp;
	struct vattr *vap;
	struct nfs_fattr *fp;
	struct nfsnode *np = NULL;
	int32_t t1;
	caddr_t cp2;
	int rdev;
	struct mbuf *md;
	enum vtype vtyp;
	u_short vmode;
	struct timespec mtime, mtime_save;
	int v3 = NFS_ISV3(vp);
	int error = 0;

	md = *mdp;
	t1 = (mtod(md, caddr_t) + md->m_len) - *dposp;
	cp2 = nfsm_disct(mdp, dposp, NFSX_FATTR(v3), t1, M_WAIT);
	if (cp2 == NULL) {
		error = EBADRPC;
		goto out;
	}
	fp = (struct nfs_fattr *)cp2;
	if (v3) {
		vtyp = nfsv3tov_type(fp->fa_type);
		vmode = fxdr_unsigned(u_short, fp->fa_mode);
		rdev = makedev(fxdr_unsigned(int, fp->fa3_rdev.specdata1),
		    fxdr_unsigned(int, fp->fa3_rdev.specdata2));
		fxdr_nfsv3time(&fp->fa3_mtime, &mtime);
	} else {
		vtyp = nfsv2tov_type(fp->fa_type);
		vmode = fxdr_unsigned(u_short, fp->fa_mode);
		/*
		 * XXX
		 *
		 * The duplicate information returned in fa_type and fa_mode
		 * is an ambiguity in the NFS version 2 protocol.
		 *
		 * VREG should be taken literally as a regular file.  If a
		 * server intends to return some type information differently
		 * in the upper bits of the mode field (e.g. for sockets, or
		 * FIFOs), NFSv2 mandates fa_type to be VNON.  Anyway, we
		 * leave the examination of the mode bits even in the VREG
		 * case to avoid breakage for bogus servers, but we make sure
		 * that there are actually type bits set in the upper part of
		 * fa_mode (and failing that, trust the va_type field).
		 *
		 * NFSv3 cleared the issue, and requires fa_mode to not
		 * contain any type information (while also introducing
		 * sockets and FIFOs for fa_type).
		 */
		if (vtyp == VNON || (vtyp == VREG && (vmode & S_IFMT) != 0))
			vtyp = IFTOVT(vmode);
		rdev = fxdr_unsigned(int32_t, fp->fa2_rdev);
		fxdr_nfsv2time(&fp->fa2_mtime, &mtime);

		/*
		 * Really ugly NFSv2 kludge.
		 */
		if (vtyp == VCHR && rdev == 0xffffffff)
			vtyp = VFIFO;
	}

	/*
	 * If v_type == VNON it is a new node, so fill in the v_type,
	 * n_mtime fields.  Check to see if it represents a special
	 * device, and if so, check for a possible alias.  Once the
	 * correct vnode has been obtained, fill in the rest of the
	 * information.
	 */
	np = VTONFS(vp);
	mtx_lock(&np->n_mtx);
	if (vp->v_type != vtyp) {
		vp->v_type = vtyp;
		if (vp->v_type == VFIFO)
			vp->v_op = &nfs_fifoops;
		np->n_mtime = mtime;
	}
	vap = &np->n_vattr;
	vap->va_type = vtyp;
	vap->va_mode = (vmode & 07777);
	vap->va_rdev = rdev;
	mtime_save = vap->va_mtime;
	vap->va_mtime = mtime;
	vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
	if (v3) {
		vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
		vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
		vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
		vap->va_size = fxdr_hyper(&fp->fa3_size);
		vap->va_blocksize = NFS_FABLKSIZE;
		vap->va_bytes = fxdr_hyper(&fp->fa3_used);
		vap->va_fileid = fxdr_unsigned(int32_t,
		    fp->fa3_fileid.nfsuquad[1]);
		fxdr_nfsv3time(&fp->fa3_atime, &vap->va_atime);
		fxdr_nfsv3time(&fp->fa3_ctime, &vap->va_ctime);
		vap->va_flags = 0;
		vap->va_filerev = 0;
	} else {
		vap->va_nlink = fxdr_unsigned(u_short, fp->fa_nlink);
		vap->va_uid = fxdr_unsigned(uid_t, fp->fa_uid);
		vap->va_gid = fxdr_unsigned(gid_t, fp->fa_gid);
		vap->va_size = fxdr_unsigned(u_int32_t, fp->fa2_size);
		vap->va_blocksize = fxdr_unsigned(int32_t, fp->fa2_blocksize);
		vap->va_bytes = (u_quad_t)fxdr_unsigned(int32_t, fp->fa2_blocks)
		    * NFS_FABLKSIZE;
		vap->va_fileid = fxdr_unsigned(int32_t, fp->fa2_fileid);
		fxdr_nfsv2time(&fp->fa2_atime, &vap->va_atime);
		vap->va_flags = 0;
		vap->va_ctime.tv_sec = fxdr_unsigned(u_int32_t,
		    fp->fa2_ctime.nfsv2_sec);
		vap->va_ctime.tv_nsec = 0;
		vap->va_gen = fxdr_unsigned(u_int32_t,
		    fp->fa2_ctime.nfsv2_usec);
		vap->va_filerev = 0;
	}
	np->n_attrstamp = time_second;
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (dontshrink && vap->va_size < np->n_size) {
				/*
				 * We've been told not to shrink the file;
				 * zero np->n_attrstamp to indicate that
				 * the attributes are stale.
				 */
				vap->va_size = np->n_size;
				np->n_attrstamp = 0;
				KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			} else if (np->n_flag & NMODIFIED) {
				/*
				 * We've modified the file: Use the larger
				 * of our size, and the server's size.
				 */
				if (vap->va_size < np->n_size) {
					vap->va_size = np->n_size;
				} else {
					np->n_size = vap->va_size;
					np->n_flag |= NSIZECHANGED;
				}
			} else {
				np->n_size = vap->va_size;
				np->n_flag |= NSIZECHANGED;
			}
			vnode_pager_setsize(vp, np->n_size);
		} else {
			np->n_size = vap->va_size;
		}
	}
	/*
	 * The following checks are added to prevent a race between (say)
	 * a READDIR+ and a WRITE.
	 * READDIR+, WRITE requests sent out.
	 * READDIR+ resp, WRITE resp received on client.
	 * However, the WRITE resp was handled before the READDIR+ resp
	 * causing the post op attrs from the write to be loaded first
	 * and the attrs from the READDIR+ to be loaded later.  If this
	 * happens, we have stale attrs loaded into the attrcache.
	 * We detect this by checking for the mtime moving backwards, and
	 * invalidate the attrcache when it happens.
	 */
	if (timespeccmp(&mtime_save, &vap->va_mtime, >)) {
		/* mtime went backwards */
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
	}
	if (vaper != NULL) {
		bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
		if (np->n_flag & NCHG) {
			if (np->n_flag & NACC)
				vaper->va_atime = np->n_atim;
			if (np->n_flag & NUPD)
				vaper->va_mtime = np->n_mtim;
		}
	}

#ifdef KDTRACE_HOOKS
	if (np->n_attrstamp != 0)
		KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, &np->n_vattr, 0);
#endif
	mtx_unlock(&np->n_mtx);
out:
#ifdef KDTRACE_HOOKS
	if (error)
		KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, NULL, error);
#endif
	return (error);
}

#ifdef NFS_ACDEBUG
#include <sys/sysctl.h>
SYSCTL_DECL(_vfs_oldnfs);
static int nfs_acdebug;
SYSCTL_INT(_vfs_oldnfs, OID_AUTO, acdebug, CTLFLAG_RW, &nfs_acdebug, 0,
    "Toggle acdebug (attribute cache debug) flag");
#endif

/*
 * Check the time stamp.
 * If the cache is valid, copy contents to *vaper and return 0;
 * otherwise return an error.
 */
int
nfs_getattrcache(struct vnode *vp, struct vattr *vaper)
{
	struct nfsnode *np;
	struct vattr *vap;
	struct nfsmount *nmp;
	int timeo;

	np = VTONFS(vp);
	vap = &np->n_vattr;
	nmp = VFSTONFS(vp->v_mount);
#ifdef NFS_ACDEBUG
	mtx_lock(&Giant);	/* nfs_printf() */
#endif
	mtx_lock(&np->n_mtx);
	/* XXX n_mtime doesn't seem to be updated on a miss-and-reload */
	timeo = (time_second - np->n_mtime.tv_sec) / 10;

#ifdef NFS_ACDEBUG
	if (nfs_acdebug > 1)
		nfs_printf("nfs_getattrcache: initial timeo = %d\n", timeo);
#endif

	if (vap->va_type == VDIR) {
		if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acdirmin)
			timeo = nmp->nm_acdirmin;
		else if (timeo > nmp->nm_acdirmax)
			timeo = nmp->nm_acdirmax;
	} else {
		if ((np->n_flag & NMODIFIED) || timeo < nmp->nm_acregmin)
			timeo = nmp->nm_acregmin;
		else if (timeo > nmp->nm_acregmax)
			timeo = nmp->nm_acregmax;
	}

#ifdef NFS_ACDEBUG
	if (nfs_acdebug > 2)
		nfs_printf("acregmin %d; acregmax %d; acdirmin %d; acdirmax %d\n",
		    nmp->nm_acregmin, nmp->nm_acregmax,
		    nmp->nm_acdirmin, nmp->nm_acdirmax);

	if (nfs_acdebug)
		nfs_printf("nfs_getattrcache: age = %d; final timeo = %d\n",
		    (time_second - np->n_attrstamp), timeo);
#endif

	if ((time_second - np->n_attrstamp) >= timeo) {
		nfsstats.attrcache_misses++;
		mtx_unlock(&np->n_mtx);
#ifdef NFS_ACDEBUG
		mtx_unlock(&Giant);	/* nfs_printf() */
#endif
		KDTRACE_NFS_ATTRCACHE_GET_MISS(vp);
		return (ENOENT);
	}
	nfsstats.attrcache_hits++;
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (np->n_flag & NMODIFIED) {
				if (vap->va_size < np->n_size)
					vap->va_size = np->n_size;
				else
					np->n_size = vap->va_size;
			} else {
				np->n_size = vap->va_size;
			}
			vnode_pager_setsize(vp, np->n_size);
		} else {
			np->n_size = vap->va_size;
		}
	}
	bcopy((caddr_t)vap, (caddr_t)vaper, sizeof(struct vattr));
	if (np->n_flag & NCHG) {
		if (np->n_flag & NACC)
			vaper->va_atime = np->n_atim;
		if (np->n_flag & NUPD)
			vaper->va_mtime = np->n_mtim;
	}
	mtx_unlock(&np->n_mtx);
#ifdef NFS_ACDEBUG
	mtx_unlock(&Giant);	/* nfs_printf() */
#endif
	KDTRACE_NFS_ATTRCACHE_GET_HIT(vp, vap);
	return (0);
}

/*
 * Purge all cached information about an NFS vnode including name
 * cache entries, the attribute cache, and the access cache.  This is
 * called when an NFS request for a node fails with a stale
 * filehandle.
 */
void
nfs_purgecache(struct vnode *vp)
{
	struct nfsnode *np;
	int i;

	np = VTONFS(vp);
	cache_purge(vp);
	mtx_lock(&np->n_mtx);
	np->n_attrstamp = 0;
	KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
	for (i = 0; i < NFS_ACCESSCACHESIZE; i++)
		np->n_accesscache[i].stamp = 0;
	KDTRACE_NFS_ACCESSCACHE_FLUSH_DONE(vp);
	mtx_unlock(&np->n_mtx);
}

static nfsuint64 nfs_nullcookie = { { 0, 0 } };
/*
 * This function finds the directory cookie that corresponds to the
 * logical byte offset given.
 */
nfsuint64 *
nfs_getcookie(struct nfsnode *np, off_t off, int add)
{
	struct nfsdmap *dp, *dp2;
	int pos;
	nfsuint64 *retval = NULL;

	pos = (uoff_t)off / NFS_DIRBLKSIZ;
	if (pos == 0 || off < 0) {
		KASSERT(!add, ("nfs getcookie add at <= 0"));
		return (&nfs_nullcookie);
	}
	pos--;
	dp = LIST_FIRST(&np->n_cookies);
	if (!dp) {
		if (add) {
			dp = malloc(sizeof (struct nfsdmap),
			    M_NFSDIROFF, M_WAITOK);
			dp->ndm_eocookie = 0;
			LIST_INSERT_HEAD(&np->n_cookies, dp, ndm_list);
		} else
			goto out;
	}
	while (pos >= NFSNUMCOOKIES) {
		pos -= NFSNUMCOOKIES;
		if (LIST_NEXT(dp, ndm_list)) {
			if (!add && dp->ndm_eocookie < NFSNUMCOOKIES &&
			    pos >= dp->ndm_eocookie)
				goto out;
			dp = LIST_NEXT(dp, ndm_list);
		} else if (add) {
			dp2 = malloc(sizeof (struct nfsdmap),
			    M_NFSDIROFF, M_WAITOK);
			dp2->ndm_eocookie = 0;
			LIST_INSERT_AFTER(dp, dp2, ndm_list);
			dp = dp2;
		} else
			goto out;
	}
	if (pos >= dp->ndm_eocookie) {
		if (add)
			dp->ndm_eocookie = pos + 1;
		else
			goto out;
	}
	retval = &dp->ndm_cookies[pos];
out:
	return (retval);
}

/*
 * Invalidate cached directory information, except for the actual directory
 * blocks (which are invalidated separately).
 * Done mainly to avoid the use of stale offset cookies.
 */
void
nfs_invaldir(struct vnode *vp)
{
	struct nfsnode *np = VTONFS(vp);

	KASSERT(vp->v_type == VDIR, ("nfs: invaldir not dir"));
	nfs_dircookie_lock(np);
	np->n_direofoffset = 0;
	np->n_cookieverf.nfsuquad[0] = 0;
	np->n_cookieverf.nfsuquad[1] = 0;
	if (LIST_FIRST(&np->n_cookies))
		LIST_FIRST(&np->n_cookies)->ndm_eocookie = 0;
	nfs_dircookie_unlock(np);
}

/*
 * The write verifier has changed (probably due to a server reboot), so all
 * B_NEEDCOMMIT blocks will have to be written again.  Since they are on the
 * dirty block list as B_DELWRI, all this takes is clearing the B_NEEDCOMMIT
 * and B_CLUSTEROK flags.  Once done the new write verifier can be set for the
 * mount point.
 *
 * B_CLUSTEROK must be cleared along with B_NEEDCOMMIT because stage 1 data
 * writes are not clusterable.
 */
void
nfs_clearcommit(struct mount *mp)
{
	struct vnode *vp, *nvp;
	struct buf *bp, *nbp;
	struct bufobj *bo;

	MNT_ILOCK(mp);
	MNT_VNODE_FOREACH(vp, mp, nvp) {
		bo = &vp->v_bufobj;
		VI_LOCK(vp);
		if (vp->v_iflag & VI_DOOMED) {
			VI_UNLOCK(vp);
			continue;
		}
		vholdl(vp);
		VI_UNLOCK(vp);
		MNT_IUNLOCK(mp);
		BO_LOCK(bo);
		TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
			if (!BUF_ISLOCKED(bp) &&
			    (bp->b_flags & (B_DELWRI | B_NEEDCOMMIT))
			    == (B_DELWRI | B_NEEDCOMMIT))
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		}
		BO_UNLOCK(bo);
		vdrop(vp);
		MNT_ILOCK(mp);
	}
	MNT_IUNLOCK(mp);
}

/*
 * Helper functions for former macros.  Some of these should be
 * moved to their callers.
 */

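/*
 * Decode an optional file handle and optional post-op attributes from
 * the reply in *md, mapping the handle to a vnode with nfs_nget() and
 * loading any attributes into the attribute cache.
 */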
int
nfsm_mtofh_xx(struct vnode *d, struct vnode **v, int v3, int *f,
    struct mbuf **md, caddr_t *dpos)
{
	struct nfsnode *ttnp;
	struct vnode *ttvp;
	nfsfh_t *ttfhp;
	u_int32_t *tl;
	int ttfhsize;
	int t1;

	if (v3) {
		tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
		if (tl == NULL)
			return EBADRPC;
		*f = fxdr_unsigned(int, *tl);
	} else
		*f = 1;
	if (*f) {
		t1 = nfsm_getfh_xx(&ttfhp, &ttfhsize, (v3), md, dpos);
		if (t1 != 0)
			return t1;
		t1 = nfs_nget(d->v_mount, ttfhp, ttfhsize, &ttnp, LK_EXCLUSIVE);
		if (t1 != 0)
			return t1;
		*v = NFSTOV(ttnp);
	}
	if (v3) {
		tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
		if (tl == NULL)
			return EBADRPC;
		if (*f)
			*f = fxdr_unsigned(int, *tl);
		else if (fxdr_unsigned(int, *tl))
			nfsm_adv_xx(NFSX_V3FATTR, md, dpos);
	}
	if (*f) {
		ttvp = *v;
		t1 = nfs_loadattrcache(&ttvp, md, dpos, NULL, 0);
		if (t1)
			return t1;
		*v = ttvp;
	}
	return 0;
}

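/*
 * Dissect a file handle from the reply, returning its address in *f and
 * its size in *s (bounds-checked against NFSX_V3FHMAX for NFSv3).
 */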
int
nfsm_getfh_xx(nfsfh_t **f, int *s, int v3, struct mbuf **md, caddr_t *dpos)
{
	u_int32_t *tl;

	if (v3) {
		tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
		if (tl == NULL)
			return EBADRPC;
		*s = fxdr_unsigned(int, *tl);
		if (*s <= 0 || *s > NFSX_V3FHMAX)
			return EBADRPC;
	} else
		*s = NFSX_V2FH;
	*f = nfsm_dissect_xx(nfsm_rndup(*s), md, dpos);
	if (*f == NULL)
		return EBADRPC;
	else
		return 0;
}

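/*
 * Load attributes from the reply into the attribute cache of *v,
 * optionally copying them into *va as well.
 */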
int
nfsm_loadattr_xx(struct vnode **v, struct vattr *va, struct mbuf **md,
    caddr_t *dpos)
{
	int t1;
	struct vnode *ttvp = *v;

	t1 = nfs_loadattrcache(&ttvp, md, dpos, va, 0);
	if (t1 != 0)
		return t1;
	*v = ttvp;
	return 0;
}

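/*
 * Decode NFSv3 post-op attributes: set *f from the attributes-follow
 * flag and, if it is set, load the attributes into the cache.
 */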
int
nfsm_postop_attr_xx(struct vnode **v, int *f, struct vattr *va,
    struct mbuf **md, caddr_t *dpos)
{
	u_int32_t *tl;
	int t1;
	struct vnode *ttvp = *v;

	tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
	if (tl == NULL)
		return EBADRPC;
	*f = fxdr_unsigned(int, *tl);
	if (*f != 0) {
		t1 = nfs_loadattrcache(&ttvp, md, dpos, va, 1);
		if (t1 != 0) {
			*f = 0;
			return t1;
		}
		*v = ttvp;
	}
	return 0;
}

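/*
 * Decode NFSv3 weak cache consistency data: if pre-op attributes are
 * present, compare the pre-op mtime with the cached n_mtime, then
 * process the post-op attributes.
 */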
int
nfsm_wcc_data_xx(struct vnode **v, int *f, struct mbuf **md, caddr_t *dpos)
{
	u_int32_t *tl;
	int ttattrf, ttretf = 0;
	int t1;

	tl = nfsm_dissect_xx(NFSX_UNSIGNED, md, dpos);
	if (tl == NULL)
		return EBADRPC;
	if (*tl == nfs_true) {
		tl = nfsm_dissect_xx(6 * NFSX_UNSIGNED, md, dpos);
		if (tl == NULL)
			return EBADRPC;
		mtx_lock(&(VTONFS(*v))->n_mtx);
		if (*f)
			ttretf = (VTONFS(*v)->n_mtime.tv_sec ==
			    fxdr_unsigned(u_int32_t, *(tl + 2)) &&
			    VTONFS(*v)->n_mtime.tv_nsec ==
			    fxdr_unsigned(u_int32_t, *(tl + 3)));
		mtx_unlock(&(VTONFS(*v))->n_mtx);
	}
	t1 = nfsm_postop_attr_xx(v, &ttattrf, NULL, md, dpos);
	if (t1)
		return t1;
	if (*f)
		*f = ttretf;
	else
		*f = ttattrf;
	return 0;
}

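/*
 * Append an XDR counted string/opaque of length s to the request,
 * failing with ENAMETOOLONG if s exceeds the maximum m.
 */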
int
nfsm_strtom_xx(const char *a, int s, int m, struct mbuf **mb, caddr_t *bpos)
{
	u_int32_t *tl;
	int t1;

	if (s > m)
		return ENAMETOOLONG;
	t1 = nfsm_rndup(s) + NFSX_UNSIGNED;
	if (t1 <= M_TRAILINGSPACE(*mb)) {
		tl = nfsm_build_xx(t1, mb, bpos);
		*tl++ = txdr_unsigned(s);
		*(tl + ((t1 >> 2) - 2)) = 0;
		bcopy(a, tl, s);
	} else {
		t1 = nfsm_strtmbuf(mb, bpos, a, s);
		if (t1 != 0)
			return t1;
	}
	return 0;
}

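/*
 * Append the file handle of vnode v to the request: a variable-length
 * handle with a size prefix for NFSv3, a fixed NFSX_V2FH handle for NFSv2.
 */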
int
nfsm_fhtom_xx(struct vnode *v, int v3, struct mbuf **mb, caddr_t *bpos)
{
	u_int32_t *tl;
	int t1;
	caddr_t cp;

	if (v3) {
		t1 = nfsm_rndup(VTONFS(v)->n_fhsize) + NFSX_UNSIGNED;
		if (t1 < M_TRAILINGSPACE(*mb)) {
			tl = nfsm_build_xx(t1, mb, bpos);
			*tl++ = txdr_unsigned(VTONFS(v)->n_fhsize);
			*(tl + ((t1 >> 2) - 2)) = 0;
			bcopy(VTONFS(v)->n_fhp, tl, VTONFS(v)->n_fhsize);
		} else {
			t1 = nfsm_strtmbuf(mb, bpos,
			    (const char *)VTONFS(v)->n_fhp,
			    VTONFS(v)->n_fhsize);
			if (t1 != 0)
				return t1;
		}
	} else {
		cp = nfsm_build_xx(NFSX_V2FH, mb, bpos);
		bcopy(VTONFS(v)->n_fhp, cp, NFSX_V2FH);
	}
	return 0;
}

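/*
 * Encode an NFSv3 sattr3 from *va; each settable attribute is preceded
 * by a boolean indicating whether a new value follows.
 */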
void
nfsm_v3attrbuild_xx(struct vattr *va, int full, struct mbuf **mb,
    caddr_t *bpos)
{
	u_int32_t *tl;

	if (va->va_mode != (mode_t)VNOVAL) {
		tl = nfsm_build_xx(2 * NFSX_UNSIGNED, mb, bpos);
		*tl++ = nfs_true;
		*tl = txdr_unsigned(va->va_mode);
	} else {
		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
		*tl = nfs_false;
	}
	if (full && va->va_uid != (uid_t)VNOVAL) {
		tl = nfsm_build_xx(2 * NFSX_UNSIGNED, mb, bpos);
		*tl++ = nfs_true;
		*tl = txdr_unsigned(va->va_uid);
	} else {
		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
		*tl = nfs_false;
	}
	if (full && va->va_gid != (gid_t)VNOVAL) {
		tl = nfsm_build_xx(2 * NFSX_UNSIGNED, mb, bpos);
		*tl++ = nfs_true;
		*tl = txdr_unsigned(va->va_gid);
	} else {
		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
		*tl = nfs_false;
	}
	if (full && va->va_size != VNOVAL) {
		tl = nfsm_build_xx(3 * NFSX_UNSIGNED, mb, bpos);
		*tl++ = nfs_true;
		txdr_hyper(va->va_size, tl);
	} else {
		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
		*tl = nfs_false;
	}
	if (va->va_atime.tv_sec != VNOVAL) {
		if (va->va_atime.tv_sec != time_second) {
			tl = nfsm_build_xx(3 * NFSX_UNSIGNED, mb, bpos);
			*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
			txdr_nfsv3time(&va->va_atime, tl);
		} else {
			tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
			*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
		}
	} else {
		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
		*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
	}
	if (va->va_mtime.tv_sec != VNOVAL) {
		if (va->va_mtime.tv_sec != time_second) {
			tl = nfsm_build_xx(3 * NFSX_UNSIGNED, mb, bpos);
			*tl++ = txdr_unsigned(NFSV3SATTRTIME_TOCLIENT);
			txdr_nfsv3time(&va->va_mtime, tl);
		} else {
			tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
			*tl = txdr_unsigned(NFSV3SATTRTIME_TOSERVER);
		}
	} else {
		tl = nfsm_build_xx(NFSX_UNSIGNED, mb, bpos);
		*tl = txdr_unsigned(NFSV3SATTRTIME_DONTCHANGE);
	}
}