2009-05-04 15:23:58 +00:00
|
|
|
/*-
|
2017-11-20 19:43:44 +00:00
|
|
|
* SPDX-License-Identifier: BSD-3-Clause
|
|
|
|
*
|
2009-05-04 15:23:58 +00:00
|
|
|
* Copyright (c) 1989, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
|
|
|
*
|
|
|
|
* This code is derived from software contributed to Berkeley by
|
|
|
|
* Rick Macklem at The University of Guelph.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
2017-02-28 23:42:47 +00:00
|
|
|
* 3. Neither the name of the University nor the names of its contributors
|
2009-05-04 15:23:58 +00:00
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
2016-01-15 09:05:14 +00:00
|
|
|
#include "opt_inet.h"
|
2012-01-08 01:54:46 +00:00
|
|
|
#include "opt_inet6.h"
|
2011-06-18 23:02:53 +00:00
|
|
|
|
2014-03-16 10:55:57 +00:00
|
|
|
#include <sys/capsicum.h>
|
2011-08-11 12:30:23 +00:00
|
|
|
|
2009-05-04 15:23:58 +00:00
|
|
|
/*
|
|
|
|
* generally, I don't like #includes inside .h files, but it seems to
|
|
|
|
* be the easiest way to handle the port.
|
|
|
|
*/
|
2015-08-05 22:27:30 +00:00
|
|
|
#include <sys/fail.h>
|
2011-04-13 22:16:52 +00:00
|
|
|
#include <sys/hash.h>
|
2015-08-05 22:27:30 +00:00
|
|
|
#include <sys/sysctl.h>
|
2009-05-04 15:23:58 +00:00
|
|
|
#include <fs/nfs/nfsport.h>
|
2016-01-15 09:05:14 +00:00
|
|
|
#include <netinet/in_fib.h>
|
2009-05-04 15:23:58 +00:00
|
|
|
#include <netinet/if_ether.h>
|
2016-01-15 09:05:14 +00:00
|
|
|
#include <netinet6/ip6_var.h>
|
2009-05-04 15:23:58 +00:00
|
|
|
#include <net/if_types.h>
|
2020-05-28 07:35:07 +00:00
|
|
|
#include <net/route/nhop.h>
|
2009-05-04 15:23:58 +00:00
|
|
|
|
2011-06-18 23:02:53 +00:00
|
|
|
#include <fs/nfsclient/nfs_kdtrace.h>
|
|
|
|
|
|
|
|
#ifdef KDTRACE_HOOKS
|
|
|
|
dtrace_nfsclient_attrcache_flush_probe_func_t
|
|
|
|
dtrace_nfscl_attrcache_flush_done_probe;
|
|
|
|
uint32_t nfscl_attrcache_flush_done_id;
|
|
|
|
|
|
|
|
dtrace_nfsclient_attrcache_get_hit_probe_func_t
|
|
|
|
dtrace_nfscl_attrcache_get_hit_probe;
|
|
|
|
uint32_t nfscl_attrcache_get_hit_id;
|
|
|
|
|
|
|
|
dtrace_nfsclient_attrcache_get_miss_probe_func_t
|
|
|
|
dtrace_nfscl_attrcache_get_miss_probe;
|
|
|
|
uint32_t nfscl_attrcache_get_miss_id;
|
|
|
|
|
|
|
|
dtrace_nfsclient_attrcache_load_probe_func_t
|
|
|
|
dtrace_nfscl_attrcache_load_done_probe;
|
|
|
|
uint32_t nfscl_attrcache_load_done_id;
|
|
|
|
#endif /* !KDTRACE_HOOKS */
|
|
|
|
|
2009-05-04 15:23:58 +00:00
|
|
|
extern u_int32_t newnfs_true, newnfs_false, newnfs_xdrneg1;
|
|
|
|
extern struct vop_vector newnfs_vnodeops;
|
|
|
|
extern struct vop_vector newnfs_fifoops;
|
|
|
|
extern uma_zone_t newnfsnode_zone;
|
|
|
|
extern struct buf_ops buf_ops_newnfs;
|
Allocate pager bufs from UMA instead of 80-ish mutex protected linked list.
o In vm_pager_bufferinit() create pbuf_zone and start accounting on how many
pbufs are we going to have set.
In various subsystems that are going to utilize pbufs create private zones
via call to pbuf_zsecond_create(). The latter calls uma_zsecond_create(),
and sets a limit on created zone. After startup preallocate pbufs according
to requirements of all pbuf zones.
Subsystems that used to have a private limit with old allocator now have
private pbuf zones: md(4), fusefs, NFS client, smbfs, VFS cluster, FFS,
swap, vnode pager.
The following subsystems use shared pbuf zone: cam(4), nvme(4), physio(9),
aio(4). They should have their private limits, but changing that is out of
scope of this commit.
o Fetch tunable value of kern.nswbuf from init_param2() and while here move
NSWBUF_MIN to opt_param.h and eliminate opt_swap.h, that was holding only
this option.
Default values aren't touched by this commit, but they probably should be
reviewed wrt to modern hardware.
This change removes a tight bottleneck from sendfile(2) operation, that
uses pbufs in vnode pager. Other pagers also would benefit from faster
allocation.
Together with: gallatin
Tested by: pho
2019-01-15 01:02:16 +00:00
|
|
|
extern uma_zone_t ncl_pbuf_zone;
|
2009-05-04 15:23:58 +00:00
|
|
|
extern short nfsv4_cbport;
|
|
|
|
extern int nfscl_enablecallb;
|
|
|
|
extern int nfs_numnfscbd;
|
|
|
|
extern int nfscl_inited;
|
|
|
|
struct mtx ncl_iod_mutex;
|
|
|
|
NFSDLOCKMUTEX;
|
Merge the pNFS server code from projects/pnfs-planb-server into head.
This code merge adds a pNFS service to the NFSv4.1 server. Although it is
a large commit it should not affect behaviour for a non-pNFS NFS server.
Some documentation on how this works can be found at:
http://people.freebsd.org/~rmacklem/pnfs-planb-setup.txt
and will hopefully be turned into a proper document soon.
This is a merge of the kernel code. Userland and man page changes will
come soon, once the dust settles on this merge.
It has passed a "make universe", so I hope it will not cause build problems.
It also adds NFSv4.1 server support for the "current stateid".
Here is a brief overview of the pNFS service:
A pNFS service separates the Read/Write oeprations from all the other NFSv4.1
Metadata operations. It is hoped that this separation allows a pNFS service
to be configured that exceeds the limits of a single NFS server for either
storage capacity and/or I/O bandwidth.
It is possible to configure mirroring within the data servers (DSs) so that
the data storage file for an MDS file will be mirrored on two or more of
the DSs.
When this is used, failure of a DS will not stop the pNFS service and a
failed DS can be recovered once repaired while the pNFS service continues
to operate. Although two way mirroring would be the norm, it is possible
to set a mirroring level of up to four or the number of DSs, whichever is
less.
The Metadata server will always be a single point of failure,
just as a single NFS server is.
A Plan B pNFS service consists of a single MetaData Server (MDS) and K
Data Servers (DS), all of which are recent FreeBSD systems.
Clients will mount the MDS as they would a single NFS server.
When files are created, the MDS creates a file tree identical to what a
single NFS server creates, except that all the regular (VREG) files will
be empty. As such, if you look at the exported tree on the MDS directly
on the MDS server (not via an NFS mount), the files will all be of size 0.
Each of these files will also have two extended attributes in the system
attribute name space:
pnfsd.dsfile - This extended attrbute stores the information that
the MDS needs to find the data storage file(s) on DS(s) for this file.
pnfsd.dsattr - This extended attribute stores the Size, AccessTime, ModifyTime
and Change attributes for the file, so that the MDS doesn't need to
acquire the attributes from the DS for every Getattr operation.
For each regular (VREG) file, the MDS creates a data storage file on one
(or more if mirroring is enabled) of the DSs in one of the "dsNN"
subdirectories. The name of this file is the file handle
of the file on the MDS in hexadecimal so that the name is unique.
The DSs use subdirectories named "ds0" to "dsN" so that no one directory
gets too large. The value of "N" is set via the sysctl vfs.nfsd.dsdirsize
on the MDS, with the default being 20.
For production servers that will store a lot of files, this value should
probably be much larger.
It can be increased when the "nfsd" daemon is not running on the MDS,
once the "dsK" directories are created.
For pNFS aware NFSv4.1 clients, the FreeBSD server will return two pieces
of information to the client that allows it to do I/O directly to the DS.
DeviceInfo - This is relatively static information that defines what a DS
is. The critical bits of information returned by the FreeBSD
server is the IP address of the DS and, for the Flexible
File layout, that NFSv4.1 is to be used and that it is
"tightly coupled".
There is a "deviceid" which identifies the DeviceInfo.
Layout - This is per file and can be recalled by the server when it
is no longer valid. For the FreeBSD server, there is support
for two types of layout, call File and Flexible File layout.
Both allow the client to do I/O on the DS via NFSv4.1 I/O
operations. The Flexible File layout is a more recent variant
that allows specification of mirrors, where the client is
expected to do writes to all mirrors to maintain them in a
consistent state. The Flexible File layout also allows the
client to report I/O errors for a DS back to the MDS.
The Flexible File layout supports two variants referred to as
"tightly coupled" vs "loosely coupled". The FreeBSD server always
uses the "tightly coupled" variant where the client uses the
same credentials to do I/O on the DS as it would on the MDS.
For the "loosely coupled" variant, the layout specifies a
synthetic user/group that the client uses to do I/O on the DS.
The FreeBSD server does not do striping and always returns
layouts for the entire file. The critical information in a layout
is Read vs Read/Writea and DeviceID(s) that identify which
DS(s) the data is stored on.
At this time, the MDS generates File Layout layouts to NFSv4.1 clients
that know how to do pNFS for the non-mirrored DS case unless the sysctl
vfs.nfsd.default_flexfile is set non-zero, in which case Flexible File
layouts are generated.
The mirrored DS configuration always generates Flexible File layouts.
For NFS clients that do not support NFSv4.1 pNFS, all I/O operations
are done against the MDS which acts as a proxy for the appropriate DS(s).
When the MDS receives an I/O RPC, it will do the RPC on the DS as a proxy.
If the DS is on the same machine, the MDS/DS will do the RPC on the DS as
a proxy and so on, until the machine runs out of some resource, such as
session slots or mbufs.
As such, DSs must be separate systems from the MDS.
Tested by: james.rose@framestore.com
Relnotes: yes
2018-06-12 19:36:32 +00:00
|
|
|
extern struct mtx nfsrv_dslock_mtx;
|
2009-05-04 15:23:58 +00:00
|
|
|
|
|
|
|
extern void (*ncl_call_invalcaches)(struct vnode *);
|
|
|
|
|
2015-08-05 22:27:30 +00:00
|
|
|
SYSCTL_DECL(_vfs_nfs);
|
|
|
|
static int ncl_fileid_maxwarnings = 10;
|
|
|
|
SYSCTL_INT(_vfs_nfs, OID_AUTO, fileid_maxwarnings, CTLFLAG_RWTUN,
|
|
|
|
&ncl_fileid_maxwarnings, 0,
|
|
|
|
"Limit fileid corruption warnings; 0 is off; -1 is unlimited");
|
|
|
|
static volatile int ncl_fileid_nwarnings;
|
|
|
|
|
|
|
|
static void nfscl_warn_fileid(struct nfsmount *, struct nfsvattr *,
|
|
|
|
struct nfsvattr *);
|
|
|
|
|
2009-05-04 15:23:58 +00:00
|
|
|
/*
|
|
|
|
* Comparison function for vfs_hash functions.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
newnfs_vncmpf(struct vnode *vp, void *arg)
|
|
|
|
{
|
|
|
|
struct nfsfh *nfhp = (struct nfsfh *)arg;
|
|
|
|
struct nfsnode *np = VTONFS(vp);
|
|
|
|
|
|
|
|
if (np->n_fhp->nfh_len != nfhp->nfh_len ||
|
|
|
|
NFSBCMP(np->n_fhp->nfh_fh, nfhp->nfh_fh, nfhp->nfh_len))
|
|
|
|
return (1);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Look up a vnode/nfsnode by file handle.
|
|
|
|
* Callers must check for mount points!!
|
|
|
|
* In all cases, a pointer to a
|
|
|
|
* nfsnode structure is returned.
|
|
|
|
* This variant takes a "struct nfsfh *" as second argument and uses
|
|
|
|
* that structure up, either by hanging off the nfsnode or FREEing it.
|
|
|
|
*/
|
|
|
|
int
nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp,
    struct componentname *cnp, struct thread *td, struct nfsnode **npp,
    void *stuff, int lkflags)
{
	struct nfsnode *np, *dnp;
	struct vnode *vp, *nvp;
	struct nfsv4node *newd, *oldd;
	int error;
	u_int hash;
	struct nfsmount *nmp;

	nmp = VFSTONFS(mntp);
	dnp = VTONFS(dvp);
	*npp = NULL;

	/* Hash the file handle bytes to pick the vfs_hash bucket. */
	hash = fnv_32_buf(nfhp->nfh_fh, nfhp->nfh_len, FNV1_32_INIT);

	error = vfs_hash_get(mntp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, nfhp);
	if (error == 0 && nvp != NULL) {
		/*
		 * I believe there is a slight chance that vgonel() could
		 * get called on this vnode between when NFSVOPLOCK() drops
		 * the VI_LOCK() and vget() acquires it again, so that it
		 * hasn't yet had v_usecount incremented. If this were to
		 * happen, the VIRF_DOOMED flag would be set, so check for
		 * that here. Since we now have the v_usecount incremented,
		 * we should be ok until we vrele() it, if the VIRF_DOOMED
		 * flag isn't set now.
		 */
		VI_LOCK(nvp);
		if (VN_IS_DOOMED(nvp)) {
			VI_UNLOCK(nvp);
			vrele(nvp);
			error = ENOENT;
		} else {
			VI_UNLOCK(nvp);
		}
	}
	if (error) {
		/* This function owns nfhp; free it on every error return. */
		free(nfhp, M_NFSFH);
		return (error);
	}
	if (nvp != NULL) {
		/* Cache hit: an nfsnode for this handle already exists. */
		np = VTONFS(nvp);
		/*
		 * For NFSv4, check to see if it is the same name and
		 * replace the name, if it is different.
		 */
		oldd = newd = NULL;
		if ((nmp->nm_flag & NFSMNT_NFSV4) && np->n_v4 != NULL &&
		    nvp->v_type == VREG &&
		    (np->n_v4->n4_namelen != cnp->cn_namelen ||
		    NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
		    cnp->cn_namelen) ||
		    dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
		    NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
		    dnp->n_fhp->nfh_len))) {
			/*
			 * Allocate the replacement before taking the node
			 * lock (M_WAITOK may sleep), then re-check the
			 * mismatch under the lock before swapping it in.
			 */
			newd = malloc(
			    sizeof (struct nfsv4node) + dnp->n_fhp->nfh_len +
			    + cnp->cn_namelen - 1, M_NFSV4NODE, M_WAITOK);
			NFSLOCKNODE(np);
			if (newd != NULL && np->n_v4 != NULL && nvp->v_type == VREG
			    && (np->n_v4->n4_namelen != cnp->cn_namelen ||
			    NFSBCMP(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
			    cnp->cn_namelen) ||
			    dnp->n_fhp->nfh_len != np->n_v4->n4_fhlen ||
			    NFSBCMP(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
			    dnp->n_fhp->nfh_len))) {
				oldd = np->n_v4;
				np->n_v4 = newd;
				newd = NULL;
				np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
				np->n_v4->n4_namelen = cnp->cn_namelen;
				NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
				    dnp->n_fhp->nfh_len);
				NFSBCOPY(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
				    cnp->cn_namelen);
			}
			NFSUNLOCKNODE(np);
		}
		/* Free whichever of the old/new name blocks was not kept. */
		if (newd != NULL)
			free(newd, M_NFSV4NODE);
		if (oldd != NULL)
			free(oldd, M_NFSV4NODE);
		*npp = np;
		free(nfhp, M_NFSFH);
		return (0);
	}
	/* Cache miss: build a new nfsnode/vnode pair. */
	np = uma_zalloc(newnfsnode_zone, M_WAITOK | M_ZERO);

	error = getnewvnode(nfs_vnode_tag, mntp, &newnfs_vnodeops, &nvp);
	if (error) {
		uma_zfree(newnfsnode_zone, np);
		free(nfhp, M_NFSFH);
		return (error);
	}
	vp = nvp;
	KASSERT(vp->v_bufobj.bo_bsize != 0, ("nfscl_nget: bo_bsize == 0"));
	vp->v_bufobj.bo_ops = &buf_ops_newnfs;
	vp->v_data = np;
	np->n_vnode = vp;
	/*
	 * Initialize the mutex even if the vnode is going to be a loser.
	 * This simplifies the logic in reclaim, which can then unconditionally
	 * destroy the mutex (in the case of the loser, or if hash_insert
	 * happened to return an error no special casing is needed).
	 */
	mtx_init(&np->n_mtx, "NEWNFSnode lock", NULL, MTX_DEF | MTX_DUPOK);
	lockinit(&np->n_excl, PVFS, "nfsupg", VLKTIMEOUT, LK_NOSHARE |
	    LK_CANRECURSE);

	/*
	 * Are we getting the root? If so, make sure the vnode flags
	 * are correct
	 */
	if ((nfhp->nfh_len == nmp->nm_fhsize) &&
	    !bcmp(nfhp->nfh_fh, nmp->nm_fh, nfhp->nfh_len)) {
		if (vp->v_type == VNON)
			vp->v_type = VDIR;
		vp->v_vflag |= VV_ROOT;
	}

	vp->v_vflag |= VV_VMSIZEVNLOCK;

	/* The nfsnode takes over ownership of nfhp from here on. */
	np->n_fhp = nfhp;
	/*
	 * For NFSv4, we have to attach the directory file handle and
	 * file name, so that Open Ops can be done later.
	 */
	if (nmp->nm_flag & NFSMNT_NFSV4) {
		np->n_v4 = malloc(sizeof (struct nfsv4node)
		    + dnp->n_fhp->nfh_len + cnp->cn_namelen - 1, M_NFSV4NODE,
		    M_WAITOK);
		np->n_v4->n4_fhlen = dnp->n_fhp->nfh_len;
		np->n_v4->n4_namelen = cnp->cn_namelen;
		NFSBCOPY(dnp->n_fhp->nfh_fh, np->n_v4->n4_data,
		    dnp->n_fhp->nfh_len);
		NFSBCOPY(cnp->cn_nameptr, NFS4NODENAME(np->n_v4),
		    cnp->cn_namelen);
	} else {
		np->n_v4 = NULL;
	}

	/*
	 * NFS supports recursive and shared locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	VN_LOCK_ASHARE(vp);
	error = insmntque(vp, mntp);
	if (error != 0) {
		/* Undo everything built above for the doomed vnode. */
		*npp = NULL;
		mtx_destroy(&np->n_mtx);
		lockdestroy(&np->n_excl);
		free(nfhp, M_NFSFH);
		if (np->n_v4 != NULL)
			free(np->n_v4, M_NFSV4NODE);
		uma_zfree(newnfsnode_zone, np);
		return (error);
	}
	error = vfs_hash_insert(vp, hash, lkflags,
	    td, &nvp, newnfs_vncmpf, nfhp);
	if (error)
		return (error);
	if (nvp != NULL) {
		/* Lost an insert race; return the winner's node. */
		*npp = VTONFS(nvp);
		/* vfs_hash_insert() vput()'s the losing vnode */
		return (0);
	}
	*npp = np;

	return (0);
}
|
|
|
|
|
|
|
|
/*
|
2016-05-11 06:35:46 +00:00
|
|
|
* Another variant of nfs_nget(). This one is only used by reopen. It
|
2009-05-04 15:23:58 +00:00
|
|
|
* takes almost the same args as nfs_nget(), but only succeeds if an entry
|
|
|
|
* exists in the cache. (Since files should already be "open" with a
|
|
|
|
* vnode ref cnt on the node when reopen calls this, it should always
|
|
|
|
* succeed.)
|
|
|
|
* Also, don't get a vnode lock, since it may already be locked by some
|
|
|
|
* other process that is handling it. This is ok, since all other threads
|
|
|
|
* on the client are blocked by the nfsc_lock being exclusively held by the
|
|
|
|
* caller of this function.
|
|
|
|
*/
|
|
|
|
int
nfscl_ngetreopen(struct mount *mntp, u_int8_t *fhp, int fhsize,
    struct thread *td, struct nfsnode **npp)
{
	struct vnode *nvp;
	u_int hash;
	struct nfsfh *nfhp;
	int error;

	*npp = NULL;
	/* For forced dismounts, just return error. */
	if (NFSCL_FORCEDISM(mntp))
		return (EINTR);
	/* Build a temporary nfsfh for the hash comparison; freed below. */
	nfhp = malloc(sizeof (struct nfsfh) + fhsize,
	    M_NFSFH, M_WAITOK);
	bcopy(fhp, &nfhp->nfh_fh[0], fhsize);
	nfhp->nfh_len = fhsize;

	hash = fnv_32_buf(fhp, fhsize, FNV1_32_INIT);

	/*
	 * First, try to get the vnode locked, but don't block for the lock.
	 */
	error = vfs_hash_get(mntp, hash, (LK_EXCLUSIVE | LK_NOWAIT), td, &nvp,
	    newnfs_vncmpf, nfhp);
	if (error == 0 && nvp != NULL) {
		/* Got the lock; callers don't want it held, so drop it. */
		NFSVOPUNLOCK(nvp);
	} else if (error == EBUSY) {
		/*
		 * It is safe so long as a vflush() with
		 * FORCECLOSE has not been done. Since the Renew thread is
		 * stopped and the MNTK_UNMOUNTF flag is set before doing
		 * a vflush() with FORCECLOSE, we should be ok here.
		 */
		if (NFSCL_FORCEDISM(mntp))
			error = EINTR;
		else {
			/* Take a reference without the vnode lock instead. */
			vfs_hash_ref(mntp, hash, td, &nvp, newnfs_vncmpf, nfhp);
			if (nvp == NULL) {
				error = ENOENT;
			} else if (VN_IS_DOOMED(nvp)) {
				error = ENOENT;
				vrele(nvp);
			} else {
				error = 0;
			}
		}
	}
	free(nfhp, M_NFSFH);
	if (error)
		return (error);
	if (nvp != NULL) {
		*npp = VTONFS(nvp);
		return (0);
	}
	/* No entry in the cache for this handle. */
	return (EINVAL);
}
|
|
|
|
|
2015-08-05 22:27:30 +00:00
|
|
|
static void
|
|
|
|
nfscl_warn_fileid(struct nfsmount *nmp, struct nfsvattr *oldnap,
|
|
|
|
struct nfsvattr *newnap)
|
|
|
|
{
|
|
|
|
int off;
|
|
|
|
|
|
|
|
if (ncl_fileid_maxwarnings >= 0 &&
|
|
|
|
ncl_fileid_nwarnings >= ncl_fileid_maxwarnings)
|
|
|
|
return;
|
|
|
|
off = 0;
|
|
|
|
if (ncl_fileid_maxwarnings >= 0) {
|
|
|
|
if (++ncl_fileid_nwarnings >= ncl_fileid_maxwarnings)
|
|
|
|
off = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
printf("newnfs: server '%s' error: fileid changed. "
|
|
|
|
"fsid %jx:%jx: expected fileid %#jx, got %#jx. "
|
|
|
|
"(BROKEN NFS SERVER OR MIDDLEWARE)\n",
|
|
|
|
nmp->nm_com.nmcom_hostname,
|
|
|
|
(uintmax_t)nmp->nm_fsid[0],
|
|
|
|
(uintmax_t)nmp->nm_fsid[1],
|
|
|
|
(uintmax_t)oldnap->na_fileid,
|
|
|
|
(uintmax_t)newnap->na_fileid);
|
|
|
|
|
|
|
|
if (off)
|
|
|
|
printf("newnfs: Logged %d times about fileid corruption; "
|
|
|
|
"going quiet to avoid spamming logs excessively. (Limit "
|
|
|
|
"is: %d).\n", ncl_fileid_nwarnings,
|
|
|
|
ncl_fileid_maxwarnings);
|
|
|
|
}
|
|
|
|
|
2009-05-04 15:23:58 +00:00
|
|
|
/*
|
|
|
|
* Load the attribute cache (that lives in the nfsnode entry) with
|
|
|
|
* the attributes of the second argument and
|
|
|
|
* Iff vaper not NULL
|
|
|
|
* copy the attributes to *vaper
|
|
|
|
* Similar to nfs_loadattrcache(), except the attributes are passed in
|
|
|
|
* instead of being parsed out of the mbuf list.
|
|
|
|
*/
|
|
|
|
int
nfscl_loadattrcache(struct vnode **vpp, struct nfsvattr *nap, void *nvaper,
    void *stuff, int writeattr, int dontshrink)
{
	struct vnode *vp = *vpp;
	struct vattr *vap, *nvap = &nap->na_vattr, *vaper = nvaper;
	struct nfsnode *np;
	struct nfsmount *nmp;
	struct timespec mtime_save;
	int error, force_fid_err;

	error = 0;

	/*
	 * If v_type == VNON it is a new node, so fill in the v_type,
	 * n_mtime fields. Check to see if it represents a special
	 * device, and if so, check for a possible alias. Once the
	 * correct vnode has been obtained, fill in the rest of the
	 * information.
	 */
	np = VTONFS(vp);
	NFSLOCKNODE(np);
	if (vp->v_type != nvap->va_type) {
		vp->v_type = nvap->va_type;
		if (vp->v_type == VFIFO)
			vp->v_op = &newnfs_fifoops;
		np->n_mtime = nvap->va_mtime;
	}
	nmp = VFSTONFS(vp->v_mount);
	vap = &np->n_vattr.na_vattr;
	/* Remember the previous mtime to detect stale attrs later. */
	mtime_save = vap->va_mtime;
	if (writeattr) {
		/*
		 * Write-op post-op attrs: update only the fields a write
		 * can change, leaving the rest of the cache untouched.
		 */
		np->n_vattr.na_filerev = nap->na_filerev;
		np->n_vattr.na_size = nap->na_size;
		np->n_vattr.na_mtime = nap->na_mtime;
		np->n_vattr.na_ctime = nap->na_ctime;
		np->n_vattr.na_fsid = nap->na_fsid;
		np->n_vattr.na_mode = nap->na_mode;
	} else {
		force_fid_err = 0;
		KFAIL_POINT_ERROR(DEBUG_FP, nfscl_force_fileid_warning,
		    force_fid_err);
		/*
		 * BROKEN NFS SERVER OR MIDDLEWARE
		 *
		 * Certain NFS servers (certain old proprietary filers ca.
		 * 2006) or broken middleboxes (e.g. WAN accelerator products)
		 * will respond to GETATTR requests with results for a
		 * different fileid.
		 *
		 * The WAN accelerator we've observed not only serves stale
		 * cache results for a given file, it also occasionally serves
		 * results for wholly different files. This causes surprising
		 * problems; for example the cached size attribute of a file
		 * may truncate down and then back up, resulting in zero
		 * regions in file contents read by applications. We observed
		 * this reliably with Clang and .c files during parallel build.
		 * A pcap revealed packet fragmentation and GETATTR RPC
		 * responses with wholly wrong fileids.
		 */
		if ((np->n_vattr.na_fileid != 0 &&
		    np->n_vattr.na_fileid != nap->na_fileid) ||
		    force_fid_err) {
			/* Reject the bogus attrs instead of caching them. */
			nfscl_warn_fileid(nmp, &np->n_vattr, nap);
			error = EIDRM;
			goto out;
		}
		NFSBCOPY((caddr_t)nap, (caddr_t)&np->n_vattr,
		    sizeof (struct nfsvattr));
	}

	/*
	 * For NFSv4, if the node's fsid is not equal to the mount point's
	 * fsid, return the low order 32bits of the node's fsid. This
	 * allows getcwd(3) to work. There is a chance that the fsid might
	 * be the same as a local fs, but since this is in an NFS mount
	 * point, I don't think that will cause any problems?
	 */
	if (NFSHASNFSV4(nmp) && NFSHASHASSETFSID(nmp) &&
	    (nmp->nm_fsid[0] != np->n_vattr.na_filesid[0] ||
	    nmp->nm_fsid[1] != np->n_vattr.na_filesid[1])) {
		/*
		 * va_fsid needs to be set to some value derived from
		 * np->n_vattr.na_filesid that is not equal
		 * vp->v_mount->mnt_stat.f_fsid[0], so that it changes
		 * from the value used for the top level server volume
		 * in the mounted subtree.
		 */
		vn_fsid(vp, vap);
		if ((uint32_t)vap->va_fsid == np->n_vattr.na_filesid[0])
			vap->va_fsid = hash32_buf(
			    np->n_vattr.na_filesid, 2 * sizeof(uint64_t), 0);
	} else
		vn_fsid(vp, vap);
	/* Attribute cache is fresh as of now. */
	np->n_attrstamp = time_second;
	if (vap->va_size != np->n_size) {
		if (vap->va_type == VREG) {
			if (dontshrink && vap->va_size < np->n_size) {
				/*
				 * We've been told not to shrink the file;
				 * zero np->n_attrstamp to indicate that
				 * the attributes are stale.
				 */
				vap->va_size = np->n_size;
				np->n_attrstamp = 0;
				KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			} else if (np->n_flag & NMODIFIED) {
				/*
				 * We've modified the file: Use the larger
				 * of our size, and the server's size.
				 */
				if (vap->va_size < np->n_size) {
					vap->va_size = np->n_size;
				} else {
					np->n_size = vap->va_size;
					np->n_flag |= NSIZECHANGED;
				}
			} else {
				np->n_size = vap->va_size;
				np->n_flag |= NSIZECHANGED;
			}
		} else {
			np->n_size = vap->va_size;
		}
	}
	/*
	 * The following checks are added to prevent a race between (say)
	 * a READDIR+ and a WRITE.
	 * READDIR+, WRITE requests sent out.
	 * READDIR+ resp, WRITE resp received on client.
	 * However, the WRITE resp was handled before the READDIR+ resp
	 * causing the post op attrs from the write to be loaded first
	 * and the attrs from the READDIR+ to be loaded later. If this
	 * happens, we have stale attrs loaded into the attrcache.
	 * We detect this by for the mtime moving back. We invalidate the
	 * attrcache when this happens.
	 */
	if (timespeccmp(&mtime_save, &vap->va_mtime, >)) {
		/* Size changed or mtime went backwards */
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
	}
	if (vaper != NULL) {
		/* Copy out the attrs, overlaid with any locally cached
		 * access/modify times (NACC/NUPD). */
		NFSBCOPY((caddr_t)vap, (caddr_t)vaper, sizeof(*vap));
		if (np->n_flag & NCHG) {
			if (np->n_flag & NACC)
				vaper->va_atime = np->n_atim;
			if (np->n_flag & NUPD)
				vaper->va_mtime = np->n_mtim;
		}
	}

out:
#ifdef KDTRACE_HOOKS
	if (np->n_attrstamp != 0)
		KDTRACE_NFS_ATTRCACHE_LOAD_DONE(vp, vap, error);
#endif
	/* Note: ncl_pager_setsize() unlocks the node mutex. */
	(void)ncl_pager_setsize(vp, NULL);
	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Call vnode_pager_setsize() if the size of the node changed, as
|
|
|
|
* recorded in nfsnode vs. v_object, or delay the call if notifying
|
|
|
|
* the pager is not possible at the moment.
|
|
|
|
*
|
|
|
|
* If nsizep is non-NULL, the call is delayed and the new node size is
|
|
|
|
* provided. Caller should itself call vnode_pager_setsize() if
|
|
|
|
* function returned true. If nsizep is NULL, function tries to call
|
|
|
|
* vnode_pager_setsize() itself if needed and possible, and the nfs
|
|
|
|
* node is unlocked unconditionally, the return value is not useful.
|
|
|
|
*/
|
|
|
|
bool
ncl_pager_setsize(struct vnode *vp, u_quad_t *nsizep)
{
	struct nfsnode *np;
	vm_object_t object;
	struct vattr *vap;
	u_quad_t nsize;
	bool setnsize;

	np = VTONFS(vp);
	/* Caller must hold the nfsnode mutex (asserted here). */
	NFSASSERTNODE(np);

	vap = &np->n_vattr.na_vattr;
	nsize = vap->va_size;
	object = vp->v_object;
	setnsize = false;

	if (object != NULL && nsize != object->un_pager.vnp.vnp_size) {
		/*
		 * Only notify the pager immediately when the vnode is
		 * exclusively locked and not in a TDP2_SBPAGES context;
		 * otherwise record NVNSETSZSKIP so the update happens later.
		 */
		if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE &&
		    (curthread->td_pflags2 & TDP2_SBPAGES) == 0)
			setnsize = true;
		else
			np->n_flag |= NVNSETSZSKIP;
	}
	if (nsizep == NULL) {
		/*
		 * Non-delayed mode: drop the node lock and, if safe, call
		 * vnode_pager_setsize() ourselves.  The return value is
		 * not meaningful in this mode (always false).
		 */
		NFSUNLOCKNODE(np);
		if (setnsize)
			vnode_pager_setsize(vp, nsize);
		setnsize = false;
	} else {
		/* Delayed mode: hand the size back; caller does the call. */
		*nsizep = nsize;
	}
	return (setnsize);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Fill in the client id name. For these bytes:
|
|
|
|
* 1 - they must be unique
|
|
|
|
* 2 - they should be persistent across client reboots
|
|
|
|
* 1 is more critical than 2
|
|
|
|
* Use the mount point's unique id plus either the uuid or, if that
|
|
|
|
* isn't set, random junk.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
nfscl_fillclid(u_int64_t clval, char *uuid, u_int8_t *cp, u_int16_t idlen)
|
|
|
|
{
|
|
|
|
int uuidlen;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* First, put in the 64bit mount point identifier.
|
|
|
|
*/
|
|
|
|
if (idlen >= sizeof (u_int64_t)) {
|
|
|
|
NFSBCOPY((caddr_t)&clval, cp, sizeof (u_int64_t));
|
|
|
|
cp += sizeof (u_int64_t);
|
|
|
|
idlen -= sizeof (u_int64_t);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If uuid is non-zero length, use it.
|
|
|
|
*/
|
|
|
|
uuidlen = strlen(uuid);
|
|
|
|
if (uuidlen > 0 && idlen >= uuidlen) {
|
|
|
|
NFSBCOPY(uuid, cp, uuidlen);
|
|
|
|
cp += uuidlen;
|
|
|
|
idlen -= uuidlen;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This only normally happens if the uuid isn't set.
|
|
|
|
*/
|
|
|
|
while (idlen > 0) {
|
|
|
|
*cp++ = (u_int8_t)(arc4random() % 256);
|
|
|
|
idlen--;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Fill in a lock owner name. For now, pid + the process's creation time.
|
|
|
|
*/
|
|
|
|
void
|
2011-06-05 18:17:37 +00:00
|
|
|
nfscl_filllockowner(void *id, u_int8_t *cp, int flags)
|
2009-05-04 15:23:58 +00:00
|
|
|
{
|
|
|
|
union {
|
|
|
|
u_int32_t lval;
|
|
|
|
u_int8_t cval[4];
|
|
|
|
} tl;
|
|
|
|
struct proc *p;
|
|
|
|
|
2011-06-05 18:17:37 +00:00
|
|
|
if (id == NULL) {
|
2017-04-13 21:54:19 +00:00
|
|
|
/* Return the single open_owner of all 0 bytes. */
|
2011-06-05 18:17:37 +00:00
|
|
|
bzero(cp, NFSV4CL_LOCKNAMELEN);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if ((flags & F_POSIX) != 0) {
|
|
|
|
p = (struct proc *)id;
|
|
|
|
tl.lval = p->p_pid;
|
|
|
|
*cp++ = tl.cval[0];
|
|
|
|
*cp++ = tl.cval[1];
|
|
|
|
*cp++ = tl.cval[2];
|
|
|
|
*cp++ = tl.cval[3];
|
|
|
|
tl.lval = p->p_stats->p_start.tv_sec;
|
|
|
|
*cp++ = tl.cval[0];
|
|
|
|
*cp++ = tl.cval[1];
|
|
|
|
*cp++ = tl.cval[2];
|
|
|
|
*cp++ = tl.cval[3];
|
|
|
|
tl.lval = p->p_stats->p_start.tv_usec;
|
|
|
|
*cp++ = tl.cval[0];
|
|
|
|
*cp++ = tl.cval[1];
|
|
|
|
*cp++ = tl.cval[2];
|
|
|
|
*cp = tl.cval[3];
|
2011-06-05 20:22:56 +00:00
|
|
|
} else if ((flags & F_FLOCK) != 0) {
|
|
|
|
bcopy(&id, cp, sizeof(id));
|
|
|
|
bzero(&cp[sizeof(id)], NFSV4CL_LOCKNAMELEN - sizeof(id));
|
2011-06-05 18:17:37 +00:00
|
|
|
} else {
|
2011-06-05 20:22:56 +00:00
|
|
|
printf("nfscl_filllockowner: not F_POSIX or F_FLOCK\n");
|
2011-06-05 18:17:37 +00:00
|
|
|
bzero(cp, NFSV4CL_LOCKNAMELEN);
|
|
|
|
}
|
2009-05-04 15:23:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Find the parent process for the thread passed in as an argument.
|
|
|
|
* If none exists, return NULL, otherwise return a thread for the parent.
|
|
|
|
* (Can be any of the threads, since it is only used for td->td_proc.)
|
|
|
|
*/
|
|
|
|
NFSPROC_T *
|
|
|
|
nfscl_getparent(struct thread *td)
|
|
|
|
{
|
|
|
|
struct proc *p;
|
|
|
|
struct thread *ptd;
|
|
|
|
|
|
|
|
if (td == NULL)
|
|
|
|
return (NULL);
|
|
|
|
p = td->td_proc;
|
|
|
|
if (p->p_pid == 0)
|
|
|
|
return (NULL);
|
|
|
|
p = p->p_pptr;
|
|
|
|
if (p == NULL)
|
|
|
|
return (NULL);
|
|
|
|
ptd = TAILQ_FIRST(&p->p_threads);
|
|
|
|
return (ptd);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Start up the renew kernel thread.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
start_nfscl(void *arg)
|
|
|
|
{
|
|
|
|
struct nfsclclient *clp;
|
|
|
|
struct thread *td;
|
|
|
|
|
|
|
|
clp = (struct nfsclclient *)arg;
|
|
|
|
td = TAILQ_FIRST(&clp->nfsc_renewthread->p_threads);
|
|
|
|
nfscl_renewthread(clp, td);
|
|
|
|
kproc_exit(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
nfscl_start_renewthread(struct nfsclclient *clp)
{

	/*
	 * Create the "nfscl" kernel process; it runs start_nfscl() with
	 * clp as its argument, and a pointer to the new process is
	 * stored in clp->nfsc_renewthread so start_nfscl() can find its
	 * own thread.  The kproc_create() return value is ignored here.
	 */
	kproc_create(start_nfscl, (void *)clp, &clp->nfsc_renewthread, 0, 0,
	    "nfscl");
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Handle wcc_data.
|
|
|
|
* For NFSv4, it assumes that nfsv4_wccattr() was used to set up the getattr
|
|
|
|
* as the first Op after PutFH.
|
|
|
|
* (For NFSv4, the postop attributes are after the Op, so they can't be
|
|
|
|
* parsed here. A separate call to nfscl_postop_attr() is required.)
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
nfscl_wcc_data(struct nfsrv_descript *nd, struct vnode *vp,
|
|
|
|
struct nfsvattr *nap, int *flagp, int *wccflagp, void *stuff)
|
|
|
|
{
|
|
|
|
u_int32_t *tl;
|
|
|
|
struct nfsnode *np = VTONFS(vp);
|
|
|
|
struct nfsvattr nfsva;
|
|
|
|
int error = 0;
|
|
|
|
|
|
|
|
if (wccflagp != NULL)
|
|
|
|
*wccflagp = 0;
|
|
|
|
if (nd->nd_flag & ND_NFSV3) {
|
|
|
|
*flagp = 0;
|
|
|
|
NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
|
|
|
|
if (*tl == newnfs_true) {
|
|
|
|
NFSM_DISSECT(tl, u_int32_t *, 6 * NFSX_UNSIGNED);
|
|
|
|
if (wccflagp != NULL) {
|
2019-09-24 01:58:54 +00:00
|
|
|
NFSLOCKNODE(np);
|
2009-05-04 15:23:58 +00:00
|
|
|
*wccflagp = (np->n_mtime.tv_sec ==
|
|
|
|
fxdr_unsigned(u_int32_t, *(tl + 2)) &&
|
|
|
|
np->n_mtime.tv_nsec ==
|
|
|
|
fxdr_unsigned(u_int32_t, *(tl + 3)));
|
2019-09-24 01:58:54 +00:00
|
|
|
NFSUNLOCKNODE(np);
|
2009-05-04 15:23:58 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
error = nfscl_postop_attr(nd, nap, flagp, stuff);
|
2017-06-28 21:37:08 +00:00
|
|
|
if (wccflagp != NULL && *flagp == 0)
|
|
|
|
*wccflagp = 0;
|
2009-05-04 15:23:58 +00:00
|
|
|
} else if ((nd->nd_flag & (ND_NOMOREDATA | ND_NFSV4 | ND_V4WCCATTR))
|
|
|
|
== (ND_NFSV4 | ND_V4WCCATTR)) {
|
|
|
|
error = nfsv4_loadattr(nd, NULL, &nfsva, NULL,
|
|
|
|
NULL, 0, NULL, NULL, NULL, NULL, NULL, 0,
|
|
|
|
NULL, NULL, NULL, NULL, NULL);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
/*
|
|
|
|
* Get rid of Op# and status for next op.
|
|
|
|
*/
|
|
|
|
NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
|
|
|
|
if (*++tl)
|
|
|
|
nd->nd_flag |= ND_NOMOREDATA;
|
|
|
|
if (wccflagp != NULL &&
|
|
|
|
nfsva.na_vattr.va_mtime.tv_sec != 0) {
|
2019-09-24 01:58:54 +00:00
|
|
|
NFSLOCKNODE(np);
|
2009-05-04 15:23:58 +00:00
|
|
|
*wccflagp = (np->n_mtime.tv_sec ==
|
|
|
|
nfsva.na_vattr.va_mtime.tv_sec &&
|
|
|
|
np->n_mtime.tv_nsec ==
|
|
|
|
nfsva.na_vattr.va_mtime.tv_sec);
|
2019-09-24 01:58:54 +00:00
|
|
|
NFSUNLOCKNODE(np);
|
2009-05-04 15:23:58 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
nfsmout:
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get postop attributes.
|
|
|
|
*/
|
|
|
|
int
nfscl_postop_attr(struct nfsrv_descript *nd, struct nfsvattr *nap, int *retp,
    void *stuff)
{
	u_int32_t *tl;
	int error = 0;

	*retp = 0;
	/* Nothing left in the reply, so no attributes to parse. */
	if (nd->nd_flag & ND_NOMOREDATA)
		return (error);
	if (nd->nd_flag & ND_NFSV3) {
		/* NFSv3: an "attributes follow" boolean precedes them. */
		NFSM_DISSECT(tl, u_int32_t *, NFSX_UNSIGNED);
		*retp = fxdr_unsigned(int, *tl);
	} else if (nd->nd_flag & ND_NFSV4) {
		/*
		 * For NFSv4, the postop attr are at the end, so no point
		 * in looking if nd_repstat != 0.
		 */
		if (!nd->nd_repstat) {
			/* Consume the Getattr Op# and its status word. */
			NFSM_DISSECT(tl, u_int32_t *, 2 * NFSX_UNSIGNED);
			if (*(tl + 1))
				/* Should not be non-zero when nd_repstat == 0. */
				nd->nd_flag |= ND_NOMOREDATA;
			else
				*retp = 1;
		}
	} else if (!nd->nd_repstat) {
		/* For NFSv2, the attributes are here iff nd_repstat == 0 */
		*retp = 1;
	}
	if (*retp) {
		error = nfsm_loadattr(nd, nap);
		if (error)
			*retp = 0;
	}
nfsmout:
	return (error);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* nfscl_request() - mostly a wrapper for newnfs_request().
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
nfscl_request(struct nfsrv_descript *nd, struct vnode *vp, NFSPROC_T *p,
|
|
|
|
struct ucred *cred, void *stuff)
|
|
|
|
{
|
|
|
|
int ret, vers;
|
|
|
|
struct nfsmount *nmp;
|
|
|
|
|
|
|
|
nmp = VFSTONFS(vp->v_mount);
|
|
|
|
if (nd->nd_flag & ND_NFSV4)
|
|
|
|
vers = NFS_VER4;
|
|
|
|
else if (nd->nd_flag & ND_NFSV3)
|
|
|
|
vers = NFS_VER3;
|
|
|
|
else
|
|
|
|
vers = NFS_VER2;
|
|
|
|
ret = newnfs_request(nd, nmp, NULL, &nmp->nm_sockreq, vp, p, cred,
|
2012-12-08 22:52:39 +00:00
|
|
|
NFS_PROG, vers, NULL, 1, NULL, NULL);
|
2009-05-04 15:23:58 +00:00
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* fill in this bsden's variant of statfs using nfsstatfs.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
nfscl_loadsbinfo(struct nfsmount *nmp, struct nfsstatfs *sfp, void *statfs)
|
|
|
|
{
|
|
|
|
struct statfs *sbp = (struct statfs *)statfs;
|
|
|
|
|
|
|
|
if (nmp->nm_flag & (NFSMNT_NFSV3 | NFSMNT_NFSV4)) {
|
|
|
|
sbp->f_bsize = NFS_FABLKSIZE;
|
2011-05-05 00:11:09 +00:00
|
|
|
sbp->f_blocks = sfp->sf_tbytes / NFS_FABLKSIZE;
|
|
|
|
sbp->f_bfree = sfp->sf_fbytes / NFS_FABLKSIZE;
|
|
|
|
/*
|
|
|
|
* Although sf_abytes is uint64_t and f_bavail is int64_t,
|
|
|
|
* the value after dividing by NFS_FABLKSIZE is small
|
|
|
|
* enough that it will fit in 63bits, so it is ok to
|
|
|
|
* assign it to f_bavail without fear that it will become
|
|
|
|
* negative.
|
|
|
|
*/
|
|
|
|
sbp->f_bavail = sfp->sf_abytes / NFS_FABLKSIZE;
|
|
|
|
sbp->f_files = sfp->sf_tfiles;
|
|
|
|
/* Since f_ffree is int64_t, clip it to 63bits. */
|
|
|
|
if (sfp->sf_ffiles > INT64_MAX)
|
|
|
|
sbp->f_ffree = INT64_MAX;
|
|
|
|
else
|
|
|
|
sbp->f_ffree = sfp->sf_ffiles;
|
2009-05-04 15:23:58 +00:00
|
|
|
} else if ((nmp->nm_flag & NFSMNT_NFSV4) == 0) {
|
2011-05-05 00:11:09 +00:00
|
|
|
/*
|
|
|
|
* The type casts to (int32_t) ensure that this code is
|
|
|
|
* compatible with the old NFS client, in that it will
|
|
|
|
* propagate bit31 to the high order bits. This may or may
|
|
|
|
* not be correct for NFSv2, but since it is a legacy
|
|
|
|
* environment, I'd rather retain backwards compatibility.
|
|
|
|
*/
|
2009-05-04 15:23:58 +00:00
|
|
|
sbp->f_bsize = (int32_t)sfp->sf_bsize;
|
|
|
|
sbp->f_blocks = (int32_t)sfp->sf_blocks;
|
|
|
|
sbp->f_bfree = (int32_t)sfp->sf_bfree;
|
|
|
|
sbp->f_bavail = (int32_t)sfp->sf_bavail;
|
|
|
|
sbp->f_files = 0;
|
|
|
|
sbp->f_ffree = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Use the fsinfo stuff to update the mount point.
|
|
|
|
*/
|
|
|
|
void
nfscl_loadfsinfo(struct nfsmount *nmp, struct nfsfsinfo *fsp)
{

	/*
	 * Write size: adopt the server's preferred write transfer size
	 * (rounded up to a multiple of NFS_FABLKSIZE) when it is smaller
	 * than the current setting, then clamp to the server's maximum
	 * and enforce a floor of NFS_FABLKSIZE.
	 */
	if ((nmp->nm_wsize == 0 || fsp->fs_wtpref < nmp->nm_wsize) &&
	    fsp->fs_wtpref >= NFS_FABLKSIZE)
		nmp->nm_wsize = (fsp->fs_wtpref + NFS_FABLKSIZE - 1) &
		    ~(NFS_FABLKSIZE - 1);
	if (fsp->fs_wtmax < nmp->nm_wsize && fsp->fs_wtmax > 0) {
		nmp->nm_wsize = fsp->fs_wtmax & ~(NFS_FABLKSIZE - 1);
		if (nmp->nm_wsize == 0)
			nmp->nm_wsize = fsp->fs_wtmax;
	}
	if (nmp->nm_wsize < NFS_FABLKSIZE)
		nmp->nm_wsize = NFS_FABLKSIZE;
	/* Read size: same scheme as the write size above. */
	if ((nmp->nm_rsize == 0 || fsp->fs_rtpref < nmp->nm_rsize) &&
	    fsp->fs_rtpref >= NFS_FABLKSIZE)
		nmp->nm_rsize = (fsp->fs_rtpref + NFS_FABLKSIZE - 1) &
		    ~(NFS_FABLKSIZE - 1);
	if (fsp->fs_rtmax < nmp->nm_rsize && fsp->fs_rtmax > 0) {
		nmp->nm_rsize = fsp->fs_rtmax & ~(NFS_FABLKSIZE - 1);
		if (nmp->nm_rsize == 0)
			nmp->nm_rsize = fsp->fs_rtmax;
	}
	if (nmp->nm_rsize < NFS_FABLKSIZE)
		nmp->nm_rsize = NFS_FABLKSIZE;
	/*
	 * Readdir size: preferred size rounded to NFS_DIRBLKSIZ.  Note
	 * that the clamp below uses fs_rtmax (the read transfer max),
	 * not a directory-specific maximum — presumably because READDIR
	 * replies are bounded by the read transfer size; confirm before
	 * changing.
	 */
	if ((nmp->nm_readdirsize == 0 || fsp->fs_dtpref < nmp->nm_readdirsize)
	    && fsp->fs_dtpref >= NFS_DIRBLKSIZ)
		nmp->nm_readdirsize = (fsp->fs_dtpref + NFS_DIRBLKSIZ - 1) &
		    ~(NFS_DIRBLKSIZ - 1);
	if (fsp->fs_rtmax < nmp->nm_readdirsize && fsp->fs_rtmax > 0) {
		nmp->nm_readdirsize = fsp->fs_rtmax & ~(NFS_DIRBLKSIZ - 1);
		if (nmp->nm_readdirsize == 0)
			nmp->nm_readdirsize = fsp->fs_rtmax;
	}
	if (nmp->nm_readdirsize < NFS_DIRBLKSIZ)
		nmp->nm_readdirsize = NFS_DIRBLKSIZ;
	/* Only ever shrink the maximum file size. */
	if (fsp->fs_maxfilesize > 0 &&
	    fsp->fs_maxfilesize < nmp->nm_maxfilesize)
		nmp->nm_maxfilesize = fsp->fs_maxfilesize;
	nmp->nm_mountp->mnt_stat.f_iosize = newnfs_iosize(nmp);
	nmp->nm_state |= NFSSTA_GOTFSINFO;
}
|
|
|
|
|
|
|
|
/*
|
2016-01-15 09:05:14 +00:00
|
|
|
 * Look up the source address which should be used to communicate with
 * @nmp and store it inside @paddr.
 *
 * Returns a pointer to @paddr on success or NULL on failure.
|
2009-05-04 15:23:58 +00:00
|
|
|
*/
|
|
|
|
u_int8_t *
nfscl_getmyip(struct nfsmount *nmp, struct in6_addr *paddr, int *isinet6p)
{
#if defined(INET6) || defined(INET)
	int fibnum;

	/* Route lookups use the calling process's FIB. */
	fibnum = curthread->td_proc->p_fibnum;
#endif
#ifdef INET
	if (nmp->nm_nam->sa_family == AF_INET) {
		struct epoch_tracker et;
		struct nhop_object *nh;
		struct sockaddr_in *sin;
		struct in_addr addr = {};

		sin = (struct sockaddr_in *)nmp->nm_nam;
		/* Nexthop lookup must run within the network epoch. */
		NET_EPOCH_ENTER(et);
		CURVNET_SET(CRED_TO_VNET(nmp->nm_sockreq.nr_cred));
		nh = fib4_lookup(fibnum, sin->sin_addr, 0, NHR_NONE, 0);
		CURVNET_RESTORE();
		/* Copy the address out before leaving the epoch. */
		if (nh != NULL)
			addr = IA_SIN(ifatoia(nh->nh_ifa))->sin_addr;
		NET_EPOCH_EXIT(et);
		if (nh == NULL)
			return (NULL);

		if (IN_LOOPBACK(ntohl(addr.s_addr))) {
			/* Ignore loopback addresses */
			return (NULL);
		}

		*isinet6p = 0;
		/* Store the IPv4 address in the first 4 bytes of *paddr. */
		*((struct in_addr *)paddr) = addr;

		return (u_int8_t *)paddr;
	}
#endif
#ifdef INET6
	if (nmp->nm_nam->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;
		int error;

		sin6 = (struct sockaddr_in6 *)nmp->nm_nam;

		CURVNET_SET(CRED_TO_VNET(nmp->nm_sockreq.nr_cred));
		error = in6_selectsrc_addr(fibnum, &sin6->sin6_addr,
		    sin6->sin6_scope_id, NULL, paddr, NULL);
		CURVNET_RESTORE();
		if (error != 0)
			return (NULL);

		/* Ignore loopback addresses. */
		if (IN6_IS_ADDR_LOOPBACK(paddr))
			return (NULL);

		/* Scope is embedded in */
		*isinet6p = 1;

		return (u_int8_t *)paddr;
	}
#endif
	/* Unsupported address family (or kernel built without INET/INET6). */
	return (NULL);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copy NFS uid, gids from the cred structure.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
newnfs_copyincred(struct ucred *cr, struct nfscred *nfscr)
|
|
|
|
{
|
2009-06-19 17:10:35 +00:00
|
|
|
int i;
|
2009-05-04 15:23:58 +00:00
|
|
|
|
2010-04-15 22:57:30 +00:00
|
|
|
KASSERT(cr->cr_ngroups >= 0,
|
|
|
|
("newnfs_copyincred: negative cr_ngroups"));
|
2009-05-04 15:23:58 +00:00
|
|
|
nfscr->nfsc_uid = cr->cr_uid;
|
2009-06-20 17:11:07 +00:00
|
|
|
nfscr->nfsc_ngroups = MIN(cr->cr_ngroups, NFS_MAXGRPS + 1);
|
2009-06-19 17:10:35 +00:00
|
|
|
for (i = 0; i < nfscr->nfsc_ngroups; i++)
|
2009-05-04 15:23:58 +00:00
|
|
|
nfscr->nfsc_groups[i] = cr->cr_groups[i];
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do any client specific initialization.
|
|
|
|
*/
|
|
|
|
void
|
|
|
|
nfscl_init(void)
|
|
|
|
{
|
|
|
|
static int inited = 0;
|
|
|
|
|
|
|
|
if (inited)
|
|
|
|
return;
|
|
|
|
inited = 1;
|
|
|
|
nfscl_inited = 1;
|
Allocate pager bufs from UMA instead of 80-ish mutex protected linked list.
o In vm_pager_bufferinit() create pbuf_zone and start accounting on how many
pbufs are we going to have set.
In various subsystems that are going to utilize pbufs create private zones
via call to pbuf_zsecond_create(). The latter calls uma_zsecond_create(),
and sets a limit on created zone. After startup preallocate pbufs according
to requirements of all pbuf zones.
Subsystems that used to have a private limit with old allocator now have
private pbuf zones: md(4), fusefs, NFS client, smbfs, VFS cluster, FFS,
swap, vnode pager.
The following subsystems use shared pbuf zone: cam(4), nvme(4), physio(9),
aio(4). They should have their private limits, but changing that is out of
scope of this commit.
o Fetch tunable value of kern.nswbuf from init_param2() and while here move
NSWBUF_MIN to opt_param.h and eliminate opt_swap.h, that was holding only
this option.
Default values aren't touched by this commit, but they probably should be
reviewed wrt to modern hardware.
This change removes a tight bottleneck from sendfile(2) operation, that
uses pbufs in vnode pager. Other pagers also would benefit from faster
allocation.
Together with: gallatin
Tested by: pho
2019-01-15 01:02:16 +00:00
|
|
|
ncl_pbuf_zone = pbuf_zsecond_create("nfspbuf", nswbuf / 2);
|
2009-05-04 15:23:58 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check each of the attributes to be set, to ensure they aren't already
|
|
|
|
* the correct value. Disable setting ones already correct.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
nfscl_checksattr(struct vattr *vap, struct nfsvattr *nvap)
|
|
|
|
{
|
|
|
|
|
|
|
|
if (vap->va_mode != (mode_t)VNOVAL) {
|
|
|
|
if (vap->va_mode == nvap->na_mode)
|
|
|
|
vap->va_mode = (mode_t)VNOVAL;
|
|
|
|
}
|
|
|
|
if (vap->va_uid != (uid_t)VNOVAL) {
|
|
|
|
if (vap->va_uid == nvap->na_uid)
|
|
|
|
vap->va_uid = (uid_t)VNOVAL;
|
|
|
|
}
|
|
|
|
if (vap->va_gid != (gid_t)VNOVAL) {
|
|
|
|
if (vap->va_gid == nvap->na_gid)
|
|
|
|
vap->va_gid = (gid_t)VNOVAL;
|
|
|
|
}
|
|
|
|
if (vap->va_size != VNOVAL) {
|
|
|
|
if (vap->va_size == nvap->na_size)
|
|
|
|
vap->va_size = VNOVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We are normally called with only a partially initialized
|
|
|
|
* VAP. Since the NFSv3 spec says that server may use the
|
|
|
|
* file attributes to store the verifier, the spec requires
|
|
|
|
* us to do a SETATTR RPC. FreeBSD servers store the verifier
|
|
|
|
* in atime, but we can't really assume that all servers will
|
|
|
|
* so we ensure that our SETATTR sets both atime and mtime.
|
2014-12-28 21:13:52 +00:00
|
|
|
* Set the VA_UTIMES_NULL flag for this case, so that
|
|
|
|
* the server's time will be used. This is needed to
|
|
|
|
* work around a bug in some Solaris servers, where
|
|
|
|
* setting the time TOCLIENT causes the Setattr RPC
|
|
|
|
* to return NFS_OK, but not set va_mode.
|
2009-05-04 15:23:58 +00:00
|
|
|
*/
|
2014-12-28 21:13:52 +00:00
|
|
|
if (vap->va_mtime.tv_sec == VNOVAL) {
|
2009-05-04 15:23:58 +00:00
|
|
|
vfs_timestamp(&vap->va_mtime);
|
2014-12-28 21:13:52 +00:00
|
|
|
vap->va_vaflags |= VA_UTIMES_NULL;
|
|
|
|
}
|
2009-05-04 15:23:58 +00:00
|
|
|
if (vap->va_atime.tv_sec == VNOVAL)
|
|
|
|
vap->va_atime = vap->va_mtime;
|
|
|
|
return (1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Map nfsv4 errors to errno.h errors.
|
|
|
|
* The uid and gid arguments are only used for NFSERR_BADOWNER and that
|
|
|
|
* error should only be returned for the Open, Create and Setattr Ops.
|
|
|
|
* As such, most calls can just pass in 0 for those arguments.
|
|
|
|
*/
|
2020-05-12 13:23:25 +00:00
|
|
|
int
|
2009-05-04 15:23:58 +00:00
|
|
|
nfscl_maperr(struct thread *td, int error, uid_t uid, gid_t gid)
|
|
|
|
{
|
|
|
|
struct proc *p;
|
|
|
|
|
2017-04-09 21:50:21 +00:00
|
|
|
if (error < 10000 || error >= NFSERR_STALEWRITEVERF)
|
2009-05-04 15:23:58 +00:00
|
|
|
return (error);
|
|
|
|
if (td != NULL)
|
|
|
|
p = td->td_proc;
|
|
|
|
else
|
|
|
|
p = NULL;
|
|
|
|
switch (error) {
|
|
|
|
case NFSERR_BADOWNER:
|
|
|
|
tprintf(p, LOG_INFO,
|
|
|
|
"No name and/or group mapping for uid,gid:(%d,%d)\n",
|
|
|
|
uid, gid);
|
|
|
|
return (EPERM);
|
2012-12-08 22:52:39 +00:00
|
|
|
case NFSERR_BADNAME:
|
|
|
|
case NFSERR_BADCHAR:
|
|
|
|
printf("nfsv4 char/name not handled by server\n");
|
|
|
|
return (ENOENT);
|
2009-05-04 15:23:58 +00:00
|
|
|
case NFSERR_STALECLIENTID:
|
|
|
|
case NFSERR_STALESTATEID:
|
|
|
|
case NFSERR_EXPIRED:
|
|
|
|
case NFSERR_BADSTATEID:
|
2012-12-08 22:52:39 +00:00
|
|
|
case NFSERR_BADSESSION:
|
2009-05-04 15:23:58 +00:00
|
|
|
printf("nfsv4 recover err returned %d\n", error);
|
|
|
|
return (EIO);
|
|
|
|
case NFSERR_BADHANDLE:
|
|
|
|
case NFSERR_SERVERFAULT:
|
|
|
|
case NFSERR_BADTYPE:
|
|
|
|
case NFSERR_FHEXPIRED:
|
|
|
|
case NFSERR_RESOURCE:
|
|
|
|
case NFSERR_MOVED:
|
|
|
|
case NFSERR_NOFILEHANDLE:
|
|
|
|
case NFSERR_MINORVERMISMATCH:
|
|
|
|
case NFSERR_OLDSTATEID:
|
|
|
|
case NFSERR_BADSEQID:
|
|
|
|
case NFSERR_LEASEMOVED:
|
|
|
|
case NFSERR_RECLAIMBAD:
|
|
|
|
case NFSERR_BADXDR:
|
|
|
|
case NFSERR_OPILLEGAL:
|
|
|
|
printf("nfsv4 client/server protocol prob err=%d\n",
|
|
|
|
error);
|
|
|
|
return (EIO);
|
|
|
|
default:
|
|
|
|
tprintf(p, LOG_INFO, "nfsv4 err=%d\n", error);
|
|
|
|
return (EIO);
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check to see if the process for this owner exists. Return 1 if it doesn't
|
|
|
|
* and 0 otherwise.
|
|
|
|
*/
|
|
|
|
int
|
|
|
|
nfscl_procdoesntexist(u_int8_t *own)
|
|
|
|
{
|
|
|
|
union {
|
|
|
|
u_int32_t lval;
|
|
|
|
u_int8_t cval[4];
|
|
|
|
} tl;
|
|
|
|
struct proc *p;
|
|
|
|
pid_t pid;
|
2017-04-13 21:54:19 +00:00
|
|
|
int i, ret = 0;
|
|
|
|
|
|
|
|
/* For the single open_owner of all 0 bytes, just return 0. */
|
|
|
|
for (i = 0; i < NFSV4CL_LOCKNAMELEN; i++)
|
|
|
|
if (own[i] != 0)
|
|
|
|
break;
|
|
|
|
if (i == NFSV4CL_LOCKNAMELEN)
|
|
|
|
return (0);
|
2009-05-04 15:23:58 +00:00
|
|
|
|
|
|
|
tl.cval[0] = *own++;
|
|
|
|
tl.cval[1] = *own++;
|
|
|
|
tl.cval[2] = *own++;
|
|
|
|
tl.cval[3] = *own++;
|
|
|
|
pid = tl.lval;
|
2019-04-15 01:27:15 +00:00
|
|
|
p = pfind_any_locked(pid);
|
2009-05-04 15:23:58 +00:00
|
|
|
if (p == NULL)
|
|
|
|
return (1);
|
|
|
|
if (p->p_stats == NULL) {
|
|
|
|
PROC_UNLOCK(p);
|
|
|
|
return (0);
|
|
|
|
}
|
|
|
|
tl.cval[0] = *own++;
|
|
|
|
tl.cval[1] = *own++;
|
|
|
|
tl.cval[2] = *own++;
|
|
|
|
tl.cval[3] = *own++;
|
|
|
|
if (tl.lval != p->p_stats->p_start.tv_sec) {
|
|
|
|
ret = 1;
|
|
|
|
} else {
|
|
|
|
tl.cval[0] = *own++;
|
|
|
|
tl.cval[1] = *own++;
|
|
|
|
tl.cval[2] = *own++;
|
|
|
|
tl.cval[3] = *own;
|
|
|
|
if (tl.lval != p->p_stats->p_start.tv_usec)
|
|
|
|
ret = 1;
|
|
|
|
}
|
|
|
|
PROC_UNLOCK(p);
|
|
|
|
return (ret);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* - nfs pseudo system call for the client
|
|
|
|
*/
|
|
|
|
/*
|
|
|
|
* MPSAFE
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
nfssvc_nfscl(struct thread *td, struct nfssvc_args *uap)
|
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
struct nfscbd_args nfscbdarg;
|
|
|
|
struct nfsd_nfscbd_args nfscbdarg2;
|
2012-12-02 01:16:04 +00:00
|
|
|
struct nameidata nd;
|
|
|
|
struct nfscl_dumpmntopts dumpmntopts;
|
Change the cap_rights_t type from uint64_t to a structure that we can extend
in the future in a backward compatible (API and ABI) way.
The cap_rights_t represents capability rights. We used to use one bit to
represent one right, but we are running out of spare bits. Currently the new
structure provides place for 114 rights (so 50 more than the previous
cap_rights_t), but it is possible to grow the structure to hold at least 285
rights, although we can make it even larger if 285 rights won't be enough.
The structure definition looks like this:
struct cap_rights {
uint64_t cr_rights[CAP_RIGHTS_VERSION + 2];
};
The initial CAP_RIGHTS_VERSION is 0.
The top two bits in the first element of the cr_rights[] array contain total
number of elements in the array - 2. This means if those two bits are equal to
0, we have 2 array elements.
The top two bits in all remaining array elements should be 0.
The next five bits in all array elements contain array index. Only one bit is
used and bit position in this five-bits range defines array index. This means
there can be at most five array elements in the future.
To define new right the CAPRIGHT() macro must be used. The macro takes two
arguments - an array index and a bit to set, eg.
#define CAP_PDKILL CAPRIGHT(1, 0x0000000000000800ULL)
We still support aliases that combine few rights, but the rights have to belong
to the same array element, eg:
#define CAP_LOOKUP CAPRIGHT(0, 0x0000000000000400ULL)
#define CAP_FCHMOD CAPRIGHT(0, 0x0000000000002000ULL)
#define CAP_FCHMODAT (CAP_FCHMOD | CAP_LOOKUP)
There is new API to manage the new cap_rights_t structure:
cap_rights_t *cap_rights_init(cap_rights_t *rights, ...);
void cap_rights_set(cap_rights_t *rights, ...);
void cap_rights_clear(cap_rights_t *rights, ...);
bool cap_rights_is_set(const cap_rights_t *rights, ...);
bool cap_rights_is_valid(const cap_rights_t *rights);
void cap_rights_merge(cap_rights_t *dst, const cap_rights_t *src);
void cap_rights_remove(cap_rights_t *dst, const cap_rights_t *src);
bool cap_rights_contains(const cap_rights_t *big, const cap_rights_t *little);
Capability rights to the cap_rights_init(), cap_rights_set(),
cap_rights_clear() and cap_rights_is_set() functions are provided by
separating them with commas, eg:
cap_rights_t rights;
cap_rights_init(&rights, CAP_READ, CAP_WRITE, CAP_FSTAT);
There is no need to terminate the list of rights, as those functions are
actually macros that take care of the termination, eg:
#define cap_rights_set(rights, ...) \
__cap_rights_set((rights), __VA_ARGS__, 0ULL)
void __cap_rights_set(cap_rights_t *rights, ...);
Thanks to using one bit as an array index we can assert in those functions that
there are no two rights belonging to different array elements provided
together. For example this is illegal and will be detected, because CAP_LOOKUP
belongs to element 0 and CAP_PDKILL to element 1:
cap_rights_init(&rights, CAP_LOOKUP | CAP_PDKILL);
Providing several rights that belongs to the same array's element this way is
correct, but is not advised. It should only be used for aliases definition.
This commit also breaks compatibility with some existing Capsicum system calls,
but I see no other way to do that. This should be fine as Capsicum is still
experimental and this change is not going to 9.x.
Sponsored by: The FreeBSD Foundation
2013-09-05 00:09:56 +00:00
|
|
|
cap_rights_t rights;
|
2012-12-02 01:16:04 +00:00
|
|
|
char *buf;
|
Change the cap_rights_t type from uint64_t to a structure that we can extend
in the future in a backward compatible (API and ABI) way.
The cap_rights_t represents capability rights. We used to use one bit to
represent one right, but we are running out of spare bits. Currently the new
structure provides place for 114 rights (so 50 more than the previous
cap_rights_t), but it is possible to grow the structure to hold at least 285
rights, although we can make it even larger if 285 rights won't be enough.
The structure definition looks like this:
struct cap_rights {
uint64_t cr_rights[CAP_RIGHTS_VERSION + 2];
};
The initial CAP_RIGHTS_VERSION is 0.
The top two bits in the first element of the cr_rights[] array contain total
number of elements in the array - 2. This means if those two bits are equal to
0, we have 2 array elements.
The top two bits in all remaining array elements should be 0.
The next five bits in all array elements contain array index. Only one bit is
used and bit position in this five-bits range defines array index. This means
there can be at most five array elements in the future.
To define new right the CAPRIGHT() macro must be used. The macro takes two
arguments - an array index and a bit to set, eg.
#define CAP_PDKILL CAPRIGHT(1, 0x0000000000000800ULL)
We still support aliases that combine few rights, but the rights have to belong
to the same array element, eg:
#define CAP_LOOKUP CAPRIGHT(0, 0x0000000000000400ULL)
#define CAP_FCHMOD CAPRIGHT(0, 0x0000000000002000ULL)
#define CAP_FCHMODAT (CAP_FCHMOD | CAP_LOOKUP)
There is new API to manage the new cap_rights_t structure:
cap_rights_t *cap_rights_init(cap_rights_t *rights, ...);
void cap_rights_set(cap_rights_t *rights, ...);
void cap_rights_clear(cap_rights_t *rights, ...);
bool cap_rights_is_set(const cap_rights_t *rights, ...);
bool cap_rights_is_valid(const cap_rights_t *rights);
void cap_rights_merge(cap_rights_t *dst, const cap_rights_t *src);
void cap_rights_remove(cap_rights_t *dst, const cap_rights_t *src);
bool cap_rights_contains(const cap_rights_t *big, const cap_rights_t *little);
Capability rights to the cap_rights_init(), cap_rights_set(),
cap_rights_clear() and cap_rights_is_set() functions are provided by
separating them with commas, eg:
cap_rights_t rights;
cap_rights_init(&rights, CAP_READ, CAP_WRITE, CAP_FSTAT);
There is no need to terminate the list of rights, as those functions are
actually macros that take care of the termination, eg:
#define cap_rights_set(rights, ...) \
__cap_rights_set((rights), __VA_ARGS__, 0ULL)
void __cap_rights_set(cap_rights_t *rights, ...);
Thanks to using one bit as an array index we can assert in those functions that
there are no two rights belonging to different array elements provided
together. For example this is illegal and will be detected, because CAP_LOOKUP
belongs to element 0 and CAP_PDKILL to element 1:
cap_rights_init(&rights, CAP_LOOKUP | CAP_PDKILL);
Providing several rights that belongs to the same array's element this way is
correct, but is not advised. It should only be used for aliases definition.
This commit also breaks compatibility with some existing Capsicum system calls,
but I see no other way to do that. This should be fine as Capsicum is still
experimental and this change is not going to 9.x.
Sponsored by: The FreeBSD Foundation
2013-09-05 00:09:56 +00:00
|
|
|
int error;
|
2017-07-29 19:52:47 +00:00
|
|
|
struct mount *mp;
|
|
|
|
struct nfsmount *nmp;
|
2009-05-04 15:23:58 +00:00
|
|
|
|
|
|
|
if (uap->flag & NFSSVC_CBADDSOCK) {
|
|
|
|
error = copyin(uap->argp, (caddr_t)&nfscbdarg, sizeof(nfscbdarg));
|
|
|
|
if (error)
|
|
|
|
return (error);
|
2011-08-11 12:30:23 +00:00
|
|
|
/*
|
|
|
|
* Since we don't know what rights might be required,
|
|
|
|
* pretend that we need them all. It is better to be too
|
|
|
|
* careful than too reckless.
|
|
|
|
*/
|
Change the cap_rights_t type from uint64_t to a structure that we can extend
in the future in a backward compatible (API and ABI) way.
The cap_rights_t represents capability rights. We used to use one bit to
represent one right, but we are running out of spare bits. Currently the new
structure provides place for 114 rights (so 50 more than the previous
cap_rights_t), but it is possible to grow the structure to hold at least 285
rights, although we can make it even larger if 285 rights won't be enough.
The structure definition looks like this:
struct cap_rights {
uint64_t cr_rights[CAP_RIGHTS_VERSION + 2];
};
The initial CAP_RIGHTS_VERSION is 0.
The top two bits in the first element of the cr_rights[] array contain total
number of elements in the array - 2. This means if those two bits are equal to
0, we have 2 array elements.
The top two bits in all remaining array elements should be 0.
The next five bits in all array elements contain array index. Only one bit is
used and bit position in this five-bits range defines array index. This means
there can be at most five array elements in the future.
To define new right the CAPRIGHT() macro must be used. The macro takes two
arguments - an array index and a bit to set, eg.
#define CAP_PDKILL CAPRIGHT(1, 0x0000000000000800ULL)
We still support aliases that combine few rights, but the rights have to belong
to the same array element, eg:
#define CAP_LOOKUP CAPRIGHT(0, 0x0000000000000400ULL)
#define CAP_FCHMOD CAPRIGHT(0, 0x0000000000002000ULL)
#define CAP_FCHMODAT (CAP_FCHMOD | CAP_LOOKUP)
There is new API to manage the new cap_rights_t structure:
cap_rights_t *cap_rights_init(cap_rights_t *rights, ...);
void cap_rights_set(cap_rights_t *rights, ...);
void cap_rights_clear(cap_rights_t *rights, ...);
bool cap_rights_is_set(const cap_rights_t *rights, ...);
bool cap_rights_is_valid(const cap_rights_t *rights);
void cap_rights_merge(cap_rights_t *dst, const cap_rights_t *src);
void cap_rights_remove(cap_rights_t *dst, const cap_rights_t *src);
bool cap_rights_contains(const cap_rights_t *big, const cap_rights_t *little);
Capability rights to the cap_rights_init(), cap_rights_set(),
cap_rights_clear() and cap_rights_is_set() functions are provided by
separating them with commas, eg:
cap_rights_t rights;
cap_rights_init(&rights, CAP_READ, CAP_WRITE, CAP_FSTAT);
There is no need to terminate the list of rights, as those functions are
actually macros that take care of the termination, eg:
#define cap_rights_set(rights, ...) \
__cap_rights_set((rights), __VA_ARGS__, 0ULL)
void __cap_rights_set(cap_rights_t *rights, ...);
Thanks to using one bit as an array index we can assert in those functions that
there are no two rights belonging to different array elements provided
together. For example this is illegal and will be detected, because CAP_LOOKUP
belongs to element 0 and CAP_PDKILL to element 1:
cap_rights_init(&rights, CAP_LOOKUP | CAP_PDKILL);
Providing several rights that belong to the same array's element this way is
correct, but is not advised. It should only be used for aliases definition.
This commit also breaks compatibility with some existing Capsicum system calls,
but I see no other way to do that. This should be fine as Capsicum is still
experimental and this change is not going to 9.x.
Sponsored by: The FreeBSD Foundation
2013-09-05 00:09:56 +00:00
|
|
|
error = fget(td, nfscbdarg.sock,
|
|
|
|
cap_rights_init(&rights, CAP_SOCK_CLIENT), &fp);
|
|
|
|
if (error)
|
2009-05-04 15:23:58 +00:00
|
|
|
return (error);
|
|
|
|
if (fp->f_type != DTYPE_SOCKET) {
|
|
|
|
fdrop(fp, td);
|
|
|
|
return (EPERM);
|
|
|
|
}
|
|
|
|
error = nfscbd_addsock(fp);
|
|
|
|
fdrop(fp, td);
|
|
|
|
if (!error && nfscl_enablecallb == 0) {
|
|
|
|
nfsv4_cbport = nfscbdarg.port;
|
|
|
|
nfscl_enablecallb = 1;
|
|
|
|
}
|
|
|
|
} else if (uap->flag & NFSSVC_NFSCBD) {
|
|
|
|
if (uap->argp == NULL)
|
|
|
|
return (EINVAL);
|
|
|
|
error = copyin(uap->argp, (caddr_t)&nfscbdarg2,
|
|
|
|
sizeof(nfscbdarg2));
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
error = nfscbd_nfsd(td, &nfscbdarg2);
|
2012-12-02 01:16:04 +00:00
|
|
|
} else if (uap->flag & NFSSVC_DUMPMNTOPTS) {
|
|
|
|
error = copyin(uap->argp, &dumpmntopts, sizeof(dumpmntopts));
|
|
|
|
if (error == 0 && (dumpmntopts.ndmnt_blen < 256 ||
|
|
|
|
dumpmntopts.ndmnt_blen > 1024))
|
|
|
|
error = EINVAL;
|
|
|
|
if (error == 0)
|
|
|
|
error = nfsrv_lookupfilename(&nd,
|
|
|
|
dumpmntopts.ndmnt_fname, td);
|
|
|
|
if (error == 0 && strcmp(nd.ni_vp->v_mount->mnt_vfc->vfc_name,
|
|
|
|
"nfs") != 0) {
|
|
|
|
vput(nd.ni_vp);
|
|
|
|
error = EINVAL;
|
|
|
|
}
|
|
|
|
if (error == 0) {
|
|
|
|
buf = malloc(dumpmntopts.ndmnt_blen, M_TEMP, M_WAITOK);
|
|
|
|
nfscl_retopts(VFSTONFS(nd.ni_vp->v_mount), buf,
|
|
|
|
dumpmntopts.ndmnt_blen);
|
|
|
|
vput(nd.ni_vp);
|
|
|
|
error = copyout(buf, dumpmntopts.ndmnt_buf,
|
|
|
|
dumpmntopts.ndmnt_blen);
|
|
|
|
free(buf, M_TEMP);
|
|
|
|
}
|
2017-07-29 19:52:47 +00:00
|
|
|
} else if (uap->flag & NFSSVC_FORCEDISM) {
|
|
|
|
buf = malloc(MNAMELEN + 1, M_TEMP, M_WAITOK);
|
|
|
|
error = copyinstr(uap->argp, buf, MNAMELEN + 1, NULL);
|
|
|
|
if (error == 0) {
|
|
|
|
nmp = NULL;
|
|
|
|
mtx_lock(&mountlist_mtx);
|
|
|
|
TAILQ_FOREACH(mp, &mountlist, mnt_list) {
|
|
|
|
if (strcmp(mp->mnt_stat.f_mntonname, buf) ==
|
|
|
|
0 && strcmp(mp->mnt_stat.f_fstypename,
|
|
|
|
"nfs") == 0 && mp->mnt_data != NULL) {
|
|
|
|
nmp = VFSTONFS(mp);
|
Merge the pNFS server code from projects/pnfs-planb-server into head.
This code merge adds a pNFS service to the NFSv4.1 server. Although it is
a large commit it should not affect behaviour for a non-pNFS NFS server.
Some documentation on how this works can be found at:
http://people.freebsd.org/~rmacklem/pnfs-planb-setup.txt
and will hopefully be turned into a proper document soon.
This is a merge of the kernel code. Userland and man page changes will
come soon, once the dust settles on this merge.
It has passed a "make universe", so I hope it will not cause build problems.
It also adds NFSv4.1 server support for the "current stateid".
Here is a brief overview of the pNFS service:
A pNFS service separates the Read/Write operations from all the other NFSv4.1
Metadata operations. It is hoped that this separation allows a pNFS service
to be configured that exceeds the limits of a single NFS server for either
storage capacity and/or I/O bandwidth.
It is possible to configure mirroring within the data servers (DSs) so that
the data storage file for an MDS file will be mirrored on two or more of
the DSs.
When this is used, failure of a DS will not stop the pNFS service and a
failed DS can be recovered once repaired while the pNFS service continues
to operate. Although two way mirroring would be the norm, it is possible
to set a mirroring level of up to four or the number of DSs, whichever is
less.
The Metadata server will always be a single point of failure,
just as a single NFS server is.
A Plan B pNFS service consists of a single MetaData Server (MDS) and K
Data Servers (DS), all of which are recent FreeBSD systems.
Clients will mount the MDS as they would a single NFS server.
When files are created, the MDS creates a file tree identical to what a
single NFS server creates, except that all the regular (VREG) files will
be empty. As such, if you look at the exported tree on the MDS directly
on the MDS server (not via an NFS mount), the files will all be of size 0.
Each of these files will also have two extended attributes in the system
attribute name space:
pnfsd.dsfile - This extended attribute stores the information that
the MDS needs to find the data storage file(s) on DS(s) for this file.
pnfsd.dsattr - This extended attribute stores the Size, AccessTime, ModifyTime
and Change attributes for the file, so that the MDS doesn't need to
acquire the attributes from the DS for every Getattr operation.
For each regular (VREG) file, the MDS creates a data storage file on one
(or more if mirroring is enabled) of the DSs in one of the "dsNN"
subdirectories. The name of this file is the file handle
of the file on the MDS in hexadecimal so that the name is unique.
The DSs use subdirectories named "ds0" to "dsN" so that no one directory
gets too large. The value of "N" is set via the sysctl vfs.nfsd.dsdirsize
on the MDS, with the default being 20.
For production servers that will store a lot of files, this value should
probably be much larger.
It can be increased when the "nfsd" daemon is not running on the MDS,
once the "dsK" directories are created.
For pNFS aware NFSv4.1 clients, the FreeBSD server will return two pieces
of information to the client that allows it to do I/O directly to the DS.
DeviceInfo - This is relatively static information that defines what a DS
is. The critical bits of information returned by the FreeBSD
server is the IP address of the DS and, for the Flexible
File layout, that NFSv4.1 is to be used and that it is
"tightly coupled".
There is a "deviceid" which identifies the DeviceInfo.
Layout - This is per file and can be recalled by the server when it
is no longer valid. For the FreeBSD server, there is support
for two types of layout, called File and Flexible File layout.
Both allow the client to do I/O on the DS via NFSv4.1 I/O
operations. The Flexible File layout is a more recent variant
that allows specification of mirrors, where the client is
expected to do writes to all mirrors to maintain them in a
consistent state. The Flexible File layout also allows the
client to report I/O errors for a DS back to the MDS.
The Flexible File layout supports two variants referred to as
"tightly coupled" vs "loosely coupled". The FreeBSD server always
uses the "tightly coupled" variant where the client uses the
same credentials to do I/O on the DS as it would on the MDS.
For the "loosely coupled" variant, the layout specifies a
synthetic user/group that the client uses to do I/O on the DS.
The FreeBSD server does not do striping and always returns
layouts for the entire file. The critical information in a layout
is Read vs Read/Write and DeviceID(s) that identify which
DS(s) the data is stored on.
At this time, the MDS generates File Layout layouts to NFSv4.1 clients
that know how to do pNFS for the non-mirrored DS case unless the sysctl
vfs.nfsd.default_flexfile is set non-zero, in which case Flexible File
layouts are generated.
The mirrored DS configuration always generates Flexible File layouts.
For NFS clients that do not support NFSv4.1 pNFS, all I/O operations
are done against the MDS which acts as a proxy for the appropriate DS(s).
When the MDS receives an I/O RPC, it will do the RPC on the DS as a proxy.
If the DS is on the same machine, the MDS/DS will do the RPC on the DS as
a proxy and so on, until the machine runs out of some resource, such as
session slots or mbufs.
As such, DSs must be separate systems from the MDS.
Tested by: james.rose@framestore.com
Relnotes: yes
2018-06-12 19:36:32 +00:00
|
|
|
NFSDDSLOCK();
|
|
|
|
if (nfsv4_findmirror(nmp) != NULL) {
|
|
|
|
NFSDDSUNLOCK();
|
|
|
|
error = ENXIO;
|
|
|
|
nmp = NULL;
|
|
|
|
break;
|
|
|
|
}
|
2017-07-29 19:52:47 +00:00
|
|
|
mtx_lock(&nmp->nm_mtx);
|
|
|
|
if ((nmp->nm_privflag &
|
|
|
|
NFSMNTP_FORCEDISM) == 0) {
|
|
|
|
nmp->nm_privflag |=
|
|
|
|
(NFSMNTP_FORCEDISM |
|
|
|
|
NFSMNTP_CANCELRPCS);
|
|
|
|
mtx_unlock(&nmp->nm_mtx);
|
|
|
|
} else {
|
|
|
|
mtx_unlock(&nmp->nm_mtx);
|
2018-04-20 11:38:29 +00:00
|
|
|
nmp = NULL;
|
2017-07-29 19:52:47 +00:00
|
|
|
}
|
Merge the pNFS server code from projects/pnfs-planb-server into head.
This code merge adds a pNFS service to the NFSv4.1 server. Although it is
a large commit it should not affect behaviour for a non-pNFS NFS server.
Some documentation on how this works can be found at:
http://people.freebsd.org/~rmacklem/pnfs-planb-setup.txt
and will hopefully be turned into a proper document soon.
This is a merge of the kernel code. Userland and man page changes will
come soon, once the dust settles on this merge.
It has passed a "make universe", so I hope it will not cause build problems.
It also adds NFSv4.1 server support for the "current stateid".
Here is a brief overview of the pNFS service:
A pNFS service separates the Read/Write operations from all the other NFSv4.1
Metadata operations. It is hoped that this separation allows a pNFS service
to be configured that exceeds the limits of a single NFS server for either
storage capacity and/or I/O bandwidth.
It is possible to configure mirroring within the data servers (DSs) so that
the data storage file for an MDS file will be mirrored on two or more of
the DSs.
When this is used, failure of a DS will not stop the pNFS service and a
failed DS can be recovered once repaired while the pNFS service continues
to operate. Although two way mirroring would be the norm, it is possible
to set a mirroring level of up to four or the number of DSs, whichever is
less.
The Metadata server will always be a single point of failure,
just as a single NFS server is.
A Plan B pNFS service consists of a single MetaData Server (MDS) and K
Data Servers (DS), all of which are recent FreeBSD systems.
Clients will mount the MDS as they would a single NFS server.
When files are created, the MDS creates a file tree identical to what a
single NFS server creates, except that all the regular (VREG) files will
be empty. As such, if you look at the exported tree on the MDS directly
on the MDS server (not via an NFS mount), the files will all be of size 0.
Each of these files will also have two extended attributes in the system
attribute name space:
pnfsd.dsfile - This extended attribute stores the information that
the MDS needs to find the data storage file(s) on DS(s) for this file.
pnfsd.dsattr - This extended attribute stores the Size, AccessTime, ModifyTime
and Change attributes for the file, so that the MDS doesn't need to
acquire the attributes from the DS for every Getattr operation.
For each regular (VREG) file, the MDS creates a data storage file on one
(or more if mirroring is enabled) of the DSs in one of the "dsNN"
subdirectories. The name of this file is the file handle
of the file on the MDS in hexadecimal so that the name is unique.
The DSs use subdirectories named "ds0" to "dsN" so that no one directory
gets too large. The value of "N" is set via the sysctl vfs.nfsd.dsdirsize
on the MDS, with the default being 20.
For production servers that will store a lot of files, this value should
probably be much larger.
It can be increased when the "nfsd" daemon is not running on the MDS,
once the "dsK" directories are created.
For pNFS aware NFSv4.1 clients, the FreeBSD server will return two pieces
of information to the client that allows it to do I/O directly to the DS.
DeviceInfo - This is relatively static information that defines what a DS
is. The critical bits of information returned by the FreeBSD
server is the IP address of the DS and, for the Flexible
File layout, that NFSv4.1 is to be used and that it is
"tightly coupled".
There is a "deviceid" which identifies the DeviceInfo.
Layout - This is per file and can be recalled by the server when it
is no longer valid. For the FreeBSD server, there is support
for two types of layout, called File and Flexible File layout.
Both allow the client to do I/O on the DS via NFSv4.1 I/O
operations. The Flexible File layout is a more recent variant
that allows specification of mirrors, where the client is
expected to do writes to all mirrors to maintain them in a
consistent state. The Flexible File layout also allows the
client to report I/O errors for a DS back to the MDS.
The Flexible File layout supports two variants referred to as
"tightly coupled" vs "loosely coupled". The FreeBSD server always
uses the "tightly coupled" variant where the client uses the
same credentials to do I/O on the DS as it would on the MDS.
For the "loosely coupled" variant, the layout specifies a
synthetic user/group that the client uses to do I/O on the DS.
The FreeBSD server does not do striping and always returns
layouts for the entire file. The critical information in a layout
is Read vs Read/Write and DeviceID(s) that identify which
DS(s) the data is stored on.
At this time, the MDS generates File Layout layouts to NFSv4.1 clients
that know how to do pNFS for the non-mirrored DS case unless the sysctl
vfs.nfsd.default_flexfile is set non-zero, in which case Flexible File
layouts are generated.
The mirrored DS configuration always generates Flexible File layouts.
For NFS clients that do not support NFSv4.1 pNFS, all I/O operations
are done against the MDS which acts as a proxy for the appropriate DS(s).
When the MDS receives an I/O RPC, it will do the RPC on the DS as a proxy.
If the DS is on the same machine, the MDS/DS will do the RPC on the DS as
a proxy and so on, until the machine runs out of some resource, such as
session slots or mbufs.
As such, DSs must be separate systems from the MDS.
Tested by: james.rose@framestore.com
Relnotes: yes
2018-06-12 19:36:32 +00:00
|
|
|
NFSDDSUNLOCK();
|
2017-07-29 19:52:47 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
mtx_unlock(&mountlist_mtx);
|
|
|
|
|
|
|
|
if (nmp != NULL) {
|
|
|
|
/*
|
|
|
|
* Call newnfs_nmcancelreqs() to cause
|
|
|
|
* any RPCs in progress on the mount point to
|
|
|
|
* fail.
|
|
|
|
* This will cause any process waiting for an
|
|
|
|
* RPC to complete while holding a vnode lock
|
|
|
|
* on the mounted-on vnode (such as "df" or
|
|
|
|
* a non-forced "umount") to fail.
|
|
|
|
* This will unlock the mounted-on vnode so
|
|
|
|
* a forced dismount can succeed.
|
|
|
|
* Then clear NFSMNTP_CANCELRPCS and wakeup(),
|
|
|
|
* so that nfs_unmount() can complete.
|
|
|
|
*/
|
|
|
|
newnfs_nmcancelreqs(nmp);
|
|
|
|
mtx_lock(&nmp->nm_mtx);
|
|
|
|
nmp->nm_privflag &= ~NFSMNTP_CANCELRPCS;
|
|
|
|
wakeup(nmp);
|
|
|
|
mtx_unlock(&nmp->nm_mtx);
|
Merge the pNFS server code from projects/pnfs-planb-server into head.
This code merge adds a pNFS service to the NFSv4.1 server. Although it is
a large commit it should not affect behaviour for a non-pNFS NFS server.
Some documentation on how this works can be found at:
http://people.freebsd.org/~rmacklem/pnfs-planb-setup.txt
and will hopefully be turned into a proper document soon.
This is a merge of the kernel code. Userland and man page changes will
come soon, once the dust settles on this merge.
It has passed a "make universe", so I hope it will not cause build problems.
It also adds NFSv4.1 server support for the "current stateid".
Here is a brief overview of the pNFS service:
A pNFS service separates the Read/Write operations from all the other NFSv4.1
Metadata operations. It is hoped that this separation allows a pNFS service
to be configured that exceeds the limits of a single NFS server for either
storage capacity and/or I/O bandwidth.
It is possible to configure mirroring within the data servers (DSs) so that
the data storage file for an MDS file will be mirrored on two or more of
the DSs.
When this is used, failure of a DS will not stop the pNFS service and a
failed DS can be recovered once repaired while the pNFS service continues
to operate. Although two way mirroring would be the norm, it is possible
to set a mirroring level of up to four or the number of DSs, whichever is
less.
The Metadata server will always be a single point of failure,
just as a single NFS server is.
A Plan B pNFS service consists of a single MetaData Server (MDS) and K
Data Servers (DS), all of which are recent FreeBSD systems.
Clients will mount the MDS as they would a single NFS server.
When files are created, the MDS creates a file tree identical to what a
single NFS server creates, except that all the regular (VREG) files will
be empty. As such, if you look at the exported tree on the MDS directly
on the MDS server (not via an NFS mount), the files will all be of size 0.
Each of these files will also have two extended attributes in the system
attribute name space:
pnfsd.dsfile - This extended attribute stores the information that
the MDS needs to find the data storage file(s) on DS(s) for this file.
pnfsd.dsattr - This extended attribute stores the Size, AccessTime, ModifyTime
and Change attributes for the file, so that the MDS doesn't need to
acquire the attributes from the DS for every Getattr operation.
For each regular (VREG) file, the MDS creates a data storage file on one
(or more if mirroring is enabled) of the DSs in one of the "dsNN"
subdirectories. The name of this file is the file handle
of the file on the MDS in hexadecimal so that the name is unique.
The DSs use subdirectories named "ds0" to "dsN" so that no one directory
gets too large. The value of "N" is set via the sysctl vfs.nfsd.dsdirsize
on the MDS, with the default being 20.
For production servers that will store a lot of files, this value should
probably be much larger.
It can be increased when the "nfsd" daemon is not running on the MDS,
once the "dsK" directories are created.
For pNFS aware NFSv4.1 clients, the FreeBSD server will return two pieces
of information to the client that allows it to do I/O directly to the DS.
DeviceInfo - This is relatively static information that defines what a DS
is. The critical bits of information returned by the FreeBSD
server is the IP address of the DS and, for the Flexible
File layout, that NFSv4.1 is to be used and that it is
"tightly coupled".
There is a "deviceid" which identifies the DeviceInfo.
Layout - This is per file and can be recalled by the server when it
is no longer valid. For the FreeBSD server, there is support
for two types of layout, called File and Flexible File layout.
Both allow the client to do I/O on the DS via NFSv4.1 I/O
operations. The Flexible File layout is a more recent variant
that allows specification of mirrors, where the client is
expected to do writes to all mirrors to maintain them in a
consistent state. The Flexible File layout also allows the
client to report I/O errors for a DS back to the MDS.
The Flexible File layout supports two variants referred to as
"tightly coupled" vs "loosely coupled". The FreeBSD server always
uses the "tightly coupled" variant where the client uses the
same credentials to do I/O on the DS as it would on the MDS.
For the "loosely coupled" variant, the layout specifies a
synthetic user/group that the client uses to do I/O on the DS.
The FreeBSD server does not do striping and always returns
layouts for the entire file. The critical information in a layout
is Read vs Read/Write and DeviceID(s) that identify which
DS(s) the data is stored on.
At this time, the MDS generates File Layout layouts to NFSv4.1 clients
that know how to do pNFS for the non-mirrored DS case unless the sysctl
vfs.nfsd.default_flexfile is set non-zero, in which case Flexible File
layouts are generated.
The mirrored DS configuration always generates Flexible File layouts.
For NFS clients that do not support NFSv4.1 pNFS, all I/O operations
are done against the MDS which acts as a proxy for the appropriate DS(s).
When the MDS receives an I/O RPC, it will do the RPC on the DS as a proxy.
If the DS is on the same machine, the MDS/DS will do the RPC on the DS as
a proxy and so on, until the machine runs out of some resource, such as
session slots or mbufs.
As such, DSs must be separate systems from the MDS.
Tested by: james.rose@framestore.com
Relnotes: yes
2018-06-12 19:36:32 +00:00
|
|
|
} else if (error == 0)
|
2017-07-29 19:52:47 +00:00
|
|
|
error = EINVAL;
|
|
|
|
}
|
|
|
|
free(buf, M_TEMP);
|
2009-05-04 15:23:58 +00:00
|
|
|
} else {
|
|
|
|
error = EINVAL;
|
|
|
|
}
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
extern int (*nfsd_call_nfscl)(struct thread *, struct nfssvc_args *);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Called once to initialize data structures...
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
nfscl_modevent(module_t mod, int type, void *data)
|
|
|
|
{
|
|
|
|
int error = 0;
|
|
|
|
static int loaded = 0;
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case MOD_LOAD:
|
|
|
|
if (loaded)
|
|
|
|
return (0);
|
|
|
|
newnfs_portinit();
|
|
|
|
mtx_init(&ncl_iod_mutex, "ncl_iod_mutex", NULL, MTX_DEF);
|
|
|
|
nfscl_init();
|
|
|
|
NFSD_LOCK();
|
|
|
|
nfsrvd_cbinit(0);
|
|
|
|
NFSD_UNLOCK();
|
|
|
|
ncl_call_invalcaches = ncl_invalcaches;
|
|
|
|
nfsd_call_nfscl = nfssvc_nfscl;
|
|
|
|
loaded = 1;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MOD_UNLOAD:
|
|
|
|
if (nfs_numnfscbd != 0) {
|
|
|
|
error = EBUSY;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2009-10-20 15:06:18 +00:00
|
|
|
/*
|
|
|
|
* XXX: Unloading of nfscl module is unsupported.
|
|
|
|
*/
|
|
|
|
#if 0
|
2009-05-04 15:23:58 +00:00
|
|
|
ncl_call_invalcaches = NULL;
|
|
|
|
nfsd_call_nfscl = NULL;
|
Allocate pager bufs from UMA instead of 80-ish mutex protected linked list.
o In vm_pager_bufferinit() create pbuf_zone and start accounting on how many
pbufs are we going to have set.
In various subsystems that are going to utilize pbufs create private zones
via call to pbuf_zsecond_create(). The latter calls uma_zsecond_create(),
and sets a limit on created zone. After startup preallocate pbufs according
to requirements of all pbuf zones.
Subsystems that used to have a private limit with old allocator now have
private pbuf zones: md(4), fusefs, NFS client, smbfs, VFS cluster, FFS,
swap, vnode pager.
The following subsystems use shared pbuf zone: cam(4), nvme(4), physio(9),
aio(4). They should have their private limits, but changing that is out of
scope of this commit.
o Fetch tunable value of kern.nswbuf from init_param2() and while here move
NSWBUF_MIN to opt_param.h and eliminate opt_swap.h, that was holding only
this option.
Default values aren't touched by this commit, but they probably should be
reviewed wrt to modern hardware.
This change removes a tight bottleneck from sendfile(2) operation, that
uses pbufs in vnode pager. Other pagers also would benefit from faster
allocation.
Together with: gallatin
Tested by: pho
2019-01-15 01:02:16 +00:00
|
|
|
uma_zdestroy(ncl_pbuf_zone);
|
2009-05-04 15:23:58 +00:00
|
|
|
/* and get rid of the mutexes */
|
|
|
|
mtx_destroy(&ncl_iod_mutex);
|
|
|
|
loaded = 0;
|
|
|
|
break;
|
2009-10-20 15:06:18 +00:00
|
|
|
#else
|
|
|
|
/* FALLTHROUGH */
|
|
|
|
#endif
|
2009-05-04 15:23:58 +00:00
|
|
|
default:
|
|
|
|
error = EOPNOTSUPP;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
static moduledata_t nfscl_mod = {
|
|
|
|
"nfscl",
|
|
|
|
nfscl_modevent,
|
|
|
|
NULL,
|
|
|
|
};
|
2009-10-20 15:01:46 +00:00
|
|
|
DECLARE_MODULE(nfscl, nfscl_mod, SI_SUB_VFS, SI_ORDER_FIRST);
|
2009-05-04 15:23:58 +00:00
|
|
|
|
|
|
|
/* So that loader and kldload(2) can find us, wherever we are.. */
|
|
|
|
MODULE_VERSION(nfscl, 1);
|
2009-05-22 20:55:29 +00:00
|
|
|
MODULE_DEPEND(nfscl, nfscommon, 1, 1, 1);
|
2010-06-15 00:25:04 +00:00
|
|
|
MODULE_DEPEND(nfscl, krpc, 1, 1, 1);
|
|
|
|
MODULE_DEPEND(nfscl, nfssvc, 1, 1, 1);
|