/* $NetBSD: svc_vc.c,v 1.7 2000/08/03 00:01:53 fvdl Exp $ */

/*-
 * Copyright (c) 2009, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of Sun Microsystems, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#if defined(LIBC_SCCS) && !defined(lint)
static char *sccsid2 = "@(#)svc_tcp.c 1.21 87/08/11 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)svc_tcp.c 2.2 88/08/01 4.0 RPCSRC";
#endif
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * svc_vc.c, Server side for Connection Oriented based RPC.
 *
 * Actually implements two flavors of transporter -
 * a tcp rendezvouser (a listener and connection establisher)
 * and a record/tcp stream.
 */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sx.h>
#include <sys/systm.h>
#include <sys/uio.h>

#include <net/vnet.h>

#include <netinet/tcp.h>

#include <rpc/rpc.h>

#include <rpc/krpc.h>
#include <rpc/rpc_com.h>

#include <security/mac/mac_framework.h>

static bool_t svc_vc_rendezvous_recv(SVCXPRT *, struct rpc_msg *,
    struct sockaddr **, struct mbuf **);
static enum xprt_stat svc_vc_rendezvous_stat(SVCXPRT *);
static void svc_vc_rendezvous_destroy(SVCXPRT *);
static bool_t svc_vc_null(void);
static void svc_vc_destroy(SVCXPRT *);
static enum xprt_stat svc_vc_stat(SVCXPRT *);
static bool_t svc_vc_ack(SVCXPRT *, uint32_t *);
static bool_t svc_vc_recv(SVCXPRT *, struct rpc_msg *,
    struct sockaddr **, struct mbuf **);
static bool_t svc_vc_reply(SVCXPRT *, struct rpc_msg *,
    struct sockaddr *, struct mbuf *, uint32_t *seq);
static bool_t svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in);
static bool_t svc_vc_rendezvous_control (SVCXPRT *xprt, const u_int rq,
    void *in);
static void svc_vc_backchannel_destroy(SVCXPRT *);
static enum xprt_stat svc_vc_backchannel_stat(SVCXPRT *);
static bool_t svc_vc_backchannel_recv(SVCXPRT *, struct rpc_msg *,
    struct sockaddr **, struct mbuf **);
static bool_t svc_vc_backchannel_reply(SVCXPRT *, struct rpc_msg *,
    struct sockaddr *, struct mbuf *, uint32_t *);
static bool_t svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq,
    void *in);
static SVCXPRT *svc_vc_create_conn(SVCPOOL *pool, struct socket *so,
    struct sockaddr *raddr);
static int svc_vc_accept(struct socket *head, struct socket **sop);
static int svc_vc_soupcall(struct socket *so, void *arg, int waitflag);
static int svc_vc_rendezvous_soupcall(struct socket *, void *, int);

static struct xp_ops svc_vc_rendezvous_ops = {
	.xp_recv =	svc_vc_rendezvous_recv,
	.xp_stat =	svc_vc_rendezvous_stat,
	.xp_reply =	(bool_t (*)(SVCXPRT *, struct rpc_msg *,
		struct sockaddr *, struct mbuf *, uint32_t *))svc_vc_null,
	.xp_destroy =	svc_vc_rendezvous_destroy,
	.xp_control =	svc_vc_rendezvous_control
};

static struct xp_ops svc_vc_ops = {
	.xp_recv =	svc_vc_recv,
	.xp_stat =	svc_vc_stat,
	.xp_ack =	svc_vc_ack,
	.xp_reply =	svc_vc_reply,
	.xp_destroy =	svc_vc_destroy,
	.xp_control =	svc_vc_control
};

static struct xp_ops svc_vc_backchannel_ops = {
	.xp_recv =	svc_vc_backchannel_recv,
	.xp_stat =	svc_vc_backchannel_stat,
	.xp_reply =	svc_vc_backchannel_reply,
	.xp_destroy =	svc_vc_backchannel_destroy,
	.xp_control =	svc_vc_backchannel_control
};
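
/*
 * The generic RPC server loop is expected to dispatch through the xp_ops
 * method tables above rather than call the svc_vc_*() routines directly.
 * Illustrative sketch only, assuming the SVC_RECV() dispatch macro from
 * <rpc/svc.h>; the argument names here are not from this file:
 *
 *	if (SVC_RECV(xprt, &msg, &addr, &args))
 *		... process the decoded call ...
 */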

/*
 * Usage:
 *	xprt = svc_vc_create(sock, send_buf_size, recv_buf_size);
 *
 * Creates, registers, and returns an (RPC) TCP-based transporter.
 * Once *xprt is initialized, it is registered as a transporter;
 * see (svc.h, xprt_register). This routine returns
 * NULL if a problem occurred.
 *
 * The socket passed in is expected to refer to a bound, but
 * not yet connected socket.
 *
 * Since streams do buffered I/O similar to stdio, the caller can specify
 * how big the send and receive buffers are via the second and third parms;
 * 0 => use the system default.
 */
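/*
 * Example call (illustrative sketch only; 'pool' and 'so' are assumed to be
 * an existing SVCPOOL and a bound, not yet connected socket as described
 * above, and 0/0 selects the system default buffer sizes):
 *
 *	xprt = svc_vc_create(pool, so, 0, 0);
 *	if (xprt == NULL)
 *		... creation failed ...
 */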
SVCXPRT *
svc_vc_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
    size_t recvsize)
{
	SVCXPRT *xprt;
	struct sockaddr* sa;
	int error;

	SOCK_LOCK(so);
	if (so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED)) {
		SOCK_UNLOCK(so);
		CURVNET_SET(so->so_vnet);
		error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
		CURVNET_RESTORE();
		if (error)
			return (NULL);
		xprt = svc_vc_create_conn(pool, so, sa);
		free(sa, M_SONAME);
		return (xprt);
	}
	SOCK_UNLOCK(so);

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = NULL;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_rendezvous_ops;

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	CURVNET_RESTORE();
	if (error) {
		goto cleanup_svc_vc_create;
	}

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	solisten(so, -1, curthread);

	SOLISTEN_LOCK(so);
	xprt->xp_upcallset = 1;
	solisten_upcall_set(so, svc_vc_rendezvous_soupcall, xprt);
	SOLISTEN_UNLOCK(so);

	return (xprt);

cleanup_svc_vc_create:
	sx_destroy(&xprt->xp_lock);
	svc_xprt_free(xprt);

	return (NULL);
}

/*
 * Create a new transport for a socket obtained via soaccept().
 */
SVCXPRT *
svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
{
	SVCXPRT *xprt;
	struct cf_conn *cd;
	struct sockaddr* sa = NULL;
	struct sockopt opt;
	int one = 1;
	int error;

	bzero(&opt, sizeof(struct sockopt));
	opt.sopt_dir = SOPT_SET;
	opt.sopt_level = SOL_SOCKET;
	opt.sopt_name = SO_KEEPALIVE;
	opt.sopt_val = &one;
	opt.sopt_valsize = sizeof(one);
	error = sosetopt(so, &opt);
	if (error) {
		return (NULL);
	}

	if (so->so_proto->pr_protocol == IPPROTO_TCP) {
		bzero(&opt, sizeof(struct sockopt));
		opt.sopt_dir = SOPT_SET;
		opt.sopt_level = IPPROTO_TCP;
		opt.sopt_name = TCP_NODELAY;
		opt.sopt_val = &one;
		opt.sopt_valsize = sizeof(one);
		error = sosetopt(so, &opt);
		if (error) {
			return (NULL);
		}
	}

	cd = mem_alloc(sizeof(*cd));
	cd->strm_stat = XPRT_IDLE;

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = so;
	xprt->xp_p1 = cd;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_ops;

	/*
	 * See http://www.connectathon.org/talks96/nfstcp.pdf - client
	 * has a 5 minute timer, server has a 6 minute timer.
	 */
	xprt->xp_idletimeout = 6 * 60;

	memcpy(&xprt->xp_rtaddr, raddr, raddr->sa_len);

	CURVNET_SET(so->so_vnet);
	error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
	CURVNET_RESTORE();
	if (error)
		goto cleanup_svc_vc_create;

	memcpy(&xprt->xp_ltaddr, sa, sa->sa_len);
	free(sa, M_SONAME);

	xprt_register(xprt);

	SOCKBUF_LOCK(&so->so_rcv);
	xprt->xp_upcallset = 1;
	soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * Throw the transport into the active list in case it already
	 * has some data buffered.
	 */
	sx_xlock(&xprt->xp_lock);
	xprt_active(xprt);
	sx_xunlock(&xprt->xp_lock);

	return (xprt);

cleanup_svc_vc_create:
	sx_destroy(&xprt->xp_lock);
	svc_xprt_free(xprt);
	mem_free(cd, sizeof(*cd));

	return (NULL);
}

/*
 * Create a new transport for a backchannel on a clnt_vc socket.
 */
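/*
 * Note: this transport is created without a socket of its own (xp_socket is
 * left NULL below); it is presumably associated with the owning clnt_vc
 * connection by the caller, and only the backchannel xp_ops methods apply.
 */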
SVCXPRT *
svc_vc_create_backchannel(SVCPOOL *pool)
{
	SVCXPRT *xprt = NULL;
	struct cf_conn *cd = NULL;

	cd = mem_alloc(sizeof(*cd));
	cd->strm_stat = XPRT_IDLE;

	xprt = svc_xprt_alloc();
	sx_init(&xprt->xp_lock, "xprt->xp_lock");
	xprt->xp_pool = pool;
	xprt->xp_socket = NULL;
	xprt->xp_p1 = cd;
	xprt->xp_p2 = NULL;
	xprt->xp_ops = &svc_vc_backchannel_ops;
	return (xprt);
}

/*
 * This does all of the accept except the final call to soaccept. The
 * caller will call soaccept after dropping its locks (soaccept may
 * call malloc).
 */
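/*
 * Expected calling pattern (illustrative sketch only; the caller-side names
 * and the soaccept() step are assumptions based on the comment above, not
 * code from this file):
 *
 *	error = svc_vc_accept(head, &so);
 *	if (error == 0) {
 *		... drop locks that must not be held over allocation ...
 *		error = soaccept(so, &sa);
 *	}
 */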
int
svc_vc_accept(struct socket *head, struct socket **sop)
{
	struct socket *so;
	int error = 0;
	short nbio;
|
|
|
/* XXXGL: shouldn't that be an assertion? */
|
2008-03-26 15:23:12 +00:00
|
|
|
if ((head->so_options & SO_ACCEPTCONN) == 0) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto done;
|
|
|
|
}
|
|
|
|
#ifdef MAC
	error = mac_socket_check_accept(curthread->td_ucred, head);
	if (error != 0)
		goto done;
#endif
	/*
	 * XXXGL: we want non-blocking semantics.  The socket could be a
	 * socket created by the kernel as well as a socket shared with
	 * userland, so we can't be sure about the presence of SS_NBIO.
	 * We also shall not toggle it on the socket, since that may
	 * surprise userland.  So we set SS_NBIO only temporarily.
	 */
	SOLISTEN_LOCK(head);
	nbio = head->so_state & SS_NBIO;
	head->so_state |= SS_NBIO;
	error = solisten_dequeue(head, &so, 0);
	head->so_state &= (nbio & ~SS_NBIO);
	if (error)
		goto done;

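	/* Carry the listener's SS_NBIO setting over to the accepted socket. */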
	so->so_state |= nbio;
	*sop = so;

	/* connection has been removed from the listen queue */
	KNOTE_UNLOCKED(&head->so_rdsel.si_note, 0);
done:
	return (error);
}

/*ARGSUSED*/
static bool_t
svc_vc_rendezvous_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct socket *so = NULL;
	struct sockaddr *sa = NULL;
	int error;
	SVCXPRT *new_xprt;

	/*
	 * The socket upcall calls xprt_active() which will eventually
	 * cause the server to call us here. We attempt to accept a
	 * connection from the socket and turn it into a new
	 * transport. If the accept fails, we have drained all pending
	 * connections so we call xprt_inactive().
	 */
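	/*
	 * Illustrative sketch only (not the exact upcall registered by
	 * this transport): the listening socket's upcall does nothing
	 * more than mark the rendezvous transport active, so the actual
	 * dequeue/accept work happens here in a service thread:
	 *
	 *	static int
	 *	example_rendezvous_upcall(struct socket *head, void *arg,
	 *	    int waitflag)
	 *	{
	 *		SVCXPRT *xprt = arg;
	 *
	 *		xprt_active(xprt);
	 *		return (SU_OK);
	 *	}
	 */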
	sx_xlock(&xprt->xp_lock);

	error = svc_vc_accept(xprt->xp_socket, &so);

	if (error == EWOULDBLOCK) {
		/*
		 * We must re-test for new connections after taking
		 * the lock to protect us in the case where a new
		 * connection arrives after our call to accept fails
		 * with EWOULDBLOCK.
		 */
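		/* sol_comp is the listener's queue of completed connections. */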
		SOLISTEN_LOCK(xprt->xp_socket);
		if (TAILQ_EMPTY(&xprt->xp_socket->sol_comp))
			xprt_inactive_self(xprt);
		SOLISTEN_UNLOCK(xprt->xp_socket);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	if (error) {
		SOLISTEN_LOCK(xprt->xp_socket);
		if (xprt->xp_upcallset) {
			xprt->xp_upcallset = 0;
			soupcall_clear(xprt->xp_socket, SO_RCV);
		}
		SOLISTEN_UNLOCK(xprt->xp_socket);
		xprt_inactive_self(xprt);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}

	sx_xunlock(&xprt->xp_lock);

	sa = NULL;
	error = soaccept(so, &sa);

	if (error) {
		/*
		 * XXX not sure if I need to call sofree or soclose here.
		 */
		if (sa)
			free(sa, M_SONAME);
		return (FALSE);
	}

	/*
	 * svc_vc_create_conn will call xprt_register - we don't need
	 * to do anything with the new connection except dereference it.
	 */
	new_xprt = svc_vc_create_conn(xprt->xp_pool, so, sa);
	if (!new_xprt) {
		soclose(so);
	} else {
		SVC_RELEASE(new_xprt);
	}

	free(sa, M_SONAME);

	return (FALSE); /* there is never an rpc msg to be processed */
}

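/*
 * The rendezvous transport only accepts connections; it never carries
 * RPC requests of its own, so it always reports itself as idle.
 */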
/*ARGSUSED*/
static enum xprt_stat
svc_vc_rendezvous_stat(SVCXPRT *xprt)
{

	return (XPRT_IDLE);
}

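/*
 * Teardown shared by the rendezvous and connection destructors: close
 * the socket (if any), free the netid string (if set) and free the
 * transport itself.
 */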
static void
svc_vc_destroy_common(SVCXPRT *xprt)
{

	if (xprt->xp_socket)
		(void)soclose(xprt->xp_socket);

	if (xprt->xp_netid)
		(void) mem_free(xprt->xp_netid, strlen(xprt->xp_netid) + 1);
	svc_xprt_free(xprt);
}

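/*
 * Destroy a rendezvous (listening) transport: remove the listen upcall
 * under the listening socket's lock, then do the common teardown.
 */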
static void
svc_vc_rendezvous_destroy(SVCXPRT *xprt)
{

	SOLISTEN_LOCK(xprt->xp_socket);
	if (xprt->xp_upcallset) {
		xprt->xp_upcallset = 0;
		solisten_upcall_set(xprt->xp_socket, NULL, NULL);
	}
	SOLISTEN_UNLOCK(xprt->xp_socket);

	svc_vc_destroy_common(xprt);
}

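/*
 * Destroy a connected (data) transport: clear the receive upcall under
 * the socket buffer lock, run the common teardown, then free any
 * request mbufs still held in the cf_conn and the cf_conn itself.
 */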
static void
svc_vc_destroy(SVCXPRT *xprt)
{
	struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;


	SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
	if (xprt->xp_upcallset) {
		xprt->xp_upcallset = 0;
		soupcall_clear(xprt->xp_socket, SO_RCV);
	}
	SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);

	svc_vc_destroy_common(xprt);

	if (cd->mreq)
		m_freem(cd->mreq);
	if (cd->mpending)
		m_freem(cd->mpending);
	mem_free(cd, sizeof(*cd));
}

static void
svc_vc_backchannel_destroy(SVCXPRT *xprt)
{
	struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;
	struct mbuf *m, *m2;

	svc_xprt_free(xprt);
	m = cd->mreq;
	while (m != NULL) {
		m2 = m;
		m = m->m_nextpkt;
		m_freem(m2);
	}
	mem_free(cd, sizeof(*cd));
}

/*ARGSUSED*/
static bool_t
svc_vc_control(SVCXPRT *xprt, const u_int rq, void *in)
{
	return (FALSE);
}

static bool_t
svc_vc_rendezvous_control(SVCXPRT *xprt, const u_int rq, void *in)
{

	return (FALSE);
}

static bool_t
svc_vc_backchannel_control(SVCXPRT *xprt, const u_int rq, void *in)
{

	return (FALSE);
}

static enum xprt_stat
svc_vc_stat(SVCXPRT *xprt)
{
	struct cf_conn *cd;

	cd = (struct cf_conn *)(xprt->xp_p1);

	if (cd->strm_stat == XPRT_DIED)
		return (XPRT_DIED);

	if (cd->mreq != NULL && cd->resid == 0 && cd->eor)
		return (XPRT_MOREREQS);

	if (soreadable(xprt->xp_socket))
		return (XPRT_MOREREQS);

	return (XPRT_IDLE);
}

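/*
 * Added descriptive comment: svc_vc_ack() reports the number of reply
 * bytes that have been handed to the socket and have since drained from
 * its send buffer (xp_snt_cnt minus the bytes still queued in so_snd).
 */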
static bool_t
svc_vc_ack(SVCXPRT *xprt, uint32_t *ack)
{

	*ack = atomic_load_acq_32(&xprt->xp_snt_cnt);
	*ack -= sbused(&xprt->xp_socket->so_snd);
	return (TRUE);
}

static enum xprt_stat
svc_vc_backchannel_stat(SVCXPRT *xprt)
{
	struct cf_conn *cd;

	cd = (struct cf_conn *)(xprt->xp_p1);

	if (cd->mreq != NULL)
		return (XPRT_MOREREQS);

	return (XPRT_IDLE);
}

/*
 * If we have an mbuf chain in cd->mpending, try to parse a record from it,
 * leaving the result in cd->mreq. If we don't have a complete record, leave
 * the partial result in cd->mreq and try to read more from the socket.
 */
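/*
 * Added descriptive comment: returns FALSE when there is not yet enough
 * buffered data to complete the current record marker and more must be
 * read from the socket; returns TRUE otherwise.
 */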
static int
svc_vc_process_pending(SVCXPRT *xprt)
{
	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
	struct socket *so = xprt->xp_socket;
	struct mbuf *m;

	/*
	 * If cd->resid is non-zero, we have part of the
	 * record already, otherwise we are expecting a record
	 * marker.
	 */
	if (!cd->resid && cd->mpending) {
		/*
		 * See if there is enough data buffered to
		 * make up a record marker. Make sure we can
		 * handle the case where the record marker is
		 * split across more than one mbuf.
		 */
		size_t n = 0;
		uint32_t header;

		m = cd->mpending;
		while (n < sizeof(uint32_t) && m) {
			n += m->m_len;
			m = m->m_next;
		}
		if (n < sizeof(uint32_t)) {
			so->so_rcv.sb_lowat = sizeof(uint32_t) - n;
			return (FALSE);
		}
		m_copydata(cd->mpending, 0, sizeof(header),
		    (char *)&header);
		header = ntohl(header);
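		/*
		 * Added descriptive comment: per the RPC record marking
		 * standard (RFC 5531, section 11), the top bit of the
		 * 32-bit mark flags the last fragment of a record and the
		 * low 31 bits give the fragment length.
		 */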
		cd->eor = (header & 0x80000000) != 0;
		cd->resid = header & 0x7fffffff;
		m_adj(cd->mpending, sizeof(uint32_t));
	}

	/*
	 * Start pulling off mbufs from cd->mpending
	 * until we either have a complete record or
	 * we run out of data. We use m_split to pull
	 * data - it will pull as much as possible and
	 * split the last mbuf if necessary.
	 */
	while (cd->mpending && cd->resid) {
		m = cd->mpending;
		if (cd->mpending->m_next
		    || cd->mpending->m_len > cd->resid)
			cd->mpending = m_split(cd->mpending,
			    cd->resid, M_WAITOK);
		else
			cd->mpending = NULL;
		if (cd->mreq)
			m_last(cd->mreq)->m_next = m;
		else
			cd->mreq = m;
		while (m) {
			cd->resid -= m->m_len;
			m = m->m_next;
		}
	}

	/*
	 * Block receive upcalls if we have more data pending,
	 * otherwise report our need.
	 */
	if (cd->mpending)
		so->so_rcv.sb_lowat = INT_MAX;
	else
		so->so_rcv.sb_lowat =
		    imax(1, imin(cd->resid, so->so_rcv.sb_hiwat / 2));
	return (TRUE);
}

static bool_t
svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
	struct uio uio;
	struct mbuf *m;
	struct socket* so = xprt->xp_socket;
	XDR xdrs;
	int error, rcvflag;
	uint32_t xid_plus_direction[2];

	/*
	 * Serialise access to the socket and our own record parsing
	 * state.
	 */
	sx_xlock(&xprt->xp_lock);

	for (;;) {
		/* If we have no request ready, check pending queue. */
		while (cd->mpending &&
		    (cd->mreq == NULL || cd->resid != 0 || !cd->eor)) {
			if (!svc_vc_process_pending(xprt))
				break;
		}

		/* Process and return complete request in cd->mreq. */
		if (cd->mreq != NULL && cd->resid == 0 && cd->eor) {

			/*
			 * Now, check for a backchannel reply.
			 * The XID is in the first uint32_t of the reply
			 * and the message direction is the second one.
			 */
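			/*
			 * Added descriptive comment: if the direction is
			 * REPLY, the mbuf chain is handed to the backchannel
			 * client code matched by its XID instead of being
			 * dispatched as a new call.
			 */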
			if ((cd->mreq->m_len >= sizeof(xid_plus_direction) ||
			    m_length(cd->mreq, NULL) >=
			    sizeof(xid_plus_direction)) &&
			    xprt->xp_p2 != NULL) {
				m_copydata(cd->mreq, 0,
				    sizeof(xid_plus_direction),
				    (char *)xid_plus_direction);
				xid_plus_direction[0] =
				    ntohl(xid_plus_direction[0]);
				xid_plus_direction[1] =
				    ntohl(xid_plus_direction[1]);
				/* Check message direction. */
				if (xid_plus_direction[1] == REPLY) {
					clnt_bck_svccall(xprt->xp_p2,
					    cd->mreq,
					    xid_plus_direction[0]);
					cd->mreq = NULL;
					continue;
				}
			}

			xdrmbuf_create(&xdrs, cd->mreq, XDR_DECODE);
			cd->mreq = NULL;

			/* Check for next request in a pending queue. */
			svc_vc_process_pending(xprt);
			if (cd->mreq == NULL || cd->resid != 0) {
				SOCKBUF_LOCK(&so->so_rcv);
				if (!soreadable(so))
					xprt_inactive_self(xprt);
				SOCKBUF_UNLOCK(&so->so_rcv);
			}

			sx_xunlock(&xprt->xp_lock);

			if (! xdr_callmsg(&xdrs, msg)) {
				XDR_DESTROY(&xdrs);
				return (FALSE);
			}

			*addrp = NULL;
			*mp = xdrmbuf_getall(&xdrs);
			XDR_DESTROY(&xdrs);

			return (TRUE);
		}

		/*
		 * The socket upcall calls xprt_active() which will eventually
		 * cause the server to call us here. We attempt to
		 * read as much as possible from the socket and put
		 * the result in cd->mpending. If the read fails,
		 * we have drained both cd->mpending and the socket so
		 * we can call xprt_inactive().
		 */
		uio.uio_resid = 1000000000;
		uio.uio_td = curthread;
		m = NULL;
		rcvflag = MSG_DONTWAIT;
		error = soreceive(so, NULL, &uio, &m, NULL, &rcvflag);

		if (error == EWOULDBLOCK) {
			/*
			 * We must re-test for readability after
			 * taking the lock to protect us in the case
			 * where a new packet arrives on the socket
			 * after our call to soreceive fails with
			 * EWOULDBLOCK.
			 */
			SOCKBUF_LOCK(&so->so_rcv);
			if (!soreadable(so))
				xprt_inactive_self(xprt);
			SOCKBUF_UNLOCK(&so->so_rcv);
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (error) {
			SOCKBUF_LOCK(&so->so_rcv);
			if (xprt->xp_upcallset) {
				xprt->xp_upcallset = 0;
				soupcall_clear(so, SO_RCV);
			}
			SOCKBUF_UNLOCK(&so->so_rcv);
			xprt_inactive_self(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (!m) {
			/*
			 * EOF - the other end has closed the socket.
			 */
			xprt_inactive_self(xprt);
			cd->strm_stat = XPRT_DIED;
			sx_xunlock(&xprt->xp_lock);
			return (FALSE);
		}

		if (cd->mpending)
			m_last(cd->mpending)->m_next = m;
		else
			cd->mpending = m;
	}
}

static bool_t
svc_vc_backchannel_recv(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr **addrp, struct mbuf **mp)
{
	struct cf_conn *cd = (struct cf_conn *) xprt->xp_p1;
	struct ct_data *ct;
	struct mbuf *m;
	XDR xdrs;

	sx_xlock(&xprt->xp_lock);
	ct = (struct ct_data *)xprt->xp_p2;
	if (ct == NULL) {
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}
	mtx_lock(&ct->ct_lock);
	m = cd->mreq;
	if (m == NULL) {
		xprt_inactive_self(xprt);
		mtx_unlock(&ct->ct_lock);
		sx_xunlock(&xprt->xp_lock);
		return (FALSE);
	}
	cd->mreq = m->m_nextpkt;
	mtx_unlock(&ct->ct_lock);
	sx_xunlock(&xprt->xp_lock);

	xdrmbuf_create(&xdrs, m, XDR_DECODE);
	if (! xdr_callmsg(&xdrs, msg)) {
		XDR_DESTROY(&xdrs);
		return (FALSE);
	}
	*addrp = NULL;
	*mp = xdrmbuf_getall(&xdrs);
	XDR_DESTROY(&xdrs);
	return (TRUE);
}

static bool_t
svc_vc_reply(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr *addr, struct mbuf *m, uint32_t *seq)
{
	XDR xdrs;
	struct mbuf *mrep;
	bool_t stat = TRUE;
	int error, len;

	/*
	 * Leave space for record mark.
	 */
	mrep = m_gethdr(M_WAITOK, MT_DATA);
	mrep->m_data += sizeof(uint32_t);

	xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);

	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		if (!xdr_replymsg(&xdrs, msg))
			stat = FALSE;
		else
			xdrmbuf_append(&xdrs, m);
	} else {
		stat = xdr_replymsg(&xdrs, msg);
	}

	if (stat) {
		m_fixhdr(mrep);

		/*
		 * Prepend a record marker containing the reply length.
		 */
		M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK);
		len = mrep->m_pkthdr.len;
		*mtod(mrep, uint32_t *) =
		    htonl(0x80000000 | (len - sizeof(uint32_t)));
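		/*
		 * Added descriptive comment: xp_snd_cnt counts bytes queued
		 * for transmission and xp_snt_cnt counts bytes successfully
		 * handed to sosend(); svc_vc_ack() compares the latter with
		 * the send buffer to report transmission progress.
		 */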
		atomic_add_32(&xprt->xp_snd_cnt, len);
		error = sosend(xprt->xp_socket, NULL, NULL, mrep, NULL,
		    0, curthread);
		if (!error) {
			atomic_add_rel_32(&xprt->xp_snt_cnt, len);
			if (seq)
				*seq = xprt->xp_snd_cnt;
			stat = TRUE;
		} else
			atomic_subtract_32(&xprt->xp_snd_cnt, len);
	} else {
		m_freem(mrep);
	}

	XDR_DESTROY(&xdrs);

	return (stat);
}

static bool_t
svc_vc_backchannel_reply(SVCXPRT *xprt, struct rpc_msg *msg,
    struct sockaddr *addr, struct mbuf *m, uint32_t *seq)
{
	struct ct_data *ct;
	XDR xdrs;
	struct mbuf *mrep;
	bool_t stat = TRUE;
	int error;

	/*
	 * Leave space for record mark.
	 */
	mrep = m_gethdr(M_WAITOK, MT_DATA);
	mrep->m_data += sizeof(uint32_t);

	xdrmbuf_create(&xdrs, mrep, XDR_ENCODE);

	if (msg->rm_reply.rp_stat == MSG_ACCEPTED &&
	    msg->rm_reply.rp_acpt.ar_stat == SUCCESS) {
		if (!xdr_replymsg(&xdrs, msg))
			stat = FALSE;
		else
			xdrmbuf_append(&xdrs, m);
	} else {
		stat = xdr_replymsg(&xdrs, msg);
	}

	if (stat) {
		m_fixhdr(mrep);

		/*
		 * Prepend a record marker containing the reply length.
		 */
		M_PREPEND(mrep, sizeof(uint32_t), M_WAITOK);
		*mtod(mrep, uint32_t *) =
		    htonl(0x80000000 | (mrep->m_pkthdr.len
			- sizeof(uint32_t)));
		sx_xlock(&xprt->xp_lock);
		ct = (struct ct_data *)xprt->xp_p2;
		if (ct != NULL)
			error = sosend(ct->ct_socket, NULL, NULL, mrep, NULL,
			    0, curthread);
		else
			error = EPIPE;
		sx_xunlock(&xprt->xp_lock);
		if (!error) {
			stat = TRUE;
		}
	} else {
		m_freem(mrep);
	}

	XDR_DESTROY(&xdrs);

	return (stat);
}

static bool_t
svc_vc_null()
{

	return (FALSE);
}

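/*
 * Added descriptive comment: receive upcall for a connected transport.
 * It runs from the socket layer and only flags the transport active so
 * that a service thread performs the actual soreceive() in svc_vc_recv().
 */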
static int
svc_vc_soupcall(struct socket *so, void *arg, int waitflag)
{
	SVCXPRT *xprt = (SVCXPRT *) arg;

	if (soreadable(xprt->xp_socket))
		xprt_active(xprt);
	return (SU_OK);
}

Listening sockets improvements.
o Separate the fields of struct socket that belong to listening from
  the fields that belong to normal dataflow, and put them in a union.
  This shrinks the structure a bit.
  - Move the selinfos out of the socket buffers and into the socket.
    The first reason is to support the braindamaged scenario where a
    socket is added to kevent(2) and listen(2) is then called on it.
    The second reason is a future plan to make socket buffers
    pluggable, so that the socket buffer of a dataflow socket can be
    replaced; in that case we also want to keep the same selinfos
    through the lifetime of the socket.
  - Remove struct so_accf. Since the listening state no longer affects
    the size of struct socket, just move its fields into the listening
    part of the union.
  - Provide the sol_upcall field, enforce that so_upcall_set() may be
    called only on a dataflow socket (which has buffers), and provide
    solisten_upcall_set() for listening sockets.
o Remove the ACCEPT_LOCK() global.
  - Add a mutex to struct socket, to be used instead of the socket
    buffer lock to protect fields of struct socket that don't belong
    to a socket buffer.
  - Allow two socket locks to be acquired at once, provided the first
    one belongs to a listening socket.
  - Make soref()/sorele() use atomic(9). This allows soref() to be
    done in some situations without owning the socket lock. There is
    room for improvement here; it should be possible to make the
    locking in sorele() optional as well.
  - Most protocols aren't touched by this change, except UNIX local
    sockets; see below for more information.
o Reduce copy-and-paste in kernel modules that accept connections from
  listening sockets: provide the function solisten_dequeue(), and use
  it in the following modules: ctl(4), iscsi(4), ng_btsocket(4),
  ng_ksocket(4), infiniband, rpc (a usage sketch follows the
  rendezvous upcall below).
o UNIX local sockets.
  - Removal of the ACCEPT_LOCK() global uncovered several races in the
    UNIX local sockets. Most of the races occur around spawning a new
    socket while connecting to a local listening socket. To cover
    them, we need to hold the locks on both PCBs when spawning a third
    one, which means holding them across sonewconn(). This creates a
    LOR between the PCB locks and unp_list_lock.
  - To fix the new LOR, abandon the global unp_list_lock in favor of
    the global unp_link_lock. Separating these two locks didn't
    provide any extra parallelism in the UNIX sockets anyway.
  - A call into uipc_attach() may now happen with unp_link_lock held,
    if we are accepting, or without unp_link_lock, if we are just
    creating a socket.
  - Another problem in UNIX sockets was that uipc_close() basically
    did nothing for a listening socket: the vnode remained open for
    connections. This is fixed by removing the vnode in uipc_close().
    Maybe the right way would be to do this for all sockets (not only
    listening ones) and simply move the vnode teardown from
    uipc_detach() to uipc_close()?
Sponsored by: Netflix
Differential Revision: https://reviews.freebsd.org/D9770
static int
svc_vc_rendezvous_soupcall(struct socket *head, void *arg, int waitflag)
{
	SVCXPRT *xprt = (SVCXPRT *) arg;

	if (!TAILQ_EMPTY(&head->sol_comp))
		xprt_active(xprt);
	return (SU_OK);
}
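The rendezvous upcall above only marks the transport active; the
completed connection is pulled off the listening socket's queue later,
on a service thread, using the solisten_dequeue() mentioned in the
commit message. A hedged sketch of how a kernel consumer of that API
might wire the two halves together; everything prefixed with my_ is
hypothetical, and the locking contract of solisten_dequeue() (head
locked on entry, unlocked on return) is assumed from the in-tree
callers rather than documented here:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

struct my_listener {
	struct socket	*lst_head;	/* listening socket */
};

/* Upcall: note that a completed connection is waiting, wake a worker. */
static int
my_listen_upcall(struct socket *head, void *arg, int waitflag)
{
	struct my_listener *lst = arg;

	if (!TAILQ_EMPTY(&head->sol_comp))
		wakeup(lst);
	return (SU_OK);
}

/* Register the upcall on an already-listening socket. */
static void
my_listener_start(struct my_listener *lst)
{
	struct socket *head = lst->lst_head;

	SOLISTEN_LOCK(head);
	solisten_upcall_set(head, my_listen_upcall, lst);
	SOLISTEN_UNLOCK(head);
}

/*
 * Worker side: pull one completed connection off the queue.  On
 * success *sop points at the new socket; the head is assumed to be
 * returned unlocked.
 */
static int
my_listener_accept(struct my_listener *lst, struct socket **sop)
{
	struct socket *head = lst->lst_head;

	SOLISTEN_LOCK(head);
	return (solisten_dequeue(head, sop, 0));
}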
#if 0
/*
 * Get the effective UID of the sending process. Used by rpcbind, keyserv
 * and rpc.yppasswdd on AF_LOCAL.
 */
int
__rpc_get_local_uid(SVCXPRT *transp, uid_t *uid) {
	int sock, ret;
	gid_t egid;
	uid_t euid;
	struct sockaddr *sa;

	sock = transp->xp_fd;
	sa = (struct sockaddr *)transp->xp_rtaddr;
	if (sa->sa_family == AF_LOCAL) {
		ret = getpeereid(sock, &euid, &egid);
		if (ret == 0)
			*uid = euid;
		return (ret);
	} else
		return (-1);
}
#endif
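Although the block above is compiled out in the kernel build, the
userland pattern it preserves is straightforward: on an AF_LOCAL
socket, getpeereid(2) reports the effective credentials of the peer
process. A minimal userland sketch in the same spirit;
report_peer_creds() is an illustrative name, not part of any library:

#include <sys/types.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Print the effective uid/gid of the process on the other end of a
 * connected AF_LOCAL socket, as __rpc_get_local_uid() above would
 * return them to rpcbind or keyserv.
 */
static int
report_peer_creds(int fd)
{
	uid_t euid;
	gid_t egid;

	if (getpeereid(fd, &euid, &egid) != 0) {
		perror("getpeereid");
		return (-1);
	}
	printf("peer euid %ld, egid %ld\n", (long)euid, (long)egid);
	return (0);
}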