2005-01-06 23:35:40 +00:00
|
|
|
/*-
|
1994-05-24 10:09:53 +00:00
|
|
|
* Copyright (c) 1982, 1986, 1989, 1990, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
|
|
|
*
|
1998-11-05 14:28:26 +00:00
|
|
|
* sendfile(2) and related extensions:
|
2004-01-11 19:56:42 +00:00
|
|
|
* Copyright (c) 1998, David Greenman. All rights reserved.
|
1998-11-05 14:28:26 +00:00
|
|
|
*
|
1994-05-24 10:09:53 +00:00
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
|
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* @(#)uipc_syscalls.c 8.4 (Berkeley) 2/21/94
|
|
|
|
*/
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
2006-11-03 15:23:16 +00:00
|
|
|
#include "opt_sctp.h"
|
1997-12-16 17:40:42 +00:00
|
|
|
#include "opt_compat.h"
|
1996-01-03 21:42:35 +00:00
|
|
|
#include "opt_ktrace.h"
|
2002-07-31 16:39:49 +00:00
|
|
|
#include "opt_mac.h"
|
1996-01-03 21:42:35 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/param.h>
|
1994-05-25 09:21:21 +00:00
|
|
|
#include <sys/systm.h>
|
1998-11-05 14:28:26 +00:00
|
|
|
#include <sys/kernel.h>
|
2001-05-01 08:13:21 +00:00
|
|
|
#include <sys/lock.h>
|
|
|
|
#include <sys/mutex.h>
|
1995-10-23 15:42:12 +00:00
|
|
|
#include <sys/sysproto.h>
|
1997-10-12 20:26:33 +00:00
|
|
|
#include <sys/malloc.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/filedesc.h>
|
2000-04-16 18:53:38 +00:00
|
|
|
#include <sys/event.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/proc.h>
|
1997-03-23 03:37:54 +00:00
|
|
|
#include <sys/fcntl.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/file.h>
|
2003-02-23 23:00:28 +00:00
|
|
|
#include <sys/filio.h>
|
2001-05-01 08:13:21 +00:00
|
|
|
#include <sys/mount.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/mbuf.h>
|
|
|
|
#include <sys/protosw.h>
|
2003-11-16 06:11:26 +00:00
|
|
|
#include <sys/sf_buf.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <sys/socketvar.h>
|
1994-10-02 17:35:40 +00:00
|
|
|
#include <sys/signalvar.h>
|
2003-02-03 17:36:52 +00:00
|
|
|
#include <sys/syscallsubr.h>
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
#include <sys/sysctl.h>
|
1998-03-28 10:33:27 +00:00
|
|
|
#include <sys/uio.h>
|
1998-11-05 14:28:26 +00:00
|
|
|
#include <sys/vnode.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#ifdef KTRACE
|
|
|
|
#include <sys/ktrace.h>
|
|
|
|
#endif
|
2001-05-01 08:13:21 +00:00
|
|
|
|
2006-10-22 11:52:19 +00:00
|
|
|
#include <security/mac/mac_framework.h>
|
|
|
|
|
1998-11-05 14:28:26 +00:00
|
|
|
#include <vm/vm.h>
|
|
|
|
#include <vm/vm_object.h>
|
|
|
|
#include <vm/vm_page.h>
|
|
|
|
#include <vm/vm_pageout.h>
|
|
|
|
#include <vm/vm_kern.h>
|
|
|
|
#include <vm/vm_extern.h>
|
|
|
|
|
2006-11-03 15:23:16 +00:00
|
|
|
#ifdef SCTP
|
|
|
|
#include <netinet/sctp.h>
|
|
|
|
#include <netinet/sctp_peeloff.h>
|
|
|
|
#endif /* SCTP */
|
|
|
|
|
2002-03-19 21:25:46 +00:00
|
|
|
static int sendit(struct thread *td, int s, struct msghdr *mp, int flags);
|
2002-06-28 23:48:23 +00:00
|
|
|
static int recvit(struct thread *td, int s, struct msghdr *mp, void *namelenp);
|
2004-01-11 19:56:42 +00:00
|
|
|
|
2002-03-19 21:25:46 +00:00
|
|
|
static int accept1(struct thread *td, struct accept_args *uap, int compat);
|
2002-07-12 06:51:57 +00:00
|
|
|
static int do_sendfile(struct thread *td, struct sendfile_args *uap, int compat);
|
2002-03-19 21:25:46 +00:00
|
|
|
static int getsockname1(struct thread *td, struct getsockname_args *uap,
|
2002-03-24 05:09:11 +00:00
|
|
|
int compat);
|
2002-03-19 21:25:46 +00:00
|
|
|
static int getpeername1(struct thread *td, struct getpeername_args *uap,
|
2002-03-24 05:09:11 +00:00
|
|
|
int compat);
|
1995-10-23 15:42:12 +00:00
|
|
|
|
Bring in mbuma to replace mballoc.
mbuma is an Mbuf & Cluster allocator built on top of a number of
extensions to the UMA framework, all included herein.
Extensions to UMA worth noting:
- Better layering between slab <-> zone caches; introduce
Keg structure which splits off slab cache away from the
zone structure and allows multiple zones to be stacked
on top of a single Keg (single type of slab cache);
perhaps we should look into defining a subset API on
top of the Keg for special use by malloc(9),
for example.
- UMA_ZONE_REFCNT zones can now be added, and reference
counters automagically allocated for them within the end
of the associated slab structures. uma_find_refcnt()
does a kextract to fetch the slab struct reference from
the underlying page, and lookup the corresponding refcnt.
mbuma things worth noting:
- integrates mbuf & cluster allocations with extended UMA
and provides caches for commonly-allocated items; defines
several zones (two primary, one secondary) and two kegs.
- change up certain code paths that always used to do:
m_get() + m_clget() to instead just use m_getcl() and
try to take advantage of the newly defined secondary
Packet zone.
- netstat(1) and systat(1) quickly hacked up to do basic
stat reporting but additional stats work needs to be
done once some other details within UMA have been taken
care of and it becomes clearer to how stats will work
within the modified framework.
From the user perspective, one implication is that the
NMBCLUSTERS compile-time option is no longer used. The
maximum number of clusters is still capped off according
to maxusers, but it can be made unlimited by setting
the kern.ipc.nmbclusters boot-time tunable to zero.
Work should be done to write an appropriate sysctl
handler allowing dynamic tuning of kern.ipc.nmbclusters
at runtime.
Additional things worth noting/known issues (READ):
- One report of 'ips' (ServeRAID) driver acting really
slow in conjunction with mbuma. Need more data.
Latest report is that ips is equally sucking with
and without mbuma.
- Giant leak in NFS code sometimes occurs, can't
reproduce but currently analyzing; brueffer is
able to reproduce but THIS IS NOT an mbuma-specific
problem and currently occurs even WITHOUT mbuma.
- Issues in network locking: there is at least one
code path in the rip code where one or more locks
are acquired and we end up in m_prepend() with
M_WAITOK, which causes WITNESS to whine from within
UMA. Current temporary solution: force all UMA
allocations to be M_NOWAIT from within UMA for now
to avoid deadlocks unless WITNESS is defined and we
can determine with certainty that we're not holding
any locks when we're M_WAITOK.
- I've seen at least one weird socketbuffer empty-but-
mbuf-still-attached panic. I don't believe this
to be related to mbuma but please keep your eyes
open, turn on debugging, and capture crash dumps.
This change removes more code than it adds.
A paper is available detailing the change and considering
various performance issues, it was presented at BSDCan2004:
http://www.unixdaemons.com/~bmilekic/netbuf_bmilekic.pdf
Please read the paper for Future Work and implementation
details, as well as credits.
Testing and Debugging:
rwatson,
brueffer,
Ketrien I. Saihr-Kesenchedra,
...
Reviewed by: Lots of people (for different parts)
2004-05-31 21:46:06 +00:00
|
|
|
/*
 * NSFBUFS-related variables and associated sysctls
 */
int nsfbufs;		/* max sendfile(2) sf_bufs; settable only as a boot-time tunable */
int nsfbufspeak;	/* high-water mark of sf_bufs in use */
int nsfbufsused;	/* sf_bufs currently in use */

/* Read-only tunable: sized at boot, reported under kern.ipc.nsfbufs. */
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufs, CTLFLAG_RDTUN, &nsfbufs, 0,
    "Maximum number of sendfile(2) sf_bufs available");
/* Runtime-read-only counters exported for observability (netstat et al.). */
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufspeak, CTLFLAG_RD, &nsfbufspeak, 0,
    "Number of sendfile(2) sf_bufs at peak usage");
SYSCTL_INT(_kern_ipc, OID_AUTO, nsfbufsused, CTLFLAG_RD, &nsfbufsused, 0,
    "Number of sendfile(2) sf_bufs in use");
|
|
|
|
|
2004-10-24 23:45:01 +00:00
|
|
|
/*
|
|
|
|
* Convert a user file descriptor to a kernel file entry. A reference on the
|
|
|
|
* file entry is held upon returning. This is lighter weight than
|
|
|
|
* fgetsock(), which bumps the socket reference drops the file reference
|
|
|
|
* count instead, as this approach avoids several additional mutex operations
|
2006-04-25 11:48:16 +00:00
|
|
|
* associated with the additional reference count. If requested, return the
|
|
|
|
* open file flags.
|
2004-10-24 23:45:01 +00:00
|
|
|
*/
|
|
|
|
static int
|
2006-04-25 11:48:16 +00:00
|
|
|
getsock(struct filedesc *fdp, int fd, struct file **fpp, u_int *fflagp)
|
2004-10-24 23:45:01 +00:00
|
|
|
{
|
|
|
|
struct file *fp;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
fp = NULL;
|
|
|
|
if (fdp == NULL)
|
|
|
|
error = EBADF;
|
|
|
|
else {
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_SLOCK(fdp);
|
2004-11-07 16:09:56 +00:00
|
|
|
fp = fget_locked(fdp, fd);
|
2004-11-13 11:53:02 +00:00
|
|
|
if (fp == NULL)
|
2004-10-24 23:45:01 +00:00
|
|
|
error = EBADF;
|
|
|
|
else if (fp->f_type != DTYPE_SOCKET) {
|
|
|
|
fp = NULL;
|
|
|
|
error = ENOTSOCK;
|
|
|
|
} else {
|
|
|
|
fhold(fp);
|
2006-04-25 11:48:16 +00:00
|
|
|
if (fflagp != NULL)
|
|
|
|
*fflagp = fp->f_flag;
|
2004-10-24 23:45:01 +00:00
|
|
|
error = 0;
|
|
|
|
}
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
FILEDESC_SUNLOCK(fdp);
|
2004-10-24 23:45:01 +00:00
|
|
|
}
|
|
|
|
*fpp = fp;
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
|
|
|
|
* System call interface to the socket abstraction.
|
|
|
|
*/
|
2004-06-11 11:16:26 +00:00
|
|
|
#if defined(COMPAT_43)
|
1994-05-24 10:09:53 +00:00
|
|
|
#define COMPAT_OLDSOCK
|
|
|
|
#endif
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
 * socket(2) system call: create an endpoint for communication and return
 * a new file descriptor referencing it.  On success td->td_retval[0]
 * holds the new descriptor; on failure an errno value is returned and no
 * descriptor is installed.
 */
int
socket(td, uap)
	struct thread *td;
	struct socket_args /* {
		int	domain;
		int	type;
		int	protocol;
	} */ *uap;
{
	struct filedesc *fdp;
	struct socket *so;
	struct file *fp;
	int fd, error;

#ifdef MAC
	/* MAC policy check runs before any resources are allocated. */
	error = mac_check_socket_create(td->td_ucred, uap->domain, uap->type,
	    uap->protocol);
	if (error)
		return (error);
#endif
	fdp = td->td_proc->p_fd;
	/* Allocate the descriptor first so socreate() failure is cheap. */
	error = falloc(td, &fp, &fd);
	if (error)
		return (error);
	/* An extra reference on `fp' has been held for us by falloc(). */
	NET_LOCK_GIANT();
	error = socreate(uap->domain, &so, uap->type, uap->protocol,
	    td->td_ucred, td);
	NET_UNLOCK_GIANT();
	if (error) {
		/* Undo falloc(): close the slot and release its reference. */
		fdclose(fdp, fp, fd, td);
	} else {
		/*
		 * Publish the socket in the file entry under the file lock;
		 * f_ops is set last (see file-locking conventions).
		 */
		FILE_LOCK(fp);
		fp->f_data = so;	/* already has ref count */
		fp->f_flag = FREAD|FWRITE;
		fp->f_type = DTYPE_SOCKET;
		fp->f_ops = &socketops;
		FILE_UNLOCK(fp);
		td->td_retval[0] = fd;
	}
	/* Drop the extra reference from falloc() in both outcomes. */
	fdrop(fp, td);
	return (error);
}
|
|
|
|
|
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
bind(td, uap)
|
|
|
|
struct thread *td;
|
2007-05-16 20:41:08 +00:00
|
|
|
struct bind_args /* {
|
1995-10-23 15:42:12 +00:00
|
|
|
int s;
|
|
|
|
caddr_t name;
|
|
|
|
int namelen;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1997-08-16 19:16:27 +00:00
|
|
|
struct sockaddr *sa;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
|
2003-02-03 17:36:52 +00:00
|
|
|
if ((error = getsockaddr(&sa, uap->name, uap->namelen)) != 0)
|
|
|
|
return (error);
|
|
|
|
|
2006-07-19 18:28:52 +00:00
|
|
|
error = kern_bind(td, uap->s, sa);
|
|
|
|
free(sa, M_SONAME);
|
|
|
|
return (error);
|
2003-02-03 17:36:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Kernel-internal bind: bind the socket referenced by descriptor `fd'
 * to address `sa'.  The caller retains ownership of `sa'.  Runs under
 * NET_LOCK_GIANT() for the duration of the socket operation.
 */
int
kern_bind(td, fd, sa)
	struct thread *td;
	int fd;
	struct sockaddr *sa;
{
	struct socket *so;
	struct file *fp;
	int error;

	NET_LOCK_GIANT();
	/* Resolve fd to a held file entry (reference dropped below). */
	error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
	if (error)
		goto done2;
	so = fp->f_data;
#ifdef MAC
	/* MAC check is performed with the socket lock held. */
	SOCK_LOCK(so);
	error = mac_check_socket_bind(td->td_ucred, so, sa);
	SOCK_UNLOCK(so);
	if (error)
		goto done1;
#endif
	error = sobind(so, sa, td);
#ifdef MAC
done1:
#endif
	/* Release the file reference taken by getsock(). */
	fdrop(fp, td);
done2:
	NET_UNLOCK_GIANT();
	return (error);
}
|
|
|
|
|
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
/* ARGSUSED */
/*
 * listen(2) system call: mark the socket referenced by uap->s as
 * accepting connections, with a backlog of uap->backlog.
 */
int
listen(td, uap)
	struct thread *td;
	struct listen_args /* {
		int	s;
		int	backlog;
	} */ *uap;
{
	struct socket *so;
	struct file *fp;
	int error;

	NET_LOCK_GIANT();
	/* Resolve fd to a held file entry; reference dropped below. */
	error = getsock(td->td_proc->p_fd, uap->s, &fp, NULL);
	if (error == 0) {
		so = fp->f_data;
#ifdef MAC
		/* MAC check under the socket lock, before solisten(). */
		SOCK_LOCK(so);
		error = mac_check_socket_listen(td->td_ucred, so);
		SOCK_UNLOCK(so);
		if (error)
			goto done;
#endif
		error = solisten(so, uap->backlog, td);
#ifdef MAC
done:
#endif
		/* Release the reference taken by getsock(). */
		fdrop(fp, td);
	}
	NET_UNLOCK_GIANT();
	return(error);
}
|
|
|
|
|
Giant pushdown syscalls in kern/uipc_syscalls.c. Affected calls:
recvmsg(), sendmsg(), recvfrom(), accept(), getpeername(), getsockname(),
socket(), connect(), accept(), send(), recv(), bind(), setsockopt(), listen(),
sendto(), shutdown(), socketpair(), sendfile()
2001-08-31 00:37:34 +00:00
|
|
|
/*
 * accept1()
 *
 * Common implementation for accept(2) and the old 4.3BSD-compat accept:
 * copy in/out the user's address buffer around kern_accept().  `compat'
 * selects osockaddr formatting of the returned address.
 */
static int
accept1(td, uap, compat)
	struct thread *td;
	struct accept_args /* {
		int	s;
		struct sockaddr	* __restrict name;
		socklen_t	* __restrict anamelen;
	} */ *uap;
	int compat;
{
	struct sockaddr *name;
	socklen_t namelen;
	struct file *fp;
	int error;

	/* Caller doesn't want the peer address: skip all copyin/copyout. */
	if (uap->name == NULL)
		return (kern_accept(td, uap->s, NULL, NULL, NULL));

	error = copyin(uap->anamelen, &namelen, sizeof (namelen));
	if (error)
		return (error);

	/* On success, kern_accept() returns a held fp and a malloc'd name. */
	error = kern_accept(td, uap->s, &name, &namelen, &fp);

	/*
	 * return a namelen of zero for older code which might
	 * ignore the return value from accept.
	 * NOTE(review): this copies out `namelen' as left by kern_accept();
	 * presumably kern_accept() zeroes *namelen on failure — TODO confirm.
	 */
	if (error) {
		(void) copyout(&namelen,
		    uap->anamelen, sizeof(*uap->anamelen));
		return (error);
	}

	if (error == 0 && name != NULL) {
#ifdef COMPAT_OLDSOCK
		/* Old ABI: sa_family occupies the osockaddr length byte. */
		if (compat)
			((struct osockaddr *)name)->sa_family =
			    name->sa_family;
#endif
		error = copyout(name, uap->name, namelen);
	}
	if (error == 0)
		error = copyout(&namelen, uap->anamelen,
		    sizeof(namelen));
	/*
	 * If any copyout failed, close the freshly-installed descriptor so
	 * the caller doesn't leak it; fdrop releases kern_accept()'s hold.
	 */
	if (error)
		fdclose(td->td_proc->p_fd, fp, td->td_retval[0], td);
	fdrop(fp, td);
	free(name, M_SONAME);
	return (error);
}
|
|
|
|
|
|
|
|
int
|
|
|
|
kern_accept(struct thread *td, int s, struct sockaddr **name,
|
2006-07-27 19:54:41 +00:00
|
|
|
socklen_t *namelen, struct file **fp)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
Giant pushdown syscalls in kern/uipc_syscalls.c. Affected calls:
recvmsg(), sendmsg(), recvfrom(), accept(), getpeername(), getsockname(),
socket(), connect(), accept(), send(), recv(), bind(), setsockopt(), listen(),
sendto(), shutdown(), socketpair(), sendfile()
2001-08-31 00:37:34 +00:00
|
|
|
struct filedesc *fdp;
|
2006-04-25 11:48:16 +00:00
|
|
|
struct file *headfp, *nfp = NULL;
|
Correct a resource leak introduced in recent accept locking changes:
when I reordered events in accept1() to allocate a file descriptor
earlier, I didn't properly update use of goto on exit to unwind for
cases where the file descriptor is now held, but wasn't previously.
The result was that, in the event of accept() on a non-blocking socket,
or in the event of a socket error, a file descriptor would be leaked.
This ended up being non-fatal in many cases, as the file descriptor
would be properly GC'd on process exit, so only showed up for processes
that do a lot of non-blocking accept() calls, and also live for a long
time (such as qmail).
This change updates the use of goto targets to do additional unwinding.
Eyes provided by: Brian Feldman <green@freebsd.org>
Feet, hands provided by: Stefan Ehmann <shoesoft@gmx.net>,
Dimitry Andric <dimitry@andric.com>
Arjan van Leeuwen <avleeuwen@piwebs.com>
2004-06-07 21:45:44 +00:00
|
|
|
struct sockaddr *sa = NULL;
|
Integrate accept locking from rwatson_netperf, introducing a new
global mutex, accept_mtx, which serializes access to the following
fields across all sockets:
so_qlen so_incqlen so_qstate
so_comp so_incomp so_list
so_head
While providing only coarse granularity, this approach avoids lock
order issues between sockets by avoiding ownership of the fields
by a specific socket and its per-socket mutexes.
While here, rewrite soclose(), sofree(), soaccept(), and
sonewconn() to add assertions, close additional races and address
lock order concerns. In particular:
- Reorganize the optimistic concurrency behavior in accept1() to
always allocate a file descriptor with falloc() so that if we do
find a socket, we don't have to encounter the "Oh, there wasn't
a socket" race that can occur if falloc() sleeps in the current
code, which broke inbound accept() ordering, not to mention
requiring backing out socket state changes in a way that raced
with the protocol level. We may want to add a lockless read of
the queue state if polling of empty queues proves to be important
to optimize.
- In accept1(), soref() the socket while holding the accept lock
so that the socket cannot be free'd in a race with the protocol
layer. Likewise in netgraph equivalents of the accept1() code.
- In sonewconn(), loop waiting for the queue to be small enough to
insert our new socket once we've committed to inserting it, or
races can occur that cause the incomplete socket queue to
overfill. In the previous implementation, it was sufficient
to simply test once since calling soabort() didn't release
synchronization permitting another thread to insert a socket as
we discard a previous one.
- In soclose()/sofree()/et al, it is the responsibility of the
caller to remove a socket from the incomplete connection queue
before calling soabort(), which prevents soabort() from having
to walk into the accept socket to release the socket from its
queue, and avoids races when releasing the accept mutex to enter
soabort(), permitting soabort() to avoid lock ordering issues
with the caller.
- Generally cluster accept queue related operations together
throughout these functions in order to facilitate locking.
Annotate new locking in socketvar.h.
2004-06-02 04:15:39 +00:00
|
|
|
int error;
|
1996-03-11 15:37:44 +00:00
|
|
|
struct socket *head, *so;
|
1998-06-10 10:30:23 +00:00
|
|
|
int fd;
|
2001-11-17 03:07:11 +00:00
|
|
|
u_int fflag;
|
2002-10-03 02:13:00 +00:00
|
|
|
pid_t pgid;
|
2003-02-23 23:00:28 +00:00
|
|
|
int tmp;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2006-07-10 21:38:17 +00:00
|
|
|
if (name) {
|
|
|
|
*name = NULL;
|
|
|
|
if (*namelen < 0)
|
Integrate accept locking from rwatson_netperf, introducing a new
global mutex, accept_mtx, which serializes access to the following
fields across all sockets:
so_qlen so_incqlen so_qstate
so_comp so_incomp so_list
so_head
While providing only coarse granularity, this approach avoids lock
order issues between sockets by avoiding ownership of the fields
by a specific socket and its per-socket mutexes.
While here, rewrite soclose(), sofree(), soaccept(), and
sonewconn() to add assertions, close additional races and address
lock order concerns. In particular:
- Reorganize the optimistic concurrency behavior in accept1() to
always allocate a file descriptor with falloc() so that if we do
find a socket, we don't have to encounter the "Oh, there wasn't
a socket" race that can occur if falloc() sleeps in the current
code, which broke inbound accept() ordering, not to mention
requiring backing out socket state changes in a way that raced
with the protocol level. We may want to add a lockless read of
the queue state if polling of empty queues proves to be important
to optimize.
- In accept1(), soref() the socket while holding the accept lock
so that the socket cannot be free'd in a race with the protocol
layer. Likewise in netgraph equivalents of the accept1() code.
- In sonewconn(), loop waiting for the queue to be small enough to
insert our new socket once we've committed to inserting it, or
races can occur that cause the incomplete socket queue to
overfill. In the previous implementation, it was sufficient
to simply test once since calling soabort() didn't release
synchronization permitting another thread to insert a socket as
we discard a previous one.
- In soclose()/sofree()/et al, it is the responsibility of the
caller to remove a socket from the incomplete connection queue
before calling soabort(), which prevents soabort() from having
to walk into the accept socket to release the socket from its
queue, and avoids races when releasing the accept mutex to enter
soabort(), permitting soabort() to avoid lock ordering issues
with the caller.
- Generally cluster accept queue related operations together
throughout these functions in order to facilitate locking.
Annotate new locking in socketvar.h.
2004-06-02 04:15:39 +00:00
|
|
|
return (EINVAL);
|
1994-10-02 17:35:40 +00:00
|
|
|
}
|
2006-07-10 21:38:17 +00:00
|
|
|
|
|
|
|
fdp = td->td_proc->p_fd;
|
2004-03-29 02:21:56 +00:00
|
|
|
NET_LOCK_GIANT();
|
2006-07-10 21:38:17 +00:00
|
|
|
error = getsock(fdp, s, &headfp, &fflag);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
Giant pushdown syscalls in kern/uipc_syscalls.c. Affected calls:
recvmsg(), sendmsg(), recvfrom(), accept(), getpeername(), getsockname(),
socket(), connect(), accept(), send(), recv(), bind(), setsockopt(), listen(),
sendto(), shutdown(), socketpair(), sendfile()
2001-08-31 00:37:34 +00:00
|
|
|
goto done2;
|
2006-04-25 11:48:16 +00:00
|
|
|
head = headfp->f_data;
|
1996-03-11 15:37:44 +00:00
|
|
|
if ((head->so_options & SO_ACCEPTCONN) == 0) {
|
2000-11-18 21:01:04 +00:00
|
|
|
error = EINVAL;
|
|
|
|
goto done;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2005-04-16 18:46:29 +00:00
|
|
|
#ifdef MAC
|
|
|
|
SOCK_LOCK(head);
|
|
|
|
error = mac_check_socket_accept(td->td_ucred, head);
|
|
|
|
SOCK_UNLOCK(head);
|
|
|
|
if (error != 0)
|
|
|
|
goto done;
|
|
|
|
#endif
|
Integrate accept locking from rwatson_netperf, introducing a new
global mutex, accept_mtx, which serializes access to the following
fields across all sockets:
so_qlen so_incqlen so_qstate
so_comp so_incomp so_list
so_head
While providing only coarse granularity, this approach avoids lock
order issues between sockets by avoiding ownership of the fields
by a specific socket and its per-socket mutexes.
While here, rewrite soclose(), sofree(), soaccept(), and
sonewconn() to add assertions, close additional races and address
lock order concerns. In particular:
- Reorganize the optimistic concurrency behavior in accept1() to
always allocate a file descriptor with falloc() so that if we do
find a socket, we don't have to encounter the "Oh, there wasn't
a socket" race that can occur if falloc() sleeps in the current
code, which broke inbound accept() ordering, not to mention
requiring backing out socket state changes in a way that raced
with the protocol level. We may want to add a lockless read of
the queue state if polling of empty queues proves to be important
to optimize.
- In accept1(), soref() the socket while holding the accept lock
so that the socket cannot be free'd in a race with the protocol
layer. Likewise in netgraph equivalents of the accept1() code.
- In sonewconn(), loop waiting for the queue to be small enough to
insert our new socket once we've committed to inserting it, or
races can occur that cause the incomplete socket queue to
overfill. In the previous implementation, it was sufficient
to simply test once since calling soabort() didn't release
synchronization permitting another thread to insert a socket as
we discard a previous one.
- In soclose()/sofree()/et al, it is the responsibility of the
caller to remove a socket from the incomplete connection queue
before calling soabort(), which prevents soabort() from having
to walk into the accept socket to release the socket from its
queue, and avoids races when releasing the accept mutex to enter
soabort(), permitting soabort() to avoid lock ordering issues
with the caller.
- Generally cluster accept queue related operations together
throughout these functions in order to facilitate locking.
Annotate new locking in socketvar.h.
2004-06-02 04:15:39 +00:00
|
|
|
error = falloc(td, &nfp, &fd);
|
|
|
|
if (error)
|
|
|
|
goto done;
|
|
|
|
ACCEPT_LOCK();
|
|
|
|
if ((head->so_state & SS_NBIO) && TAILQ_EMPTY(&head->so_comp)) {
|
|
|
|
ACCEPT_UNLOCK();
|
|
|
|
error = EWOULDBLOCK;
|
Correct a resource leak introduced in recent accept locking changes:
when I reordered events in accept1() to allocate a file descriptor
earlier, I didn't properly update use of goto on exit to unwind for
cases where the file descriptor is now held, but wasn't previously.
The result was that, in the event of accept() on a non-blocking socket,
or in the event of a socket error, a file descriptor would be leaked.
This ended up being non-fatal in many cases, as the file descriptor
would be properly GC'd on process exit, so only showed up for processes
that do a lot of non-blocking accept() calls, and also live for a long
time (such as qmail).
This change updates the use of goto targets to do additional unwinding.
Eyes provided by: Brian Feldman <green@freebsd.org>
Feet, hands provided by: Stefan Ehmann <shoesoft@gmx.net>,
Dimitry Andric <dimitry@andric.com>
Arjan van Leeuwen <avleeuwen@piwebs.com>
2004-06-07 21:45:44 +00:00
|
|
|
goto noconnection;
|
Integrate accept locking from rwatson_netperf, introducing a new
global mutex, accept_mtx, which serializes access to the following
fields across all sockets:
so_qlen so_incqlen so_qstate
so_comp so_incomp so_list
so_head
While providing only coarse granularity, this approach avoids lock
order issues between sockets by avoiding ownership of the fields
by a specific socket and its per-socket mutexes.
While here, rewrite soclose(), sofree(), soaccept(), and
sonewconn() to add assertions, close additional races and address
lock order concerns. In particular:
- Reorganize the optimistic concurrency behavior in accept1() to
always allocate a file descriptor with falloc() so that if we do
find a socket, we don't have to encounter the "Oh, there wasn't
a socket" race that can occur if falloc() sleeps in the current
code, which broke inbound accept() ordering, not to mention
requiring backing out socket state changes in a way that raced
with the protocol level. We may want to add a lockless read of
the queue state if polling of empty queues proves to be important
to optimize.
- In accept1(), soref() the socket while holding the accept lock
so that the socket cannot be free'd in a race with the protocol
layer. Likewise in netgraph equivalents of the accept1() code.
- In sonewconn(), loop waiting for the queue to be small enough to
insert our new socket once we've committed to inserting it, or
races can occur that cause the incomplete socket queue to
overfill. In the previous implementation, it was sufficient
to simply test once since calling soabort() didn't release
synchronization permitting another thread to insert a socket as
we discard a previous one.
- In soclose()/sofree()/et al, it is the responsibility of the
caller to remove a socket from the incomplete connection queue
before calling soabort(), which prevents soabort() from having
to walk into the accept socket to release the socket from its
queue, and avoids races when releasing the accept mutex to enter
soabort(), permitting soabort() to avoid lock ordering issues
with the caller.
- Generally cluster accept queue related operations together
throughout these functions in order to facilitate locking.
Annotate new locking in socketvar.h.
2004-06-02 04:15:39 +00:00
|
|
|
}
|
1999-11-16 10:56:05 +00:00
|
|
|
while (TAILQ_EMPTY(&head->so_comp) && head->so_error == 0) {
|
2004-06-14 18:16:22 +00:00
|
|
|
if (head->so_rcv.sb_state & SBS_CANTRCVMORE) {
|
1996-03-11 15:37:44 +00:00
|
|
|
head->so_error = ECONNABORTED;
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
|
|
|
}
|
Integrate accept locking from rwatson_netperf, introducing a new
global mutex, accept_mtx, which serializes access to the following
fields across all sockets:
so_qlen so_incqlen so_qstate
so_comp so_incomp so_list
so_head
While providing only coarse granularity, this approach avoids lock
order issues between sockets by avoiding ownership of the fields
by a specific socket and its per-socket mutexes.
While here, rewrite soclose(), sofree(), soaccept(), and
sonewconn() to add assertions, close additional races and address
lock order concerns. In particular:
- Reorganize the optimistic concurrency behavior in accept1() to
always allocate a file descriptor with falloc() so that if we do
find a socket, we don't have to encounter the "Oh, there wasn't
a socket" race that can occur if falloc() sleeps in the current
code, which broke inbound accept() ordering, not to mention
requiring backing out socket state changes in a way that raced
with the protocol level. We may want to add a lockless read of
the queue state if polling of empty queues proves to be important
to optimize.
- In accept1(), soref() the socket while holding the accept lock
so that the socket cannot be free'd in a race with the protocol
layer. Likewise in netgraph equivalents of the accept1() code.
- In sonewconn(), loop waiting for the queue to be small enough to
insert our new socket once we've committed to inserting it, or
races can occur that cause the incomplete socket queue to
overfill. In the previous implementation, it was sufficient
to simply test once since calling soabort() didn't release
synchronization permitting another thread to insert a socket as
we discard a previous one.
- In soclose()/sofree()/et al, it is the responsibility of the
caller to remove a socket from the incomplete connection queue
before calling soabort(), which prevents soabort() from having
to walk into the accept socket to release the socket from its
queue, and avoids races when releasing the accept mutex to enter
soabort(), permitting soabort() to avoid lock ordering issues
with the caller.
- Generally cluster accept queue related operations together
throughout these functions in order to facilitate locking.
Annotate new locking in socketvar.h.
2004-06-02 04:15:39 +00:00
|
|
|
error = msleep(&head->so_timeo, &accept_mtx, PSOCK | PCATCH,
|
1995-12-14 22:51:13 +00:00
|
|
|
"accept", 0);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error) {
|
Integrate accept locking from rwatson_netperf, introducing a new
global mutex, accept_mtx, which serializes access to the following
fields across all sockets:
so_qlen so_incqlen so_qstate
so_comp so_incomp so_list
so_head
While providing only coarse granularity, this approach avoids lock
order issues between sockets by avoiding ownership of the fields
by a specific socket and its per-socket mutexes.
While here, rewrite soclose(), sofree(), soaccept(), and
sonewconn() to add assertions, close additional races and address
lock order concerns. In particular:
- Reorganize the optimistic concurrency behavior in accept1() to
always allocate a file descriptor with falloc() so that if we do
find a socket, we don't have to encounter the "Oh, there wasn't
a socket" race that can occur if falloc() sleeps in the current
code, which broke inbound accept() ordering, not to mention
requiring backing out socket state changes in a way that raced
with the protocol level. We may want to add a lockless read of
the queue state if polling of empty queues proves to be important
to optimize.
- In accept1(), soref() the socket while holding the accept lock
so that the socket cannot be free'd in a race with the protocol
layer. Likewise in netgraph equivalents of the accept1() code.
- In sonewconn(), loop waiting for the queue to be small enough to
insert our new socket once we've committed to inserting it, or
races can occur that cause the incomplete socket queue to
overfill. In the previous implementation, it was sufficient
to simply test once since calling soabort() didn't release
synchronization permitting another thread to insert a socket as
we discard a previous one.
- In soclose()/sofree()/et al, it is the responsibility of the
caller to remove a socket from the incomplete connection queue
before calling soabort(), which prevents soabort() from having
to walk into the accept socket to release the socket from its
queue, and avoids races when releasing the accept mutex to enter
soabort(), permitting soabort() to avoid lock ordering issues
with the caller.
- Generally cluster accept queue related operations together
throughout these functions in order to facilitate locking.
Annotate new locking in socketvar.h.
2004-06-02 04:15:39 +00:00
|
|
|
ACCEPT_UNLOCK();
|
Correct a resource leak introduced in recent accept locking changes:
when I reordered events in accept1() to allocate a file descriptor
earlier, I didn't properly update use of goto on exit to unwind for
cases where the file descriptor is now held, but wasn't previously.
The result was that, in the event of accept() on a non-blocking socket,
or in the event of a socket error, a file descriptor would be leaked.
This ended up being non-fatal in many cases, as the file descriptor
would be properly GC'd on process exit, so only showed up for processes
that do a lot of non-blocking accept() calls, and also live for a long
time (such as qmail).
This change updates the use of goto targets to do additional unwinding.
Eyes provided by: Brian Feldman <green@freebsd.org>
Feet, hands provided by: Stefan Ehmann <shoesoft@gmx.net>,
Dimitry Andric <dimitry@andric.com>
Arjan van Leeuwen <avleeuwen@piwebs.com>
2004-06-07 21:45:44 +00:00
|
|
|
goto noconnection;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
}
|
1996-03-11 15:37:44 +00:00
|
|
|
if (head->so_error) {
|
|
|
|
error = head->so_error;
|
|
|
|
head->so_error = 0;
|
Integrate accept locking from rwatson_netperf, introducing a new
global mutex, accept_mtx, which serializes access to the following
fields across all sockets:
so_qlen so_incqlen so_qstate
so_comp so_incomp so_list
so_head
While providing only coarse granularity, this approach avoids lock
order issues between sockets by avoiding ownership of the fields
by a specific socket and its per-socket mutexes.
While here, rewrite soclose(), sofree(), soaccept(), and
sonewconn() to add assertions, close additional races and address
lock order concerns. In particular:
- Reorganize the optimistic concurrency behavior in accept1() to
always allocate a file descriptor with falloc() so that if we do
find a socket, we don't have to encounter the "Oh, there wasn't
a socket" race that can occur if falloc() sleeps in the current
code, which broke inbound accept() ordering, not to mention
requiring backing out socket state changes in a way that raced
with the protocol level. We may want to add a lockless read of
the queue state if polling of empty queues proves to be important
to optimize.
- In accept1(), soref() the socket while holding the accept lock
so that the socket cannot be free'd in a race with the protocol
layer. Likewise in netgraph equivalents of the accept1() code.
- In sonewconn(), loop waiting for the queue to be small enough to
insert our new socket once we've committed to inserting it, or
races can occur that cause the incomplete socket queue to
overfill. In the previous implementation, it was sufficient
to simply test once since calling soabort() didn't release
synchronization permitting another thread to insert a socket as
we discard a previous one.
- In soclose()/sofree()/et al, it is the responsibility of the
caller to remove a socket from the incomplete connection queue
before calling soabort(), which prevents soabort() from having
to walk into the accept socket to release the socket from its
queue, and avoids races when releasing the accept mutex to enter
soabort(), permitting soabort() to avoid lock ordering issues
with the caller.
- Generally cluster accept queue related operations together
throughout these functions in order to facilitate locking.
Annotate new locking in socketvar.h.
2004-06-02 04:15:39 +00:00
|
|
|
ACCEPT_UNLOCK();
|
Correct a resource leak introduced in recent accept locking changes:
when I reordered events in accept1() to allocate a file descriptor
earlier, I didn't properly update use of goto on exit to unwind for
cases where the file descriptor is now held, but wasn't previously.
The result was that, in the event of accept() on a non-blocking socket,
or in the event of a socket error, a file descriptor would be leaked.
This ended up being non-fatal in many cases, as the file descriptor
would be properly GC'd on process exit, so only showed up for processes
that do a lot of non-blocking accept() calls, and also live for a long
time (such as qmail).
This change updates the use of goto targets to do additional unwinding.
Eyes provided by: Brian Feldman <green@freebsd.org>
Feet, hands provided by: Stefan Ehmann <shoesoft@gmx.net>,
Dimitry Andric <dimitry@andric.com>
Arjan van Leeuwen <avleeuwen@piwebs.com>
2004-06-07 21:45:44 +00:00
|
|
|
goto noconnection;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
1999-11-16 10:56:05 +00:00
|
|
|
so = TAILQ_FIRST(&head->so_comp);
|
Integrate accept locking from rwatson_netperf, introducing a new
global mutex, accept_mtx, which serializes access to the following
fields across all sockets:
so_qlen so_incqlen so_qstate
so_comp so_incomp so_list
so_head
While providing only coarse granularity, this approach avoids lock
order issues between sockets by avoiding ownership of the fields
by a specific socket and its per-socket mutexes.
While here, rewrite soclose(), sofree(), soaccept(), and
sonewconn() to add assertions, close additional races and address
lock order concerns. In particular:
- Reorganize the optimistic concurrency behavior in accept1() to
always allocate a file descriptor with falloc() so that if we do
find a socket, we don't have to encounter the "Oh, there wasn't
a socket" race that can occur if falloc() sleeps in the current
code, which broke inbound accept() ordering, not to mention
requiring backing out socket state changes in a way that raced
with the protocol level. We may want to add a lockless read of
the queue state if polling of empty queues proves to be important
to optimize.
- In accept1(), soref() the socket while holding the accept lock
so that the socket cannot be free'd in a race with the protocol
layer. Likewise in netgraph equivalents of the accept1() code.
- In sonewconn(), loop waiting for the queue to be small enough to
insert our new socket once we've committed to inserting it, or
races can occur that cause the incomplete socket queue to
overfill. In the previous implementation, it was sufficient
to simply test once since calling soabort() didn't release
synchronization permitting another thread to insert a socket as
we discard a previous one.
- In soclose()/sofree()/et al, it is the responsibility of the
caller to remove a socket from the incomplete connection queue
before calling soabort(), which prevents soabort() from having
to walk into the accept socket to release the socket from its
queue, and avoids races when releasing the accept mutex to enter
soabort(), permitting soabort() to avoid lock ordering issues
with the caller.
- Generally cluster accept queue related operations together
throughout these functions in order to facilitate locking.
Annotate new locking in socketvar.h.
2004-06-02 04:15:39 +00:00
|
|
|
KASSERT(!(so->so_qstate & SQ_INCOMP), ("accept1: so SQ_INCOMP"));
|
|
|
|
KASSERT(so->so_qstate & SQ_COMP, ("accept1: so not SQ_COMP"));
|
|
|
|
|
2004-06-12 23:36:59 +00:00
|
|
|
/*
|
|
|
|
* Before changing the flags on the socket, we have to bump the
|
2004-06-12 20:47:32 +00:00
|
|
|
* reference count. Otherwise, if the protocol calls sofree(),
|
2004-06-12 23:36:59 +00:00
|
|
|
* the socket will be released due to a zero refcount.
|
|
|
|
*/
|
2005-02-17 13:00:23 +00:00
|
|
|
SOCK_LOCK(so); /* soref() and so_state update */
|
Integrate accept locking from rwatson_netperf, introducing a new
global mutex, accept_mtx, which serializes access to the following
fields across all sockets:
so_qlen so_incqlen so_qstate
so_comp so_incomp so_list
so_head
While providing only coarse granularity, this approach avoids lock
order issues between sockets by avoiding ownership of the fields
by a specific socket and its per-socket mutexes.
While here, rewrite soclose(), sofree(), soaccept(), and
sonewconn() to add assertions, close additional races and address
lock order concerns. In particular:
- Reorganize the optimistic concurrency behavior in accept1() to
always allocate a file descriptor with falloc() so that if we do
find a socket, we don't have to encounter the "Oh, there wasn't
a socket" race that can occur if falloc() sleeps in the current
code, which broke inbound accept() ordering, not to mention
requiring backing out socket state changes in a way that raced
with the protocol level. We may want to add a lockless read of
the queue state if polling of empty queues proves to be important
to optimize.
- In accept1(), soref() the socket while holding the accept lock
so that the socket cannot be free'd in a race with the protocol
layer. Likewise in netgraph equivalents of the accept1() code.
- In sonewconn(), loop waiting for the queue to be small enough to
insert our new socket once we've committed to inserting it, or
races can occur that cause the incomplete socket queue to
overfill. In the previous implementation, it was sufficient
to simply test once since calling soabort() didn't release
synchronization permitting another thread to insert a socket as
we discard a previous one.
- In soclose()/sofree()/et al, it is the responsibility of the
caller to remove a socket from the incomplete connection queue
before calling soabort(), which prevents soabort() from having
to walk into the accept socket to release the socket from its
queue, and avoids races when releasing the accept mutex to enter
soabort(), permitting soabort() to avoid lock ordering issues
with the caller.
- Generally cluster accept queue related operations together
throughout these functions in order to facilitate locking.
Annotate new locking in socketvar.h.
2004-06-02 04:15:39 +00:00
|
|
|
soref(so); /* file descriptor reference */
|
2004-06-12 23:36:59 +00:00
|
|
|
|
1997-03-31 12:30:01 +00:00
|
|
|
TAILQ_REMOVE(&head->so_comp, so, so_list);
|
|
|
|
head->so_qlen--;
|
2004-06-22 23:58:09 +00:00
|
|
|
so->so_state |= (head->so_state & SS_NBIO);
|
Integrate accept locking from rwatson_netperf, introducing a new
global mutex, accept_mtx, which serializes access to the following
fields across all sockets:
so_qlen so_incqlen so_qstate
so_comp so_incomp so_list
so_head
While providing only coarse granularity, this approach avoids lock
order issues between sockets by avoiding ownership of the fields
by a specific socket and its per-socket mutexes.
While here, rewrite soclose(), sofree(), soaccept(), and
sonewconn() to add assertions, close additional races and address
lock order concerns. In particular:
- Reorganize the optimistic concurrency behavior in accept1() to
always allocate a file descriptor with falloc() so that if we do
find a socket, we don't have to encounter the "Oh, there wasn't
a socket" race that can occur if falloc() sleeps in the current
code, which broke inbound accept() ordering, not to mention
requiring backing out socket state changes in a way that raced
with the protocol level. We may want to add a lockless read of
the queue state if polling of empty queues proves to be important
to optimize.
- In accept1(), soref() the socket while holding the accept lock
so that the socket cannot be free'd in a race with the protocol
layer. Likewise in netgraph equivalents of the accept1() code.
- In sonewconn(), loop waiting for the queue to be small enough to
insert our new socket once we've committed to inserting it, or
races can occur that cause the incomplete socket queue to
overfill. In the previous implementation, it was sufficient
to simply test once since calling soabort() didn't release
synchronization permitting another thread to insert a socket as
we discard a previous one.
- In soclose()/sofree()/et al, it is the responsibility of the
caller to remove a socket from the incomplete connection queue
before calling soabort(), which prevents soabort() from having
to walk into the accept socket to release the socket from its
queue, and avoids races when releasing the accept mutex to enter
soabort(), permitting soabort() to avoid lock ordering issues
with the caller.
- Generally cluster accept queue related operations together
throughout these functions in order to facilitate locking.
Annotate new locking in socketvar.h.
2004-06-02 04:15:39 +00:00
|
|
|
so->so_qstate &= ~SQ_COMP;
|
|
|
|
so->so_head = NULL;
|
|
|
|
|
2005-02-17 13:00:23 +00:00
|
|
|
SOCK_UNLOCK(so);
|
Integrate accept locking from rwatson_netperf, introducing a new
global mutex, accept_mtx, which serializes access to the following
fields across all sockets:
so_qlen so_incqlen so_qstate
so_comp so_incomp so_list
so_head
While providing only coarse granularity, this approach avoids lock
order issues between sockets by avoiding ownership of the fields
by a specific socket and its per-socket mutexes.
While here, rewrite soclose(), sofree(), soaccept(), and
sonewconn() to add assertions, close additional races and address
lock order concerns. In particular:
- Reorganize the optimistic concurrency behavior in accept1() to
always allocate a file descriptor with falloc() so that if we do
find a socket, we don't have to encounter the "Oh, there wasn't
a socket" race that can occur if falloc() sleeps in the current
code, which broke inbound accept() ordering, not to mention
requiring backing out socket state changes in a way that raced
with the protocol level. We may want to add a lockless read of
the queue state if polling of empty queues proves to be important
to optimize.
- In accept1(), soref() the socket while holding the accept lock
so that the socket cannot be free'd in a race with the protocol
layer. Likewise in netgraph equivalents of the accept1() code.
- In sonewconn(), loop waiting for the queue to be small enough to
insert our new socket once we've committed to inserting it, or
races can occur that cause the incomplete socket queue to
overfill. In the previous implementation, it was sufficient
to simply test once since calling soabort() didn't release
synchronization permitting another thread to insert a socket as
we discard a previous one.
- In soclose()/sofree()/et al, it is the responsibility of the
caller to remove a socket from the incomplete connection queue
before calling soabort(), which prevents soabort() from having
to walk into the accept socket to release the socket from its
queue, and avoids races when releasing the accept mutex to enter
soabort(), permitting soabort() to avoid lock ordering issues
with the caller.
- Generally cluster accept queue related operations together
throughout these functions in order to facilitate locking.
Annotate new locking in socketvar.h.
2004-06-02 04:15:39 +00:00
|
|
|
ACCEPT_UNLOCK();
|
1997-03-31 12:30:01 +00:00
|
|
|
|
2003-10-19 20:41:07 +00:00
|
|
|
/* An extra reference on `nfp' has been held for us by falloc(). */
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = fd;
|
1996-03-11 15:37:44 +00:00
|
|
|
|
2000-04-16 18:53:38 +00:00
|
|
|
/* connection has been removed from the listen queue */
|
2004-08-15 06:24:42 +00:00
|
|
|
KNOTE_UNLOCKED(&head->so_rcv.sb_sel.si_note, 0);
|
2000-04-16 18:53:38 +00:00
|
|
|
|
2002-10-03 02:13:00 +00:00
|
|
|
pgid = fgetown(&head->so_sigio);
|
|
|
|
if (pgid != 0)
|
|
|
|
fsetown(pgid, &so->so_sigio);
|
1996-03-11 15:37:44 +00:00
|
|
|
|
2002-01-13 11:58:06 +00:00
|
|
|
FILE_LOCK(nfp);
|
2003-01-13 00:33:17 +00:00
|
|
|
nfp->f_data = so; /* nfp has ref count from falloc */
|
2000-11-18 21:01:04 +00:00
|
|
|
nfp->f_flag = fflag;
|
|
|
|
nfp->f_type = DTYPE_SOCKET;
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
nfp->f_ops = &socketops;
|
2002-01-13 11:58:06 +00:00
|
|
|
FILE_UNLOCK(nfp);
|
2003-02-23 23:00:28 +00:00
|
|
|
/* Sync socket nonblocking/async state with file flags */
|
|
|
|
tmp = fflag & FNONBLOCK;
|
|
|
|
(void) fo_ioctl(nfp, FIONBIO, &tmp, td->td_ucred, td);
|
|
|
|
tmp = fflag & FASYNC;
|
|
|
|
(void) fo_ioctl(nfp, FIOASYNC, &tmp, td->td_ucred, td);
|
1997-08-16 19:16:27 +00:00
|
|
|
sa = 0;
|
2001-02-14 02:09:11 +00:00
|
|
|
error = soaccept(so, &sa);
|
|
|
|
if (error) {
|
|
|
|
/*
|
|
|
|
* return a namelen of zero for older code which might
|
2004-01-11 19:56:42 +00:00
|
|
|
* ignore the return value from accept.
|
|
|
|
*/
|
2006-07-10 21:38:17 +00:00
|
|
|
if (name)
|
|
|
|
*namelen = 0;
|
2001-02-14 02:09:11 +00:00
|
|
|
goto noconnection;
|
|
|
|
}
|
2000-11-18 21:01:04 +00:00
|
|
|
if (sa == NULL) {
|
2006-07-10 21:38:17 +00:00
|
|
|
if (name)
|
|
|
|
*namelen = 0;
|
2000-11-18 21:01:04 +00:00
|
|
|
goto done;
|
1997-08-16 19:16:27 +00:00
|
|
|
}
|
2006-07-10 21:38:17 +00:00
|
|
|
if (name) {
|
1997-12-15 02:29:11 +00:00
|
|
|
/* check sa_len before it is destroyed */
|
2006-07-10 21:38:17 +00:00
|
|
|
if (*namelen > sa->sa_len)
|
|
|
|
*namelen = sa->sa_len;
|
|
|
|
*name = sa;
|
|
|
|
sa = NULL;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2001-02-14 02:09:11 +00:00
|
|
|
noconnection:
|
1999-01-25 16:53:53 +00:00
|
|
|
if (sa)
|
|
|
|
FREE(sa, M_SONAME);
|
2000-11-18 21:01:04 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* close the new descriptor, assuming someone hasn't ripped it
|
|
|
|
* out from under us.
|
|
|
|
*/
|
2004-11-07 22:16:07 +00:00
|
|
|
if (error)
|
|
|
|
fdclose(fdp, nfp, fd, td);
|
2000-11-18 21:01:04 +00:00
|
|
|
|
|
|
|
/*
|
2006-07-27 19:54:41 +00:00
|
|
|
* Release explicitly held references before returning. We return
|
|
|
|
* a reference on nfp to the caller on success if they request it.
|
2000-11-18 21:01:04 +00:00
|
|
|
*/
|
|
|
|
done:
|
2006-07-27 19:54:41 +00:00
|
|
|
if (fp != NULL) {
|
|
|
|
if (error == 0) {
|
|
|
|
*fp = nfp;
|
|
|
|
nfp = NULL;
|
|
|
|
} else
|
|
|
|
*fp = NULL;
|
|
|
|
}
|
2000-11-18 21:01:04 +00:00
|
|
|
if (nfp != NULL)
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(nfp, td);
|
2006-04-25 11:48:16 +00:00
|
|
|
fdrop(headfp, td);
|
Giant pushdown syscalls in kern/uipc_syscalls.c. Affected calls:
recvmsg(), sendmsg(), recvfrom(), accept(), getpeername(), getsockname(),
socket(), connect(), accept(), send(), recv(), bind(), setsockopt(), listen(),
sendto(), shutdown(), socketpair(), sendfile()
2001-08-31 00:37:34 +00:00
|
|
|
done2:
|
2004-03-29 02:21:56 +00:00
|
|
|
NET_UNLOCK_GIANT();
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1995-10-23 15:42:12 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
accept(td, uap)
|
|
|
|
struct thread *td;
|
1995-10-23 15:42:12 +00:00
|
|
|
struct accept_args *uap;
|
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
|
|
|
|
return (accept1(td, uap, 0));
|
1995-10-23 15:42:12 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef COMPAT_OLDSOCK
|
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
oaccept(td, uap)
|
|
|
|
struct thread *td;
|
1995-10-23 15:42:12 +00:00
|
|
|
struct accept_args *uap;
|
|
|
|
{
|
2001-09-12 08:38:13 +00:00
|
|
|
|
|
|
|
return (accept1(td, uap, 1));
|
1995-10-23 15:42:12 +00:00
|
|
|
}
|
|
|
|
#endif /* COMPAT_OLDSOCK */
|
1994-10-02 17:35:40 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
connect(td, uap)
|
|
|
|
struct thread *td;
|
2007-05-16 20:41:08 +00:00
|
|
|
struct connect_args /* {
|
1995-10-23 15:42:12 +00:00
|
|
|
int s;
|
|
|
|
caddr_t name;
|
|
|
|
int namelen;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
1997-08-16 19:16:27 +00:00
|
|
|
struct sockaddr *sa;
|
2003-02-03 17:36:52 +00:00
|
|
|
int error;
|
|
|
|
|
|
|
|
error = getsockaddr(&sa, uap->name, uap->namelen);
|
|
|
|
if (error)
|
2004-01-10 13:03:43 +00:00
|
|
|
return (error);
|
2003-02-03 17:36:52 +00:00
|
|
|
|
2006-07-19 18:28:52 +00:00
|
|
|
error = kern_connect(td, uap->s, sa);
|
|
|
|
free(sa, M_SONAME);
|
|
|
|
return (error);
|
2003-02-03 17:36:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
int
|
|
|
|
kern_connect(td, fd, sa)
|
|
|
|
struct thread *td;
|
|
|
|
int fd;
|
|
|
|
struct sockaddr *sa;
|
|
|
|
{
|
|
|
|
struct socket *so;
|
2004-10-24 23:45:01 +00:00
|
|
|
struct file *fp;
|
2005-02-18 21:39:55 +00:00
|
|
|
int error;
|
2003-08-06 14:04:47 +00:00
|
|
|
int interrupted = 0;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2004-03-29 02:21:56 +00:00
|
|
|
NET_LOCK_GIANT();
|
2006-04-25 11:48:16 +00:00
|
|
|
error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
|
2004-10-24 23:45:01 +00:00
|
|
|
if (error)
|
Giant pushdown syscalls in kern/uipc_syscalls.c. Affected calls:
recvmsg(), sendmsg(), recvfrom(), accept(), getpeername(), getsockname(),
socket(), connect(), accept(), send(), recv(), bind(), setsockopt(), listen(),
sendto(), shutdown(), socketpair(), sendfile()
2001-08-31 00:37:34 +00:00
|
|
|
goto done2;
|
2004-10-24 23:45:01 +00:00
|
|
|
so = fp->f_data;
|
2003-08-06 14:04:47 +00:00
|
|
|
if (so->so_state & SS_ISCONNECTING) {
|
2000-11-18 21:01:04 +00:00
|
|
|
error = EALREADY;
|
2001-11-17 03:07:11 +00:00
|
|
|
goto done1;
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
2002-07-31 16:39:49 +00:00
|
|
|
#ifdef MAC
|
2004-06-13 02:50:07 +00:00
|
|
|
SOCK_LOCK(so);
|
2002-07-31 16:39:49 +00:00
|
|
|
error = mac_check_socket_connect(td->td_ucred, so, sa);
|
2004-06-13 02:50:07 +00:00
|
|
|
SOCK_UNLOCK(so);
|
2002-07-31 16:39:49 +00:00
|
|
|
if (error)
|
|
|
|
goto bad;
|
|
|
|
#endif
|
2001-09-12 08:38:13 +00:00
|
|
|
error = soconnect(so, sa, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error)
|
|
|
|
goto bad;
|
|
|
|
if ((so->so_state & SS_NBIO) && (so->so_state & SS_ISCONNECTING)) {
|
2000-11-18 21:01:04 +00:00
|
|
|
error = EINPROGRESS;
|
2001-11-17 03:07:11 +00:00
|
|
|
goto done1;
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
2004-06-24 01:43:23 +00:00
|
|
|
SOCK_LOCK(so);
|
1994-10-02 17:35:40 +00:00
|
|
|
while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
|
2004-06-24 01:43:23 +00:00
|
|
|
error = msleep(&so->so_timeo, SOCK_MTX(so), PSOCK | PCATCH,
|
|
|
|
"connec", 0);
|
2003-08-06 14:04:47 +00:00
|
|
|
if (error) {
|
|
|
|
if (error == EINTR || error == ERESTART)
|
|
|
|
interrupted = 1;
|
1994-05-24 10:09:53 +00:00
|
|
|
break;
|
2003-08-06 14:04:47 +00:00
|
|
|
}
|
1994-10-02 17:35:40 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error == 0) {
|
|
|
|
error = so->so_error;
|
|
|
|
so->so_error = 0;
|
|
|
|
}
|
2004-06-24 01:43:23 +00:00
|
|
|
SOCK_UNLOCK(so);
|
1994-05-24 10:09:53 +00:00
|
|
|
bad:
|
2003-08-06 14:04:47 +00:00
|
|
|
if (!interrupted)
|
|
|
|
so->so_state &= ~SS_ISCONNECTING;
|
1994-05-24 10:09:53 +00:00
|
|
|
if (error == ERESTART)
|
|
|
|
error = EINTR;
|
2001-11-17 03:07:11 +00:00
|
|
|
done1:
|
2004-10-24 23:45:01 +00:00
|
|
|
fdrop(fp, td);
|
Giant pushdown syscalls in kern/uipc_syscalls.c. Affected calls:
recvmsg(), sendmsg(), recvfrom(), accept(), getpeername(), getsockname(),
socket(), connect(), accept(), send(), recv(), bind(), setsockopt(), listen(),
sendto(), shutdown(), socketpair(), sendfile()
2001-08-31 00:37:34 +00:00
|
|
|
done2:
|
2004-03-29 02:21:56 +00:00
|
|
|
NET_UNLOCK_GIANT();
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
socketpair(td, uap)
|
|
|
|
struct thread *td;
|
2007-05-16 20:41:08 +00:00
|
|
|
struct socketpair_args /* {
|
1995-10-23 15:42:12 +00:00
|
|
|
int domain;
|
|
|
|
int type;
|
|
|
|
int protocol;
|
|
|
|
int *rsv;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2007-05-16 20:41:08 +00:00
|
|
|
struct filedesc *fdp = td->td_proc->p_fd;
|
1994-05-24 10:09:53 +00:00
|
|
|
struct file *fp1, *fp2;
|
|
|
|
struct socket *so1, *so2;
|
|
|
|
int fd, error, sv[2];
|
|
|
|
|
2005-07-05 22:49:10 +00:00
|
|
|
#ifdef MAC
|
|
|
|
/* We might want to have a separate check for socket pairs. */
|
|
|
|
error = mac_check_socket_create(td->td_ucred, uap->domain, uap->type,
|
|
|
|
uap->protocol);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
#endif
|
|
|
|
|
2004-03-29 02:21:56 +00:00
|
|
|
NET_LOCK_GIANT();
|
2001-12-31 17:45:16 +00:00
|
|
|
error = socreate(uap->domain, &so1, uap->type, uap->protocol,
|
2002-02-27 18:32:23 +00:00
|
|
|
td->td_ucred, td);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
Giant pushdown syscalls in kern/uipc_syscalls.c. Affected calls:
recvmsg(), sendmsg(), recvfrom(), accept(), getpeername(), getsockname(),
socket(), connect(), accept(), send(), recv(), bind(), setsockopt(), listen(),
sendto(), shutdown(), socketpair(), sendfile()
2001-08-31 00:37:34 +00:00
|
|
|
goto done2;
|
2001-12-31 17:45:16 +00:00
|
|
|
error = socreate(uap->domain, &so2, uap->type, uap->protocol,
|
2002-02-27 18:32:23 +00:00
|
|
|
td->td_ucred, td);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto free1;
|
2003-10-19 20:41:07 +00:00
|
|
|
/* On success extra reference to `fp1' and 'fp2' is set by falloc. */
|
2001-09-12 08:38:13 +00:00
|
|
|
error = falloc(td, &fp1, &fd);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto free2;
|
|
|
|
sv[0] = fd;
|
2003-01-13 00:33:17 +00:00
|
|
|
fp1->f_data = so1; /* so1 already has ref count */
|
2001-09-12 08:38:13 +00:00
|
|
|
error = falloc(td, &fp2, &fd);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto free3;
|
2003-01-13 00:33:17 +00:00
|
|
|
fp2->f_data = so2; /* so2 already has ref count */
|
1994-05-24 10:09:53 +00:00
|
|
|
sv[1] = fd;
|
1994-10-02 17:35:40 +00:00
|
|
|
error = soconnect2(so1, so2);
|
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto free4;
|
|
|
|
if (uap->type == SOCK_DGRAM) {
|
|
|
|
/*
|
|
|
|
* Datagram socket connection is asymmetric.
|
|
|
|
*/
|
1994-10-02 17:35:40 +00:00
|
|
|
error = soconnect2(so2, so1);
|
|
|
|
if (error)
|
1994-05-24 10:09:53 +00:00
|
|
|
goto free4;
|
|
|
|
}
|
2002-01-13 11:58:06 +00:00
|
|
|
FILE_LOCK(fp1);
|
|
|
|
fp1->f_flag = FREAD|FWRITE;
|
|
|
|
fp1->f_type = DTYPE_SOCKET;
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
fp1->f_ops = &socketops;
|
2002-01-13 11:58:06 +00:00
|
|
|
FILE_UNLOCK(fp1);
|
|
|
|
FILE_LOCK(fp2);
|
|
|
|
fp2->f_flag = FREAD|FWRITE;
|
|
|
|
fp2->f_type = DTYPE_SOCKET;
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
fp2->f_ops = &socketops;
|
2002-01-13 11:58:06 +00:00
|
|
|
FILE_UNLOCK(fp2);
|
2007-04-02 19:15:47 +00:00
|
|
|
so1 = so2 = NULL;
|
2002-06-28 23:48:23 +00:00
|
|
|
error = copyout(sv, uap->rsv, 2 * sizeof (int));
|
2007-04-02 19:15:47 +00:00
|
|
|
if (error)
|
|
|
|
goto free4;
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp1, td);
|
|
|
|
fdrop(fp2, td);
|
Giant pushdown syscalls in kern/uipc_syscalls.c. Affected calls:
recvmsg(), sendmsg(), recvfrom(), accept(), getpeername(), getsockname(),
socket(), connect(), accept(), send(), recv(), bind(), setsockopt(), listen(),
sendto(), shutdown(), socketpair(), sendfile()
2001-08-31 00:37:34 +00:00
|
|
|
goto done2;
|
1994-05-24 10:09:53 +00:00
|
|
|
free4:
|
2004-11-07 22:16:07 +00:00
|
|
|
fdclose(fdp, fp2, sv[1], td);
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp2, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
free3:
|
2004-11-07 22:16:07 +00:00
|
|
|
fdclose(fdp, fp1, sv[0], td);
|
2001-09-12 08:38:13 +00:00
|
|
|
fdrop(fp1, td);
|
1994-05-24 10:09:53 +00:00
|
|
|
free2:
|
2007-04-02 19:15:47 +00:00
|
|
|
if (so2 != NULL)
|
|
|
|
(void)soclose(so2);
|
1994-05-24 10:09:53 +00:00
|
|
|
free1:
|
2007-04-02 19:15:47 +00:00
|
|
|
if (so1 != NULL)
|
|
|
|
(void)soclose(so1);
|
Giant pushdown syscalls in kern/uipc_syscalls.c. Affected calls:
recvmsg(), sendmsg(), recvfrom(), accept(), getpeername(), getsockname(),
socket(), connect(), accept(), send(), recv(), bind(), setsockopt(), listen(),
sendto(), shutdown(), socketpair(), sendfile()
2001-08-31 00:37:34 +00:00
|
|
|
done2:
|
2004-03-29 02:21:56 +00:00
|
|
|
NET_UNLOCK_GIANT();
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1998-02-09 06:11:36 +00:00
|
|
|
static int
|
2001-09-12 08:38:13 +00:00
|
|
|
sendit(td, s, mp, flags)
|
2007-05-16 20:41:08 +00:00
|
|
|
struct thread *td;
|
1994-10-02 17:35:40 +00:00
|
|
|
int s;
|
2007-05-16 20:41:08 +00:00
|
|
|
struct msghdr *mp;
|
1997-11-06 19:29:57 +00:00
|
|
|
int flags;
|
1994-10-02 17:35:40 +00:00
|
|
|
{
|
1997-08-16 19:16:27 +00:00
|
|
|
struct mbuf *control;
|
2003-05-05 20:33:38 +00:00
|
|
|
struct sockaddr *to;
|
|
|
|
int error;
|
2002-10-06 14:39:15 +00:00
|
|
|
|
2003-05-05 20:33:38 +00:00
|
|
|
if (mp->msg_name != NULL) {
|
1997-08-16 19:16:27 +00:00
|
|
|
error = getsockaddr(&to, mp->msg_name, mp->msg_namelen);
|
2003-05-29 18:36:26 +00:00
|
|
|
if (error) {
|
|
|
|
to = NULL;
|
|
|
|
goto bad;
|
|
|
|
}
|
2003-05-05 20:33:38 +00:00
|
|
|
mp->msg_name = to;
|
2004-01-11 19:56:42 +00:00
|
|
|
} else {
|
2003-05-05 20:33:38 +00:00
|
|
|
to = NULL;
|
2004-01-11 19:56:42 +00:00
|
|
|
}
|
2003-05-05 20:33:38 +00:00
|
|
|
|
1994-10-02 17:35:40 +00:00
|
|
|
if (mp->msg_control) {
|
|
|
|
if (mp->msg_controllen < sizeof(struct cmsghdr)
|
|
|
|
#ifdef COMPAT_OLDSOCK
|
|
|
|
&& mp->msg_flags != MSG_COMPAT
|
|
|
|
#endif
|
|
|
|
) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto bad;
|
|
|
|
}
|
|
|
|
error = sockargs(&control, mp->msg_control,
|
|
|
|
mp->msg_controllen, MT_CONTROL);
|
|
|
|
if (error)
|
|
|
|
goto bad;
|
|
|
|
#ifdef COMPAT_OLDSOCK
|
|
|
|
if (mp->msg_flags == MSG_COMPAT) {
|
2007-05-16 20:41:08 +00:00
|
|
|
struct cmsghdr *cm;
|
1994-10-02 17:35:40 +00:00
|
|
|
|
2003-02-19 05:47:46 +00:00
|
|
|
M_PREPEND(control, sizeof(*cm), M_TRYWAIT);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (control == 0) {
|
|
|
|
error = ENOBUFS;
|
|
|
|
goto bad;
|
|
|
|
} else {
|
|
|
|
cm = mtod(control, struct cmsghdr *);
|
|
|
|
cm->cmsg_len = control->m_len;
|
|
|
|
cm->cmsg_level = SOL_SOCKET;
|
|
|
|
cm->cmsg_type = SCM_RIGHTS;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
2000-11-18 21:01:04 +00:00
|
|
|
} else {
|
2003-05-05 20:33:38 +00:00
|
|
|
control = NULL;
|
|
|
|
}
|
|
|
|
|
2005-01-30 07:20:36 +00:00
|
|
|
error = kern_sendit(td, s, mp, flags, control, UIO_USERSPACE);
|
2003-05-05 20:33:38 +00:00
|
|
|
|
|
|
|
bad:
|
|
|
|
if (to)
|
|
|
|
FREE(to, M_SONAME);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
|
|
|
int
|
2005-01-30 07:20:36 +00:00
|
|
|
kern_sendit(td, s, mp, flags, control, segflg)
|
2003-05-05 20:33:38 +00:00
|
|
|
struct thread *td;
|
|
|
|
int s;
|
|
|
|
struct msghdr *mp;
|
|
|
|
int flags;
|
|
|
|
struct mbuf *control;
|
2005-01-30 07:20:36 +00:00
|
|
|
enum uio_seg segflg;
|
2003-05-05 20:33:38 +00:00
|
|
|
{
|
2004-10-24 23:45:01 +00:00
|
|
|
struct file *fp;
|
2003-05-05 20:33:38 +00:00
|
|
|
struct uio auio;
|
|
|
|
struct iovec *iov;
|
|
|
|
struct socket *so;
|
|
|
|
int i;
|
|
|
|
int len, error;
|
|
|
|
#ifdef KTRACE
|
2004-07-10 15:42:16 +00:00
|
|
|
struct uio *ktruio = NULL;
|
2003-05-05 20:33:38 +00:00
|
|
|
#endif
|
|
|
|
|
2004-03-29 02:21:56 +00:00
|
|
|
NET_LOCK_GIANT();
|
2006-04-25 11:48:16 +00:00
|
|
|
error = getsock(td->td_proc->p_fd, s, &fp, NULL);
|
2004-10-24 23:45:01 +00:00
|
|
|
if (error)
|
2003-05-05 20:33:38 +00:00
|
|
|
goto bad2;
|
2004-10-24 23:45:01 +00:00
|
|
|
so = (struct socket *)fp->f_data;
|
2003-05-05 20:33:38 +00:00
|
|
|
|
|
|
|
#ifdef MAC
|
2004-06-13 02:50:07 +00:00
|
|
|
SOCK_LOCK(so);
|
2003-05-05 20:33:38 +00:00
|
|
|
error = mac_check_socket_send(td->td_ucred, so);
|
2004-06-13 02:50:07 +00:00
|
|
|
SOCK_UNLOCK(so);
|
2003-05-05 20:33:38 +00:00
|
|
|
if (error)
|
|
|
|
goto bad;
|
|
|
|
#endif
|
|
|
|
|
|
|
|
auio.uio_iov = mp->msg_iov;
|
|
|
|
auio.uio_iovcnt = mp->msg_iovlen;
|
2005-01-30 07:20:36 +00:00
|
|
|
auio.uio_segflg = segflg;
|
2003-05-05 20:33:38 +00:00
|
|
|
auio.uio_rw = UIO_WRITE;
|
|
|
|
auio.uio_td = td;
|
|
|
|
auio.uio_offset = 0; /* XXX */
|
|
|
|
auio.uio_resid = 0;
|
|
|
|
iov = mp->msg_iov;
|
|
|
|
for (i = 0; i < mp->msg_iovlen; i++, iov++) {
|
|
|
|
if ((auio.uio_resid += iov->iov_len) < 0) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto bad;
|
|
|
|
}
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
1994-10-02 17:35:40 +00:00
|
|
|
#ifdef KTRACE
|
2004-07-10 15:42:16 +00:00
|
|
|
if (KTRPOINT(td, KTR_GENIO))
|
|
|
|
ktruio = cloneuio(&auio);
|
1994-10-02 17:35:40 +00:00
|
|
|
#endif
|
|
|
|
len = auio.uio_resid;
|
soreceive_generic(), and sopoll_generic(). Add new functions sosend(),
soreceive(), and sopoll(), which are wrappers for pru_sosend,
pru_soreceive, and pru_sopoll, and are now used univerally by socket
consumers rather than either directly invoking the old so*() functions
or directly invoking the protocol switch method (about an even split
prior to this commit).
This completes an architectural change that was begun in 1996 to permit
protocols to provide substitute implementations, as now used by UDP.
Consumers now uniformly invoke sosend(), soreceive(), and sopoll() to
perform these operations on sockets -- in particular, distributed file
systems and socket system calls.
Architectural head nod: sam, gnn, wollman
2006-07-24 15:20:08 +00:00
|
|
|
error = sosend(so, mp->msg_name, &auio, 0, control, flags, td);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error) {
|
|
|
|
if (auio.uio_resid != len && (error == ERESTART ||
|
|
|
|
error == EINTR || error == EWOULDBLOCK))
|
|
|
|
error = 0;
|
2002-06-20 18:52:54 +00:00
|
|
|
/* Generation of SIGPIPE can be controlled per socket */
|
2005-03-08 16:11:41 +00:00
|
|
|
if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
|
|
|
|
!(flags & MSG_NOSIGNAL)) {
|
2001-09-12 08:38:13 +00:00
|
|
|
PROC_LOCK(td->td_proc);
|
|
|
|
psignal(td->td_proc, SIGPIPE);
|
|
|
|
PROC_UNLOCK(td->td_proc);
|
2001-03-07 03:37:06 +00:00
|
|
|
}
|
1994-10-02 17:35:40 +00:00
|
|
|
}
|
|
|
|
if (error == 0)
|
2001-09-12 08:38:13 +00:00
|
|
|
td->td_retval[0] = len - auio.uio_resid;
|
1994-10-02 17:35:40 +00:00
|
|
|
#ifdef KTRACE
|
2004-07-10 15:42:16 +00:00
|
|
|
if (ktruio != NULL) {
|
|
|
|
ktruio->uio_resid = td->td_retval[0];
|
|
|
|
ktrgenio(s, UIO_WRITE, ktruio, error);
|
1994-10-02 17:35:40 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
bad:
|
2004-10-24 23:45:01 +00:00
|
|
|
fdrop(fp, td);
|
2003-05-05 20:33:38 +00:00
|
|
|
bad2:
|
2004-03-29 02:21:56 +00:00
|
|
|
NET_UNLOCK_GIANT();
|
1994-10-02 17:35:40 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
sendto(td, uap)
|
|
|
|
struct thread *td;
|
2007-05-16 20:41:08 +00:00
|
|
|
struct sendto_args /* {
|
1995-10-23 15:42:12 +00:00
|
|
|
int s;
|
|
|
|
caddr_t buf;
|
|
|
|
size_t len;
|
|
|
|
int flags;
|
|
|
|
caddr_t to;
|
|
|
|
int tolen;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct msghdr msg;
|
|
|
|
struct iovec aiov;
|
Giant pushdown syscalls in kern/uipc_syscalls.c. Affected calls:
recvmsg(), sendmsg(), recvfrom(), accept(), getpeername(), getsockname(),
socket(), connect(), accept(), send(), recv(), bind(), setsockopt(), listen(),
sendto(), shutdown(), socketpair(), sendfile()
2001-08-31 00:37:34 +00:00
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
msg.msg_name = uap->to;
|
|
|
|
msg.msg_namelen = uap->tolen;
|
|
|
|
msg.msg_iov = &aiov;
|
|
|
|
msg.msg_iovlen = 1;
|
|
|
|
msg.msg_control = 0;
|
|
|
|
#ifdef COMPAT_OLDSOCK
|
|
|
|
msg.msg_flags = 0;
|
|
|
|
#endif
|
|
|
|
aiov.iov_base = uap->buf;
|
|
|
|
aiov.iov_len = uap->len;
|
2001-09-12 08:38:13 +00:00
|
|
|
error = sendit(td, uap->s, &msg, uap->flags);
|
Giant pushdown syscalls in kern/uipc_syscalls.c. Affected calls:
recvmsg(), sendmsg(), recvfrom(), accept(), getpeername(), getsockname(),
socket(), connect(), accept(), send(), recv(), bind(), setsockopt(), listen(),
sendto(), shutdown(), socketpair(), sendfile()
2001-08-31 00:37:34 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef COMPAT_OLDSOCK
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
osend(td, uap)
|
|
|
|
struct thread *td;
|
2007-05-16 20:41:08 +00:00
|
|
|
struct osend_args /* {
|
1995-10-23 15:42:12 +00:00
|
|
|
int s;
|
|
|
|
caddr_t buf;
|
|
|
|
int len;
|
|
|
|
int flags;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct msghdr msg;
|
|
|
|
struct iovec aiov;
|
Giant pushdown syscalls in kern/uipc_syscalls.c. Affected calls:
recvmsg(), sendmsg(), recvfrom(), accept(), getpeername(), getsockname(),
socket(), connect(), accept(), send(), recv(), bind(), setsockopt(), listen(),
sendto(), shutdown(), socketpair(), sendfile()
2001-08-31 00:37:34 +00:00
|
|
|
int error;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
msg.msg_name = 0;
|
|
|
|
msg.msg_namelen = 0;
|
|
|
|
msg.msg_iov = &aiov;
|
|
|
|
msg.msg_iovlen = 1;
|
|
|
|
aiov.iov_base = uap->buf;
|
|
|
|
aiov.iov_len = uap->len;
|
|
|
|
msg.msg_control = 0;
|
|
|
|
msg.msg_flags = 0;
|
2001-09-12 08:38:13 +00:00
|
|
|
error = sendit(td, uap->s, &msg, uap->flags);
|
Giant pushdown syscalls in kern/uipc_syscalls.c. Affected calls:
recvmsg(), sendmsg(), recvfrom(), accept(), getpeername(), getsockname(),
socket(), connect(), accept(), send(), recv(), bind(), setsockopt(), listen(),
sendto(), shutdown(), socketpair(), sendfile()
2001-08-31 00:37:34 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
osendmsg(td, uap)
|
|
|
|
struct thread *td;
|
2004-07-10 15:42:16 +00:00
|
|
|
struct osendmsg_args /* {
|
1995-10-23 15:42:12 +00:00
|
|
|
int s;
|
|
|
|
caddr_t msg;
|
|
|
|
int flags;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct msghdr msg;
|
2004-07-10 15:42:16 +00:00
|
|
|
struct iovec *iov;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
|
2002-06-28 23:48:23 +00:00
|
|
|
error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
2004-07-10 15:42:16 +00:00
|
|
|
return (error);
|
|
|
|
error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
2004-07-10 15:42:16 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
msg.msg_iov = iov;
|
2004-07-10 15:42:16 +00:00
|
|
|
msg.msg_flags = MSG_COMPAT;
|
2001-09-12 08:38:13 +00:00
|
|
|
error = sendit(td, uap->s, &msg, uap->flags);
|
2004-07-10 15:42:16 +00:00
|
|
|
free(iov, M_IOV);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
sendmsg(td, uap)
|
|
|
|
struct thread *td;
|
2004-07-10 15:42:16 +00:00
|
|
|
struct sendmsg_args /* {
|
1995-10-23 15:42:12 +00:00
|
|
|
int s;
|
|
|
|
caddr_t msg;
|
|
|
|
int flags;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct msghdr msg;
|
2004-07-10 15:42:16 +00:00
|
|
|
struct iovec *iov;
|
1994-05-24 10:09:53 +00:00
|
|
|
int error;
|
|
|
|
|
2002-06-28 23:48:23 +00:00
|
|
|
error = copyin(uap->msg, &msg, sizeof (msg));
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
2004-07-10 15:42:16 +00:00
|
|
|
return (error);
|
|
|
|
error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
msg.msg_iov = iov;
|
|
|
|
#ifdef COMPAT_OLDSOCK
|
|
|
|
msg.msg_flags = 0;
|
|
|
|
#endif
|
2001-09-12 08:38:13 +00:00
|
|
|
error = sendit(td, uap->s, &msg, uap->flags);
|
2004-07-10 15:42:16 +00:00
|
|
|
free(iov, M_IOV);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2005-10-15 05:57:06 +00:00
|
|
|
int
|
2006-07-10 21:38:17 +00:00
|
|
|
kern_recvit(td, s, mp, fromseg, controlp)
|
2004-07-10 15:42:16 +00:00
|
|
|
struct thread *td;
|
1994-05-24 10:09:53 +00:00
|
|
|
int s;
|
2004-07-10 15:42:16 +00:00
|
|
|
struct msghdr *mp;
|
2006-07-10 21:38:17 +00:00
|
|
|
enum uio_seg fromseg;
|
2005-10-31 21:09:56 +00:00
|
|
|
struct mbuf **controlp;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
struct uio auio;
|
2004-07-10 15:42:16 +00:00
|
|
|
struct iovec *iov;
|
|
|
|
int i;
|
2003-12-24 18:47:43 +00:00
|
|
|
socklen_t len;
|
|
|
|
int error;
|
1997-08-16 19:16:27 +00:00
|
|
|
struct mbuf *m, *control = 0;
|
1996-05-09 20:15:26 +00:00
|
|
|
caddr_t ctlbuf;
|
2004-10-24 23:45:01 +00:00
|
|
|
struct file *fp;
|
1997-04-27 20:01:29 +00:00
|
|
|
struct socket *so;
|
1997-08-16 19:16:27 +00:00
|
|
|
struct sockaddr *fromsa = 0;
|
1994-05-24 10:09:53 +00:00
|
|
|
#ifdef KTRACE
|
2004-07-10 15:42:16 +00:00
|
|
|
struct uio *ktruio = NULL;
|
1994-05-24 10:09:53 +00:00
|
|
|
#endif
|
1995-05-30 08:16:23 +00:00
|
|
|
|
2005-10-31 21:09:56 +00:00
|
|
|
if(controlp != NULL)
|
|
|
|
*controlp = 0;
|
|
|
|
|
2004-03-29 02:21:56 +00:00
|
|
|
NET_LOCK_GIANT();
|
2006-04-25 11:48:16 +00:00
|
|
|
error = getsock(td->td_proc->p_fd, s, &fp, NULL);
|
2004-10-24 23:45:01 +00:00
|
|
|
if (error) {
|
2004-03-29 02:21:56 +00:00
|
|
|
NET_UNLOCK_GIANT();
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
2003-08-11 19:37:11 +00:00
|
|
|
}
|
2004-10-24 23:45:01 +00:00
|
|
|
so = fp->f_data;
|
2002-10-06 14:39:15 +00:00
|
|
|
|
|
|
|
#ifdef MAC
|
2004-06-13 02:50:07 +00:00
|
|
|
SOCK_LOCK(so);
|
2002-10-06 14:39:15 +00:00
|
|
|
error = mac_check_socket_receive(td->td_ucred, so);
|
2004-06-13 02:50:07 +00:00
|
|
|
SOCK_UNLOCK(so);
|
2002-10-06 14:39:15 +00:00
|
|
|
if (error) {
|
2004-10-24 23:45:01 +00:00
|
|
|
fdrop(fp, td);
|
2004-03-29 02:21:56 +00:00
|
|
|
NET_UNLOCK_GIANT();
|
2002-10-06 14:39:15 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
auio.uio_iov = mp->msg_iov;
|
|
|
|
auio.uio_iovcnt = mp->msg_iovlen;
|
2006-07-10 21:38:17 +00:00
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
1994-10-02 17:35:40 +00:00
|
|
|
auio.uio_rw = UIO_READ;
|
2001-09-12 08:38:13 +00:00
|
|
|
auio.uio_td = td;
|
1994-05-24 10:09:53 +00:00
|
|
|
auio.uio_offset = 0; /* XXX */
|
|
|
|
auio.uio_resid = 0;
|
|
|
|
iov = mp->msg_iov;
|
|
|
|
for (i = 0; i < mp->msg_iovlen; i++, iov++) {
|
2000-11-18 21:01:04 +00:00
|
|
|
if ((auio.uio_resid += iov->iov_len) < 0) {
|
2004-10-24 23:45:01 +00:00
|
|
|
fdrop(fp, td);
|
2004-03-29 02:21:56 +00:00
|
|
|
NET_UNLOCK_GIANT();
|
1994-05-24 10:09:53 +00:00
|
|
|
return (EINVAL);
|
2000-11-18 21:01:04 +00:00
|
|
|
}
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
#ifdef KTRACE
|
2004-07-10 15:42:16 +00:00
|
|
|
if (KTRPOINT(td, KTR_GENIO))
|
|
|
|
ktruio = cloneuio(&auio);
|
1994-05-24 10:09:53 +00:00
|
|
|
#endif
|
|
|
|
len = auio.uio_resid;
|
soreceive_generic(), and sopoll_generic(). Add new functions sosend(),
soreceive(), and sopoll(), which are wrappers for pru_sosend,
pru_soreceive, and pru_sopoll, and are now used univerally by socket
consumers rather than either directly invoking the old so*() functions
or directly invoking the protocol switch method (about an even split
prior to this commit).
This completes an architectural change that was begun in 1996 to permit
protocols to provide substitute implementations, as now used by UDP.
Consumers now uniformly invoke sosend(), soreceive(), and sopoll() to
perform these operations on sockets -- in particular, distributed file
systems and socket system calls.
Architectural head nod: sam, gnn, wollman
2006-07-24 15:20:08 +00:00
|
|
|
error = soreceive(so, &fromsa, &auio, (struct mbuf **)0,
|
2005-10-31 21:09:56 +00:00
|
|
|
(mp->msg_control || controlp) ? &control : (struct mbuf **)0,
|
1994-10-02 17:35:40 +00:00
|
|
|
&mp->msg_flags);
|
|
|
|
if (error) {
|
2003-12-24 18:47:43 +00:00
|
|
|
if (auio.uio_resid != (int)len && (error == ERESTART ||
|
1994-05-24 10:09:53 +00:00
|
|
|
error == EINTR || error == EWOULDBLOCK))
|
|
|
|
error = 0;
|
|
|
|
}
|
|
|
|
#ifdef KTRACE
|
2004-07-10 15:42:16 +00:00
|
|
|
if (ktruio != NULL) {
|
|
|
|
ktruio->uio_resid = (int)len - auio.uio_resid;
|
|
|
|
ktrgenio(s, UIO_READ, ktruio, error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
#endif
|
1994-10-02 17:35:40 +00:00
|
|
|
if (error)
|
|
|
|
goto out;
|
2003-12-24 18:47:43 +00:00
|
|
|
td->td_retval[0] = (int)len - auio.uio_resid;
|
1994-10-02 17:35:40 +00:00
|
|
|
if (mp->msg_name) {
|
|
|
|
len = mp->msg_namelen;
|
1997-08-16 19:16:27 +00:00
|
|
|
if (len <= 0 || fromsa == 0)
|
1994-10-02 17:35:40 +00:00
|
|
|
len = 0;
|
|
|
|
else {
|
1997-12-14 03:15:21 +00:00
|
|
|
/* save sa_len before it is destroyed by MSG_COMPAT */
|
|
|
|
len = MIN(len, fromsa->sa_len);
|
1994-05-24 10:09:53 +00:00
|
|
|
#ifdef COMPAT_OLDSOCK
|
1994-10-02 17:35:40 +00:00
|
|
|
if (mp->msg_flags & MSG_COMPAT)
|
1997-08-16 19:16:27 +00:00
|
|
|
((struct osockaddr *)fromsa)->sa_family =
|
|
|
|
fromsa->sa_family;
|
|
|
|
#endif
|
2006-07-10 21:38:17 +00:00
|
|
|
if (fromseg == UIO_USERSPACE) {
|
|
|
|
error = copyout(fromsa, mp->msg_name,
|
|
|
|
(unsigned)len);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
} else
|
|
|
|
bcopy(fromsa, mp->msg_name, len);
|
1994-10-02 17:35:40 +00:00
|
|
|
}
|
|
|
|
mp->msg_namelen = len;
|
|
|
|
}
|
2005-10-31 21:09:56 +00:00
|
|
|
if (mp->msg_control && controlp == NULL) {
|
1994-10-02 17:35:40 +00:00
|
|
|
#ifdef COMPAT_OLDSOCK
|
|
|
|
/*
|
|
|
|
* We assume that old recvmsg calls won't receive access
|
|
|
|
* rights and other control info, esp. as control info
|
|
|
|
* is always optional and those options didn't exist in 4.3.
|
|
|
|
* If we receive rights, trim the cmsghdr; anything else
|
|
|
|
* is tossed.
|
|
|
|
*/
|
|
|
|
if (control && mp->msg_flags & MSG_COMPAT) {
|
|
|
|
if (mtod(control, struct cmsghdr *)->cmsg_level !=
|
|
|
|
SOL_SOCKET ||
|
|
|
|
mtod(control, struct cmsghdr *)->cmsg_type !=
|
|
|
|
SCM_RIGHTS) {
|
|
|
|
mp->msg_controllen = 0;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
control->m_len -= sizeof (struct cmsghdr);
|
|
|
|
control->m_data += sizeof (struct cmsghdr);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
len = mp->msg_controllen;
|
1996-05-09 20:15:26 +00:00
|
|
|
m = control;
|
|
|
|
mp->msg_controllen = 0;
|
2002-06-28 23:48:23 +00:00
|
|
|
ctlbuf = mp->msg_control;
|
1996-05-09 20:15:26 +00:00
|
|
|
|
|
|
|
while (m && len > 0) {
|
|
|
|
unsigned int tocopy;
|
|
|
|
|
2004-01-11 19:56:42 +00:00
|
|
|
if (len >= m->m_len)
|
1996-05-09 20:15:26 +00:00
|
|
|
tocopy = m->m_len;
|
|
|
|
else {
|
1994-10-02 17:35:40 +00:00
|
|
|
mp->msg_flags |= MSG_CTRUNC;
|
1996-05-09 20:15:26 +00:00
|
|
|
tocopy = len;
|
|
|
|
}
|
2004-01-11 19:56:42 +00:00
|
|
|
|
2002-06-28 23:48:23 +00:00
|
|
|
if ((error = copyout(mtod(m, caddr_t),
|
1999-01-27 21:50:00 +00:00
|
|
|
ctlbuf, tocopy)) != 0)
|
1996-05-09 20:15:26 +00:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
ctlbuf += tocopy;
|
|
|
|
len -= tocopy;
|
|
|
|
m = m->m_next;
|
1994-10-02 17:35:40 +00:00
|
|
|
}
|
1999-11-24 20:49:04 +00:00
|
|
|
mp->msg_controllen = ctlbuf - (caddr_t)mp->msg_control;
|
1994-10-02 17:35:40 +00:00
|
|
|
}
|
|
|
|
out:
|
2004-10-24 23:45:01 +00:00
|
|
|
fdrop(fp, td);
|
2004-03-29 02:21:56 +00:00
|
|
|
NET_UNLOCK_GIANT();
|
1997-08-16 19:16:27 +00:00
|
|
|
if (fromsa)
|
|
|
|
FREE(fromsa, M_SONAME);
|
2005-10-31 21:09:56 +00:00
|
|
|
|
|
|
|
if (error == 0 && controlp != NULL)
|
|
|
|
*controlp = control;
|
|
|
|
else if (control)
|
1994-10-02 17:35:40 +00:00
|
|
|
m_freem(control);
|
2005-10-31 21:09:56 +00:00
|
|
|
|
1994-10-02 17:35:40 +00:00
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2005-10-15 05:57:06 +00:00
|
|
|
static int
|
|
|
|
recvit(td, s, mp, namelenp)
|
|
|
|
struct thread *td;
|
|
|
|
int s;
|
|
|
|
struct msghdr *mp;
|
|
|
|
void *namelenp;
|
|
|
|
{
|
2006-07-10 21:38:17 +00:00
|
|
|
int error;
|
2005-10-15 05:57:06 +00:00
|
|
|
|
2006-07-10 21:38:17 +00:00
|
|
|
error = kern_recvit(td, s, mp, UIO_USERSPACE, NULL);
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
if (namelenp) {
|
|
|
|
error = copyout(&mp->msg_namelen, namelenp, sizeof (socklen_t));
|
|
|
|
#ifdef COMPAT_OLDSOCK
|
|
|
|
if (mp->msg_flags & MSG_COMPAT)
|
|
|
|
error = 0; /* old recvfrom didn't check */
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
return (error);
|
2005-10-15 05:57:06 +00:00
|
|
|
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
 * recvfrom(2) system call: receive a single buffer from a socket,
 * optionally returning the source address.  The arguments are packed
 * into a struct msghdr with a one-element iovec and handed to recvit().
 */
int
recvfrom(td, uap)
	struct thread *td;
	struct recvfrom_args /* {
		int	s;
		caddr_t	buf;
		size_t	len;
		int	flags;
		struct sockaddr * __restrict	from;
		socklen_t * __restrict fromlenaddr;
	} */ *uap;
{
	struct msghdr msg;
	struct iovec aiov;
	int error;

	/* Read the caller's address-buffer size, if a 'from' length was given. */
	if (uap->fromlenaddr) {
		error = copyin(uap->fromlenaddr,
		    &msg.msg_namelen, sizeof (msg.msg_namelen));
		if (error)
			goto done2;
	} else {
		msg.msg_namelen = 0;
	}
	/* Build a msghdr describing a single-segment receive. */
	msg.msg_name = uap->from;
	msg.msg_iov = &aiov;
	msg.msg_iovlen = 1;
	aiov.iov_base = uap->buf;
	aiov.iov_len = uap->len;
	msg.msg_control = 0;	/* recvfrom() never returns control data */
	msg.msg_flags = uap->flags;
	/* recvit() copies the updated namelen back to fromlenaddr. */
	error = recvit(td, uap->s, &msg, uap->fromlenaddr);
done2:
	return(error);
}
|
|
|
|
|
1994-10-02 17:35:40 +00:00
|
|
|
#ifdef COMPAT_OLDSOCK
/*
 * Old (4.3BSD) recvfrom(2): identical to the modern call except that the
 * MSG_COMPAT flag is set so downstream code applies old-sockaddr fixups.
 */
int
orecvfrom(struct thread *td, struct recvfrom_args *uap)
{

	uap->flags |= MSG_COMPAT;
	return (recvfrom(td, uap));
}
#endif
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#ifdef COMPAT_OLDSOCK
/*
 * Old (4.3BSD) recv(2): like read(2) with flags.  Packs the buffer into
 * a one-element msghdr with no name or control data and calls recvit().
 */
int
orecv(struct thread *td, struct orecv_args /* {
		int	s;
		caddr_t	buf;
		int	len;
		int	flags;
	} */ *uap)
{
	struct iovec one_iov;
	struct msghdr mh;

	mh.msg_name = 0;
	mh.msg_namelen = 0;
	mh.msg_iov = &one_iov;
	mh.msg_iovlen = 1;
	one_iov.iov_base = uap->buf;
	one_iov.iov_len = uap->len;
	mh.msg_control = 0;
	mh.msg_flags = uap->flags;
	return (recvit(td, uap->s, &mh, NULL));
}
|
|
|
|
|
|
|
|
/*
 * Old recvmsg.  This code takes advantage of the fact that the old msghdr
 * overlays the new one, missing only the flags, and with the (old) access
 * rights where the control fields are now.
 */
int
orecvmsg(td, uap)
	struct thread *td;
	struct orecvmsg_args /* {
		int	s;
		struct	omsghdr *msg;
		int	flags;
	} */ *uap;
{
	struct msghdr msg;
	struct iovec *iov;
	int error;

	/* Copy in the old-layout header; overlays the new struct msghdr. */
	error = copyin(uap->msg, &msg, sizeof (struct omsghdr));
	if (error)
		return (error);
	/* Duplicate the user iovec array into kernel space (freed below). */
	error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
	if (error)
		return (error);
	/* MSG_COMPAT tells lower layers to apply old-sockaddr conversions. */
	msg.msg_flags = uap->flags | MSG_COMPAT;
	msg.msg_iov = iov;
	error = recvit(td, uap->s, &msg, &uap->msg->msg_namelen);
	/* The old header stores the access-rights length where controllen is. */
	if (msg.msg_controllen && error == 0)
		error = copyout(&msg.msg_controllen,
		    &uap->msg->msg_accrightslen, sizeof (int));
	free(iov, M_IOV);
	return (error);
}
#endif
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
 * recvmsg(2) system call: scatter receive with optional source address
 * and control (ancillary) data.  The user's iovec array is copied into
 * kernel space for the duration of the call and the (possibly updated)
 * msghdr is copied back out on success.
 */
int
recvmsg(td, uap)
	struct thread *td;
	struct recvmsg_args /* {
		int	s;
		struct	msghdr *msg;
		int	flags;
	} */ *uap;
{
	struct msghdr msg;
	struct iovec *uiov, *iov;
	int error;

	error = copyin(uap->msg, &msg, sizeof (msg));
	if (error)
		return (error);
	/* Bring the iovec array into kernel space; freed before return. */
	error = copyiniov(msg.msg_iov, msg.msg_iovlen, &iov, EMSGSIZE);
	if (error)
		return (error);
	msg.msg_flags = uap->flags;
#ifdef COMPAT_OLDSOCK
	/* MSG_COMPAT is kernel-internal; never honor it from userland. */
	msg.msg_flags &= ~MSG_COMPAT;
#endif
	/* Remember the user's iov pointer so the copied-out header is sane. */
	uiov = msg.msg_iov;
	msg.msg_iov = iov;
	error = recvit(td, uap->s, &msg, NULL);
	if (error == 0) {
		msg.msg_iov = uiov;
		error = copyout(&msg, uap->msg, sizeof(msg));
	}
	free(iov, M_IOV);
	return (error);
}
|
1995-10-07 23:47:26 +00:00
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
/*
 * shutdown(2) system call: disable further sends and/or receives on a
 * socket, per 'how' (SHUT_RD/SHUT_WR/SHUT_RDWR).
 */
/* ARGSUSED */
int
shutdown(td, uap)
	struct thread *td;
	struct shutdown_args /* {
		int	s;
		int	how;
	} */ *uap;
{
	struct socket *so;
	struct file *fp;
	int error;

	NET_LOCK_GIANT();
	/* Hold a reference on the file so 'so' stays valid across the call. */
	error = getsock(td->td_proc->p_fd, uap->s, &fp, NULL);
	if (error == 0) {
		so = fp->f_data;
		error = soshutdown(so, uap->how);
		fdrop(fp, td);
	}
	NET_UNLOCK_GIANT();
	return (error);
}
|
|
|
|
|
|
|
|
/* ARGSUSED */
|
1994-05-25 09:21:21 +00:00
|
|
|
int
|
2001-09-12 08:38:13 +00:00
|
|
|
setsockopt(td, uap)
|
|
|
|
struct thread *td;
|
2007-05-16 20:41:08 +00:00
|
|
|
struct setsockopt_args /* {
|
1995-10-23 15:42:12 +00:00
|
|
|
int s;
|
|
|
|
int level;
|
|
|
|
int name;
|
|
|
|
caddr_t val;
|
|
|
|
int valsize;
|
|
|
|
} */ *uap;
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2004-07-17 21:06:36 +00:00
|
|
|
|
|
|
|
return (kern_setsockopt(td, uap->s, uap->level, uap->name,
|
|
|
|
uap->val, UIO_USERSPACE, uap->valsize));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Kernel version of setsockopt(2).
 * 'val' may point into userland or kernel space depending on 'valseg';
 * the struct sockopt records which so sooptcopyin() can do the right copy.
 */
int
kern_setsockopt(td, s, level, name, val, valseg, valsize)
	struct thread *td;
	int s;
	int level;
	int name;
	void *val;
	enum uio_seg valseg;
	socklen_t valsize;
{
	int error;
	struct socket *so;
	struct file *fp;
	struct sockopt sopt;

	/* A value length without a value buffer cannot be copied in. */
	if (val == NULL && valsize != 0)
		return (EFAULT);
	/* Reject lengths that would be negative as a signed int. */
	if ((int)valsize < 0)
		return (EINVAL);

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = level;
	sopt.sopt_name = name;
	sopt.sopt_val = val;
	sopt.sopt_valsize = valsize;
	switch (valseg) {
	case UIO_USERSPACE:
		sopt.sopt_td = td;	/* non-NULL td => copy from userland */
		break;
	case UIO_SYSSPACE:
		sopt.sopt_td = NULL;	/* NULL td => val is a kernel pointer */
		break;
	default:
		panic("kern_setsockopt called with bad valseg");
	}

	NET_LOCK_GIANT();
	error = getsock(td->td_proc->p_fd, s, &fp, NULL);
	if (error == 0) {
		so = fp->f_data;
		error = sosetopt(so, &sopt);
		fdrop(fp, td);
	}
	NET_UNLOCK_GIANT();
	return(error);
}
|
|
|
|
|
|
|
|
/*
 * getsockopt(2) system call: copy in the caller's buffer size, fetch the
 * option via kern_getsockopt(), and copy the resulting size back out.
 */
/* ARGSUSED */
int
getsockopt(td, uap)
	struct thread *td;
	struct getsockopt_args /* {
		int	s;
		int	level;
		int	name;
		void * __restrict	val;
		socklen_t * __restrict avalsize;
	} */ *uap;
{
	socklen_t valsize;
	int error;

	if (uap->val) {
		error = copyin(uap->avalsize, &valsize, sizeof (valsize));
		if (error)
			return (error);
	}
	/*
	 * If uap->val is NULL, valsize is left uninitialized here;
	 * kern_getsockopt() zeroes *valsize in that case before using it.
	 */

	error = kern_getsockopt(td, uap->s, uap->level, uap->name,
	    uap->val, UIO_USERSPACE, &valsize);

	if (error == 0)
		error = copyout(&valsize, uap->avalsize, sizeof (valsize));
	return (error);
}
|
|
|
|
|
|
|
|
/*
 * Kernel version of getsockopt(2).
 * optval can be a userland or a kernel pointer, selected by 'valseg'.
 * optlen is always a kernel pointer.
 */
int
kern_getsockopt(td, s, level, name, val, valseg, valsize)
	struct thread *td;
	int s;
	int level;
	int name;
	void *val;
	enum uio_seg valseg;
	socklen_t *valsize;
{
	int error;
	struct socket *so;
	struct file *fp;
	struct sockopt sopt;

	/* With no value buffer, report a zero-length result. */
	if (val == NULL)
		*valsize = 0;
	/* Reject lengths that would be negative as a signed int. */
	if ((int)*valsize < 0)
		return (EINVAL);

	sopt.sopt_dir = SOPT_GET;
	sopt.sopt_level = level;
	sopt.sopt_name = name;
	sopt.sopt_val = val;
	sopt.sopt_valsize = (size_t)*valsize; /* checked non-negative above */
	switch (valseg) {
	case UIO_USERSPACE:
		sopt.sopt_td = td;	/* non-NULL td => copy to userland */
		break;
	case UIO_SYSSPACE:
		sopt.sopt_td = NULL;	/* NULL td => val is a kernel pointer */
		break;
	default:
		panic("kern_getsockopt called with bad valseg");
	}

	NET_LOCK_GIANT();
	error = getsock(td->td_proc->p_fd, s, &fp, NULL);
	if (error == 0) {
		so = fp->f_data;
		error = sogetopt(so, &sopt);
		/* Report the actual option length, even on error. */
		*valsize = sopt.sopt_valsize;
		fdrop(fp, td);
	}
	NET_UNLOCK_GIANT();
	return (error);
}
|
|
|
|
|
|
|
|
/*
 * getsockname1() - Get socket name.
 *
 * Common implementation for getsockname(2) and the 4.3BSD-compatible
 * ogetsockname(); 'compat' selects old-style sockaddr fixups.
 */
/* ARGSUSED */
static int
getsockname1(td, uap, compat)
	struct thread *td;
	struct getsockname_args /* {
		int	fdes;
		struct sockaddr * __restrict asa;
		socklen_t * __restrict alen;
	} */ *uap;
	int compat;
{
	struct sockaddr *sa;
	socklen_t len;
	int error;

	error = copyin(uap->alen, &len, sizeof(len));
	if (error)
		return (error);

	/* On success 'sa' is M_SONAME storage owned by us; len is clamped. */
	error = kern_getsockname(td, uap->fdes, &sa, &len);
	if (error)
		return (error);

	if (len != 0) {
#ifdef COMPAT_OLDSOCK
		/* Old sockaddr kept the family in a 16-bit field, no sa_len. */
		if (compat)
			((struct osockaddr *)sa)->sa_family = sa->sa_family;
#endif
		error = copyout(sa, uap->asa, (u_int)len);
	}
	free(sa, M_SONAME);
	if (error == 0)
		error = copyout(&len, uap->alen, sizeof(len));
	return (error);
}
|
|
|
|
|
|
|
|
/*
 * Kernel version of getsockname(2): returns the socket's local address
 * in *sa (M_SONAME allocation owned by the caller on success) and clamps
 * *alen to the smaller of the caller's buffer and the address length.
 */
int
kern_getsockname(struct thread *td, int fd, struct sockaddr **sa,
    socklen_t *alen)
{
	struct socket *so;
	struct file *fp;
	socklen_t len;
	int error;

	/*
	 * NOTE(review): if socklen_t is unsigned on this platform this
	 * check can never fire -- confirm whether a cast was intended.
	 */
	if (*alen < 0)
		return (EINVAL);

	NET_LOCK_GIANT();
	error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
	if (error)
		goto done;
	so = fp->f_data;
	*sa = NULL;
	/* The protocol allocates *sa (M_SONAME) on success. */
	error = (*so->so_proto->pr_usrreqs->pru_sockaddr)(so, sa);
	if (error)
		goto bad;
	if (*sa == NULL)
		len = 0;
	else
		len = MIN(*alen, (*sa)->sa_len);
	*alen = len;
bad:
	fdrop(fp, td);
	/* On failure, release any address the protocol handed us. */
	if (error && *sa) {
		free(*sa, M_SONAME);
		*sa = NULL;
	}
done:
	NET_UNLOCK_GIANT();
	return (error);
}
|
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
 * getsockname(2) system call: modern entry point, no 4.3BSD compat fixups.
 */
int
getsockname(struct thread *td, struct getsockname_args *uap)
{

	return (getsockname1(td, uap, 0));
}
|
|
|
|
|
1995-10-23 15:42:12 +00:00
|
|
|
#ifdef COMPAT_OLDSOCK
/*
 * Old (4.3BSD) getsockname(2): same as the modern call but requests
 * old-style sockaddr conversion from the shared implementation.
 */
int
ogetsockname(struct thread *td, struct getsockname_args *uap)
{

	return (getsockname1(td, uap, 1));
}
#endif /* COMPAT_OLDSOCK */
|
1994-05-24 10:09:53 +00:00
|
|
|
|
1995-10-23 15:42:12 +00:00
|
|
|
/*
 * getpeername1() - Get name of peer for connected socket.
 *
 * Common implementation for getpeername(2) and the 4.3BSD-compatible
 * ogetpeername(); 'compat' selects old-style sockaddr fixups.
 */
/* ARGSUSED */
static int
getpeername1(td, uap, compat)
	struct thread *td;
	struct getpeername_args /* {
		int	fdes;
		struct sockaddr * __restrict	asa;
		socklen_t * __restrict	alen;
	} */ *uap;
	int compat;
{
	struct sockaddr *sa;
	socklen_t len;
	int error;

	error = copyin(uap->alen, &len, sizeof (len));
	if (error)
		return (error);

	/* On success 'sa' is M_SONAME storage owned by us; len is clamped. */
	error = kern_getpeername(td, uap->fdes, &sa, &len);
	if (error)
		return (error);

	if (len != 0) {
#ifdef COMPAT_OLDSOCK
		/* Old sockaddr kept the family in a 16-bit field, no sa_len. */
		if (compat)
			((struct osockaddr *)sa)->sa_family = sa->sa_family;
#endif
		error = copyout(sa, uap->asa, (u_int)len);
	}
	free(sa, M_SONAME);
	if (error == 0)
		error = copyout(&len, uap->alen, sizeof(len));
	return (error);
}
|
|
|
|
|
|
|
|
/*
 * Kernel version of getpeername(2): returns the connected peer's address
 * in *sa (M_SONAME allocation owned by the caller on success) and clamps
 * *alen accordingly.  Fails with ENOTCONN on unconnected sockets.
 */
int
kern_getpeername(struct thread *td, int fd, struct sockaddr **sa,
    socklen_t *alen)
{
	struct socket *so;
	struct file *fp;
	socklen_t len;
	int error;

	/*
	 * NOTE(review): if socklen_t is unsigned on this platform this
	 * check can never fire -- confirm whether a cast was intended.
	 */
	if (*alen < 0)
		return (EINVAL);

	NET_LOCK_GIANT();
	error = getsock(td->td_proc->p_fd, fd, &fp, NULL);
	if (error)
		goto done2;
	so = fp->f_data;
	if ((so->so_state & (SS_ISCONNECTED|SS_ISCONFIRMING)) == 0) {
		error = ENOTCONN;
		goto done1;
	}
	*sa = NULL;
	/* The protocol allocates *sa (M_SONAME) on success. */
	error = (*so->so_proto->pr_usrreqs->pru_peeraddr)(so, sa);
	if (error)
		goto bad;
	if (*sa == NULL)
		len = 0;
	else
		len = MIN(*alen, (*sa)->sa_len);
	*alen = len;
bad:
	/* On failure, release any address the protocol handed us. */
	if (error && *sa) {
		free(*sa, M_SONAME);
		*sa = NULL;
	}
done1:
	fdrop(fp, td);
done2:
	NET_UNLOCK_GIANT();
	return (error);
}
|
|
|
|
|
1995-10-23 15:42:12 +00:00
|
|
|
/*
 * getpeername(2) system call: modern entry point, no 4.3BSD compat fixups.
 */
int
getpeername(struct thread *td, struct getpeername_args *uap)
{

	return (getpeername1(td, uap, 0));
}
|
|
|
|
|
|
|
|
#ifdef COMPAT_OLDSOCK
/*
 * Old (4.3BSD) getpeername(2): requests old-style sockaddr conversion
 * from the shared implementation.
 */
int
ogetpeername(struct thread *td, struct ogetpeername_args *uap)
{

	/* XXX uap should have type `getpeername_args *' to begin with. */
	return (getpeername1(td, (struct getpeername_args *)uap, 1));
}
#endif /* COMPAT_OLDSOCK */
|
1994-10-02 17:35:40 +00:00
|
|
|
|
1994-05-25 09:21:21 +00:00
|
|
|
/*
 * Copy a user buffer of at most MCLBYTES into a freshly allocated mbuf
 * of the given type, returning it via *mp.  For MT_SONAME buffers the
 * sockaddr family/length fields are normalized for old-sockaddr layouts.
 */
int
sockargs(mp, buf, buflen, type)
	struct mbuf **mp;
	caddr_t buf;
	int buflen, type;
{
	struct sockaddr *sa;
	struct mbuf *m;
	int error;

	if ((u_int)buflen > MLEN) {
#ifdef COMPAT_OLDSOCK
		/* 4.3BSD unix-domain names could be up to 112 bytes. */
		if (type == MT_SONAME && (u_int)buflen <= 112)
			buflen = MLEN;		/* unix domain compat. hack */
		else
#endif
			if ((u_int)buflen > MCLBYTES)
				return (EINVAL);
	}
	m = m_get(M_TRYWAIT, type);
	if (m == NULL)
		return (ENOBUFS);
	/* Larger than an mbuf's internal storage: attach a cluster. */
	if ((u_int)buflen > MLEN) {
		MCLGET(m, M_TRYWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_free(m);
			return (ENOBUFS);
		}
	}
	m->m_len = buflen;
	error = copyin(buf, mtod(m, caddr_t), (u_int)buflen);
	if (error)
		(void) m_free(m);
	else {
		*mp = m;
		if (type == MT_SONAME) {
			sa = mtod(m, struct sockaddr *);

#if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
			/*
			 * Old sockaddr had a 16-bit family where sa_len and
			 * sa_family now live; recover the family if the
			 * high byte (sa_family here) is zero.
			 */
			if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
				sa->sa_family = sa->sa_len;
#endif
			sa->sa_len = buflen;
		}
	}
	return (error);
}
|
|
|
|
|
1997-08-16 19:16:27 +00:00
|
|
|
/*
 * Copy a sockaddr of 'len' bytes in from userland into a fresh M_SONAME
 * allocation returned via *namp.  The caller owns (and must free) the
 * result on success.  Length is validated against SOCK_MAXADDRLEN and
 * the minimum sockaddr header size.
 */
int
getsockaddr(namp, uaddr, len)
	struct sockaddr **namp;
	caddr_t uaddr;
	size_t len;
{
	struct sockaddr *sa;
	int error;

	if (len > SOCK_MAXADDRLEN)
		return (ENAMETOOLONG);
	if (len < offsetof(struct sockaddr, sa_data[0]))
		return (EINVAL);
	MALLOC(sa, struct sockaddr *, len, M_SONAME, M_WAITOK);
	error = copyin(uaddr, sa, len);
	if (error) {
		FREE(sa, M_SONAME);
	} else {
#if defined(COMPAT_OLDSOCK) && BYTE_ORDER != BIG_ENDIAN
		/* Recover the family from an old-layout (16-bit) sockaddr. */
		if (sa->sa_family == 0 && sa->sa_len < AF_MAX)
			sa->sa_family = sa->sa_len;
#endif
		/* Force sa_len to the actual copied length. */
		sa->sa_len = len;
		*namp = sa;
	}
	return (error);
}
|
|
|
|
|
2004-03-16 19:04:28 +00:00
|
|
|
/*
 * Detach mapped page and release resources back to the system.
 *
 * External-storage free routine for mbufs built on sf_bufs (sendfile):
 * releases the sf_buf mapping, unwires the backing page, and frees the
 * page itself if its object has already gone away.
 */
void
sf_buf_mext(void *addr, void *args)
{
	vm_page_t m;

	m = sf_buf_page(args);
	sf_buf_free(args);
	vm_page_lock_queues();
	vm_page_unwire(m, 0);
	/*
	 * Check for the object going away on us. This can
	 * happen since we don't hold a reference to it.
	 * If so, we're responsible for freeing the page.
	 */
	if (m->wire_count == 0 && m->object == NULL)
		vm_page_free(m);
	vm_page_unlock_queues();
}
|
|
|
|
|
1998-11-05 14:28:26 +00:00
|
|
|
/*
 * sendfile(2)
 *
 * int sendfile(int fd, int s, off_t offset, size_t nbytes,
 *	 struct sf_hdtr *hdtr, off_t *sbytes, int flags)
 *
 * Send a file specified by 'fd' and starting at 'offset' to a socket
 * specified by 's'.  Send only 'nbytes' of the file or until EOF if
 * nbytes == 0.  Optionally add a header and/or trailer to the socket
 * output.  If specified, write the total number of bytes sent into
 * *sbytes.
 */
int
sendfile(struct thread *td, struct sendfile_args *uap)
{

	/* Modern ABI: compat == 0. */
	return (do_sendfile(td, uap, 0));
}
|
|
|
|
|
2006-02-28 19:39:18 +00:00
|
|
|
static int
|
|
|
|
do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
|
|
|
|
{
|
|
|
|
struct sf_hdtr hdtr;
|
|
|
|
struct uio *hdr_uio, *trl_uio;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
hdr_uio = trl_uio = NULL;
|
|
|
|
|
|
|
|
if (uap->hdtr != NULL) {
|
|
|
|
error = copyin(uap->hdtr, &hdtr, sizeof(hdtr));
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
if (hdtr.headers != NULL) {
|
|
|
|
error = copyinuio(hdtr.headers, hdtr.hdr_cnt, &hdr_uio);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
if (hdtr.trailers != NULL) {
|
|
|
|
error = copyinuio(hdtr.trailers, hdtr.trl_cnt, &trl_uio);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
error = kern_sendfile(td, uap, hdr_uio, trl_uio, compat);
|
|
|
|
out:
|
|
|
|
if (hdr_uio)
|
|
|
|
free(hdr_uio, M_IOV);
|
|
|
|
if (trl_uio)
|
|
|
|
free(trl_uio, M_IOV);
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
|
2002-07-12 06:51:57 +00:00
|
|
|
#ifdef COMPAT_FREEBSD4
/*
 * FreeBSD 4 compatible sendfile(2): identical argument layout, but the
 * old ABI counted header bytes against nbytes, so dispatch with
 * compat == 1.
 */
int
freebsd4_sendfile(struct thread *td, struct freebsd4_sendfile_args *uap)
{
	/* Translate the old argument structure field-for-field. */
	struct sendfile_args args = {
		.fd = uap->fd,
		.s = uap->s,
		.offset = uap->offset,
		.nbytes = uap->nbytes,
		.hdtr = uap->hdtr,
		.sbytes = uap->sbytes,
		.flags = uap->flags,
	};

	return (do_sendfile(td, &args, 1));
}
#endif /* COMPAT_FREEBSD4 */
|
|
|
|
|
2006-02-28 19:39:18 +00:00
|
|
|
/*
 * kern_sendfile() - in-kernel core of sendfile(2).
 *
 * td      - calling thread.
 * uap     - syscall arguments (fd, s, offset, nbytes, sbytes, flags).
 * hdr_uio - optional header iovecs, already copied into kernel space.
 * trl_uio - optional trailer iovecs, already copied into kernel space.
 * compat  - FreeBSD < 5.0 semantics: nbytes includes the header size.
 *
 * Wires file pages, wraps them in sf_bufs referenced by external-storage
 * mbufs, and pushes them to the socket without copying the payload.
 * Returns 0 or an errno value; the total byte count is reported through
 * uap->sbytes.
 */
int
kern_sendfile(struct thread *td, struct sendfile_args *uap,
    struct uio *hdr_uio, struct uio *trl_uio, int compat)
{
	struct file *sock_fp;
	/*
	 * NOTE(review): vp is not initialized here, yet the 'out:' path
	 * tests 'vp != NULL'.  If fgetvp_read() fails without setting vp,
	 * that test reads an indeterminate value — verify fgetvp_read()'s
	 * failure contract (or initialize vp = NULL).
	 */
	struct vnode *vp;
	struct vm_object *obj = NULL;
	struct socket *so = NULL;
	struct mbuf *m = NULL;
	struct sf_buf *sf;
	struct vm_page *pg;
	/*
	 * off     - current file offset being mapped.
	 * xfsize  - bytes taken from the current page.
	 * fsbytes - total payload (file) bytes sent so far.
	 * sbytes  - total bytes sent, headers and trailers included.
	 * rem     - bytes remaining to send.
	 */
	off_t off, xfsize, fsbytes = 0, sbytes = 0, rem = 0;
	int error, hdrlen = 0, mnw = 0;
	int vfslocked;

	NET_LOCK_GIANT();

	/*
	 * The file descriptor must be a regular file and have a
	 * backing VM object.
	 * File offset must be positive.  If it goes beyond EOF
	 * we send only the header/trailer and no payload data.
	 */
	if ((error = fgetvp_read(td, uap->fd, &vp)) != 0)
		goto out;
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
	obj = vp->v_object;
	if (obj != NULL) {
		/*
		 * Temporarily increase the backing VM object's reference
		 * count so that a forced reclamation of its vnode does not
		 * immediately destroy it.
		 */
		VM_OBJECT_LOCK(obj);
		if ((obj->flags & OBJ_DEAD) == 0) {
			vm_object_reference_locked(obj);
			VM_OBJECT_UNLOCK(obj);
		} else {
			/* Vnode is being reclaimed; treat as "no object". */
			VM_OBJECT_UNLOCK(obj);
			obj = NULL;
		}
	}
	VOP_UNLOCK(vp, 0, td);
	VFS_UNLOCK_GIANT(vfslocked);
	if (obj == NULL) {
		error = EINVAL;
		goto out;
	}
	if (uap->offset < 0) {
		error = EINVAL;
		goto out;
	}

	/*
	 * The socket must be a stream socket and connected.
	 * Remember if it a blocking or non-blocking socket.
	 */
	if ((error = getsock(td->td_proc->p_fd, uap->s, &sock_fp,
	    NULL)) != 0)
		goto out;
	so = sock_fp->f_data;
	if (so->so_type != SOCK_STREAM) {
		error = EINVAL;
		goto out;
	}
	if ((so->so_state & SS_ISCONNECTED) == 0) {
		error = ENOTCONN;
		goto out;
	}
	/*
	 * Do not wait on memory allocations but return ENOMEM for
	 * caller to retry later.
	 * XXX: Experimental.
	 */
	if (uap->flags & SF_MNOWAIT)
		mnw = 1;

#ifdef MAC
	SOCK_LOCK(so);
	error = mac_check_socket_send(td->td_ucred, so);
	SOCK_UNLOCK(so);
	if (error)
		goto out;
#endif

	/* If headers are specified copy them into mbufs. */
	if (hdr_uio != NULL) {
		hdr_uio->uio_td = td;
		hdr_uio->uio_rw = UIO_WRITE;
		if (hdr_uio->uio_resid > 0) {
			/*
			 * In FBSD < 5.0 the nbytes to send also included
			 * the header.  If compat is specified subtract the
			 * header size from nbytes.
			 */
			if (compat) {
				if (uap->nbytes > hdr_uio->uio_resid)
					uap->nbytes -= hdr_uio->uio_resid;
				else
					uap->nbytes = 0;
			}
			m = m_uiotombuf(hdr_uio, (mnw ? M_NOWAIT : M_WAITOK),
			    0, 0, 0);
			if (m == NULL) {
				error = mnw ? EAGAIN : ENOBUFS;
				goto out;
			}
			/* hdrlen is charged against sb space once, below. */
			hdrlen = m_length(m, NULL);
		}
	}

	/* Protect against multiple writers to the socket. */
	(void) sblock(&so->so_snd, M_WAITOK);

	/*
	 * Loop through the pages of the file, starting with the requested
	 * offset. Get a file page (do I/O if necessary), map the file page
	 * into an sf_buf, attach an mbuf header to the sf_buf, and queue
	 * it on the socket.
	 * This is done in two loops.  The inner loop turns as many pages
	 * as it can, up to available socket buffer space, without blocking
	 * into mbufs to have it bulk delivered into the socket send buffer.
	 * The outer loop checks the state and available space of the socket
	 * and takes care of the overall progress.
	 */
	for (off = uap->offset, rem = uap->nbytes; ; ) {
		int loopbytes = 0;
		int space = 0;
		int done = 0;

		/*
		 * Check the socket state for ongoing connection,
		 * no errors and space in socket buffer.
		 * If space is low allow for the remainder of the
		 * file to be processed if it fits the socket buffer.
		 * Otherwise block in waiting for sufficient space
		 * to proceed, or if the socket is nonblocking, return
		 * to userland with EAGAIN while reporting how far
		 * we've come.
		 * We wait until the socket buffer has significant free
		 * space to do bulk sends.  This makes good use of file
		 * system read ahead and allows packet segmentation
		 * offloading hardware to take over lots of work.  If
		 * we were not careful here we would send off only one
		 * sfbuf at a time.
		 */
		SOCKBUF_LOCK(&so->so_snd);
		if (so->so_snd.sb_lowat < so->so_snd.sb_hiwat / 2)
			so->so_snd.sb_lowat = so->so_snd.sb_hiwat / 2;
retry_space:
		if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
			error = EPIPE;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto done;
		} else if (so->so_error) {
			error = so->so_error;
			so->so_error = 0;
			SOCKBUF_UNLOCK(&so->so_snd);
			goto done;
		}
		space = sbspace(&so->so_snd);
		if (space < rem &&
		    (space <= 0 ||
		     space < so->so_snd.sb_lowat)) {
			if (so->so_state & SS_NBIO) {
				SOCKBUF_UNLOCK(&so->so_snd);
				error = EAGAIN;
				goto done;
			}
			/*
			 * sbwait drops the lock while sleeping.
			 * When we loop back to retry_space the
			 * state may have changed and we retest
			 * for it.
			 */
			error = sbwait(&so->so_snd);
			/*
			 * An error from sbwait usually indicates that we've
			 * been interrupted by a signal. If we've sent anything
			 * then return bytes sent, otherwise return the error.
			 */
			if (error) {
				SOCKBUF_UNLOCK(&so->so_snd);
				goto done;
			}
			goto retry_space;
		}
		SOCKBUF_UNLOCK(&so->so_snd);

		/*
		 * Reduce space in the socket buffer by the size of
		 * the header mbuf chain.
		 * hdrlen is set to 0 after the first loop.
		 */
		space -= hdrlen;

		/*
		 * Loop and construct maximum sized mbuf chain to be bulk
		 * dumped into socket buffer.
		 */
		while(space > loopbytes) {
			vm_pindex_t pindex;
			vm_offset_t pgoff;
			struct mbuf *m0;

			VM_OBJECT_LOCK(obj);
			/*
			 * Calculate the amount to transfer.
			 * Not to exceed a page, the EOF,
			 * or the passed in nbytes.
			 */
			pgoff = (vm_offset_t)(off & PAGE_MASK);
			xfsize = omin(PAGE_SIZE - pgoff,
			    obj->un_pager.vnp.vnp_size - uap->offset -
			    fsbytes - loopbytes);
			if (uap->nbytes)
				rem = (uap->nbytes - fsbytes - loopbytes);
			else
				rem = obj->un_pager.vnp.vnp_size -
				    uap->offset - fsbytes - loopbytes;
			xfsize = omin(rem, xfsize);
			if (xfsize <= 0) {
				VM_OBJECT_UNLOCK(obj);
				done = 1;		/* all data sent */
				break;
			}
			/*
			 * Don't overflow the send buffer.
			 * Stop here and send out what we've
			 * already got.
			 */
			if (space < loopbytes + xfsize) {
				VM_OBJECT_UNLOCK(obj);
				break;
			}

			/*
			 * Attempt to look up the page.  Allocate
			 * if not found or wait and loop if busy.
			 */
			pindex = OFF_TO_IDX(off);
			pg = vm_page_grab(obj, pindex, VM_ALLOC_NOBUSY |
			    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_RETRY);

			/*
			 * Check if page is valid for what we need,
			 * otherwise initiate I/O.
			 * If we already turned some pages into mbufs,
			 * send them off before we come here again and
			 * block.
			 */
			if (pg->valid && vm_page_is_valid(pg, pgoff, xfsize))
				VM_OBJECT_UNLOCK(obj);
			else if (m != NULL)
				error = EAGAIN;	/* send what we already got */
			else if (uap->flags & SF_NODISKIO)
				error = EBUSY;
			else {
				/*
				 * NOTE(review): resid is filled in by
				 * vn_rdwr() but never examined afterwards.
				 */
				int bsize, resid;

				/*
				 * Ensure that our page is still around
				 * when the I/O completes.
				 */
				vm_page_io_start(pg);
				VM_OBJECT_UNLOCK(obj);

				/*
				 * Get the page from backing store.
				 */
				bsize = vp->v_mount->mnt_stat.f_iosize;
				vfslocked = VFS_LOCK_GIANT(vp->v_mount);
				vn_lock(vp, LK_SHARED | LK_RETRY, td);

				/*
				 * XXXMAC: Because we don't have fp->f_cred
				 * here, we pass in NOCRED.  This is probably
				 * wrong, but is consistent with our original
				 * implementation.
				 */
				error = vn_rdwr(UIO_READ, vp, NULL, MAXBSIZE,
				    trunc_page(off), UIO_NOCOPY, IO_NODELOCKED |
				    IO_VMIO | ((MAXBSIZE / bsize) << IO_SEQSHIFT),
				    td->td_ucred, NOCRED, &resid, td);
				VOP_UNLOCK(vp, 0, td);
				VFS_UNLOCK_GIANT(vfslocked);
				VM_OBJECT_LOCK(obj);
				vm_page_io_finish(pg);
				/* On error the object stays locked for the
				 * cleanup path below. */
				if (!error)
					VM_OBJECT_UNLOCK(obj);
				mbstat.sf_iocnt++;
			}
			if (error) {
				vm_page_lock_queues();
				vm_page_unwire(pg, 0);
				/*
				 * See if anyone else might know about
				 * this page.  If not and it is not valid,
				 * then free it.
				 */
				if (pg->wire_count == 0 && pg->valid == 0 &&
				    pg->busy == 0 && !(pg->oflags & VPO_BUSY) &&
				    pg->hold_count == 0) {
					vm_page_free(pg);
				}
				vm_page_unlock_queues();
				VM_OBJECT_UNLOCK(obj);
				if (error == EAGAIN)
					error = 0;	/* not a real error */
				break;
			}

			/*
			 * Get a sendfile buf.  We usually wait as long
			 * as necessary, but this wait can be interrupted.
			 */
			if ((sf = sf_buf_alloc(pg,
			    (mnw ? SFB_NOWAIT : SFB_CATCH))) == NULL) {
				mbstat.sf_allocfail++;
				vm_page_lock_queues();
				vm_page_unwire(pg, 0);
				/*
				 * XXX: Not same check as above!?
				 */
				if (pg->wire_count == 0 && pg->object == NULL)
					vm_page_free(pg);
				vm_page_unlock_queues();
				error = (mnw ? EAGAIN : EINTR);
				break;
			}

			/*
			 * Get an mbuf and set it up as having
			 * external storage.
			 */
			m0 = m_get((mnw ? M_NOWAIT : M_WAITOK), MT_DATA);
			if (m0 == NULL) {
				error = (mnw ? EAGAIN : ENOBUFS);
				/* Undo the sf_buf/page setup by hand. */
				sf_buf_mext((void *)sf_buf_kva(sf), sf);
				break;
			}
			MEXTADD(m0, sf_buf_kva(sf), PAGE_SIZE, sf_buf_mext,
			    sf, M_RDONLY, EXT_SFBUF);
			m0->m_data = (char *)sf_buf_kva(sf) + pgoff;
			m0->m_len = xfsize;

			/* Append to mbuf chain. */
			if (m != NULL)
				m_cat(m, m0);
			else
				m = m0;

			/* Keep track of bits processed. */
			loopbytes += xfsize;
			off += xfsize;
		}

		/* Add the buffer chain to the socket buffer. */
		if (m != NULL) {
			int mlen, err;

			mlen = m_length(m, NULL);
			SOCKBUF_LOCK(&so->so_snd);
			if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
				error = EPIPE;
				SOCKBUF_UNLOCK(&so->so_snd);
				goto done;
			}
			SOCKBUF_UNLOCK(&so->so_snd);
			/* Avoid error aliasing. */
			err = (*so->so_proto->pr_usrreqs->pru_send)
				    (so, 0, m, NULL, NULL, td);
			if (err == 0) {
				/*
				 * We need two counters to get the
				 * file offset and nbytes to send
				 * right:
				 * - sbytes contains the total amount
				 *   of bytes sent, including headers.
				 * - fsbytes contains the total amount
				 *   of bytes sent from the file.
				 */
				sbytes += mlen;
				fsbytes += mlen;
				if (hdrlen) {
					fsbytes -= hdrlen;
					hdrlen = 0;
				}
			} else if (error == 0)
				error = err;
			m = NULL;	/* pru_send always consumes */
		}

		/* Quit outer loop on error or when we're done. */
		if (error || done)
			goto done;
	}

	/*
	 * Send trailers. Wimp out and use writev(2).
	 */
	if (trl_uio != NULL) {
		error = kern_writev(td, uap->s, trl_uio);
		if (error)
			goto done;
		sbytes += td->td_retval[0];
	}

done:
	sbunlock(&so->so_snd);
out:
	/*
	 * If there was no error we have to clear td->td_retval[0]
	 * because it may have been set by writev.
	 */
	if (error == 0) {
		td->td_retval[0] = 0;
	}
	if (uap->sbytes != NULL) {
		/* copyout failure is deliberately ignored here. */
		copyout(&sbytes, uap->sbytes, sizeof(off_t));
	}
	if (obj != NULL)
		vm_object_deallocate(obj);
	if (vp != NULL) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vrele(vp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (so)
		fdrop(sock_fp, td);
	if (m)
		m_freem(m);

	NET_UNLOCK_GIANT();

	/* A restartable error would replay with a stale offset. */
	if (error == ERESTART)
		error = EINTR;

	return (error);
}
|
2006-11-03 15:23:16 +00:00
|
|
|
|
2006-11-07 21:28:12 +00:00
|
|
|
/*
|
|
|
|
* SCTP syscalls.
|
|
|
|
* Functionality only compiled in if SCTP is defined in the kernel Makefile,
|
|
|
|
* otherwise all return EOPNOTSUPP.
|
|
|
|
* XXX: We should make this loadable one day.
|
|
|
|
*/
|
2006-11-03 15:23:16 +00:00
|
|
|
int
|
|
|
|
sctp_peeloff(td, uap)
|
|
|
|
struct thread *td;
|
2006-11-07 21:28:12 +00:00
|
|
|
struct sctp_peeloff_args /* {
|
2006-11-03 15:23:16 +00:00
|
|
|
int sd;
|
|
|
|
caddr_t name;
|
|
|
|
} */ *uap;
|
|
|
|
{
|
|
|
|
#ifdef SCTP
|
|
|
|
struct filedesc *fdp;
|
|
|
|
struct file *nfp = NULL;
|
|
|
|
int error;
|
|
|
|
struct socket *head, *so;
|
|
|
|
int fd;
|
|
|
|
u_int fflag;
|
|
|
|
|
|
|
|
fdp = td->td_proc->p_fd;
|
|
|
|
error = fgetsock(td, uap->sd, &head, &fflag);
|
|
|
|
if (error)
|
|
|
|
goto done2;
|
|
|
|
error = sctp_can_peel_off(head, (sctp_assoc_t)uap->name);
|
|
|
|
if (error)
|
|
|
|
goto done2;
|
|
|
|
/*
|
|
|
|
* At this point we know we do have a assoc to pull
|
|
|
|
* we proceed to get the fd setup. This may block
|
|
|
|
* but that is ok.
|
|
|
|
*/
|
|
|
|
|
|
|
|
error = falloc(td, &nfp, &fd);
|
|
|
|
if (error)
|
|
|
|
goto done;
|
|
|
|
td->td_retval[0] = fd;
|
|
|
|
|
|
|
|
so = sonewconn(head, SS_ISCONNECTED);
|
|
|
|
if (so == NULL)
|
|
|
|
goto noconnection;
|
|
|
|
/*
|
|
|
|
* Before changing the flags on the socket, we have to bump the
|
|
|
|
* reference count. Otherwise, if the protocol calls sofree(),
|
|
|
|
* the socket will be released due to a zero refcount.
|
|
|
|
*/
|
|
|
|
SOCK_LOCK(so);
|
|
|
|
soref(so); /* file descriptor reference */
|
|
|
|
SOCK_UNLOCK(so);
|
|
|
|
|
|
|
|
ACCEPT_LOCK();
|
|
|
|
|
|
|
|
TAILQ_REMOVE(&head->so_comp, so, so_list);
|
|
|
|
head->so_qlen--;
|
|
|
|
so->so_state |= (head->so_state & SS_NBIO);
|
|
|
|
so->so_state &= ~SS_NOFDREF;
|
|
|
|
so->so_qstate &= ~SQ_COMP;
|
|
|
|
so->so_head = NULL;
|
|
|
|
|
|
|
|
ACCEPT_UNLOCK();
|
|
|
|
|
|
|
|
error = sctp_do_peeloff(head, so, (sctp_assoc_t)uap->name);
|
|
|
|
if (error)
|
|
|
|
goto noconnection;
|
|
|
|
if (head->so_sigio != NULL)
|
|
|
|
fsetown(fgetown(&head->so_sigio), &so->so_sigio);
|
|
|
|
|
|
|
|
FILE_LOCK(nfp);
|
|
|
|
nfp->f_data = so;
|
|
|
|
nfp->f_flag = fflag;
|
|
|
|
nfp->f_type = DTYPE_SOCKET;
|
Replace custom file descriptor array sleep lock constructed using a mutex
and flags with an sxlock. This leads to a significant and measurable
performance improvement as a result of access to shared locking for
frequent lookup operations, reduced general overhead, and reduced overhead
in the event of contention. All of these are imported for threaded
applications where simultaneous access to a shared file descriptor array
occurs frequently. Kris has reported 2x-4x transaction rate improvements
on 8-core MySQL benchmarks; smaller improvements can be expected for many
workloads as a result of reduced overhead.
- Generally eliminate the distinction between "fast" and regular
acquisisition of the filedesc lock; the plan is that they will now all
be fast. Change all locking instances to either shared or exclusive
locks.
- Correct a bug (pointed out by kib) in fdfree() where previously msleep()
was called without the mutex held; sx_sleep() is now always called with
the sxlock held exclusively.
- Universally hold the struct file lock over changes to struct file,
rather than the filedesc lock or no lock. Always update the f_ops
field last. A further memory barrier is required here in the future
(discussed with jhb).
- Improve locking and reference management in linux_at(), which fails to
properly acquire vnode references before using vnode pointers. Annotate
improper use of vn_fullpath(), which will be replaced at a future date.
In fcntl(), we conservatively acquire an exclusive lock, even though in
some cases a shared lock may be sufficient, which should be revisited.
The dropping of the filedesc lock in fdgrowtable() is no longer required
as the sxlock can be held over the sleep operation; we should consider
removing that (pointed out by attilio).
Tested by: kris
Discussed with: jhb, kris, attilio, jeff
2007-04-04 09:11:34 +00:00
|
|
|
nfp->f_ops = &socketops;
|
2006-11-03 15:23:16 +00:00
|
|
|
FILE_UNLOCK(nfp);
|
|
|
|
|
2006-11-07 21:28:12 +00:00
|
|
|
noconnection:
|
2006-11-03 15:23:16 +00:00
|
|
|
/*
|
|
|
|
* close the new descriptor, assuming someone hasn't ripped it
|
|
|
|
* out from under us.
|
|
|
|
*/
|
|
|
|
if (error)
|
|
|
|
fdclose(fdp, nfp, fd, td);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Release explicitly held references before returning.
|
|
|
|
*/
|
2006-11-07 21:28:12 +00:00
|
|
|
done:
|
2006-11-03 15:23:16 +00:00
|
|
|
if (nfp != NULL)
|
|
|
|
fdrop(nfp, td);
|
|
|
|
fputsock(head);
|
2006-11-07 21:28:12 +00:00
|
|
|
done2:
|
2006-11-03 15:23:16 +00:00
|
|
|
return (error);
|
2006-11-07 21:28:12 +00:00
|
|
|
#else /* SCTP */
|
2006-11-03 15:23:16 +00:00
|
|
|
return (EOPNOTSUPP);
|
2006-11-07 21:28:12 +00:00
|
|
|
#endif /* SCTP */
|
2006-11-03 15:23:16 +00:00
|
|
|
}
|
|
|
|
|
2006-11-07 21:28:12 +00:00
|
|
|
int
|
|
|
|
sctp_generic_sendmsg (td, uap)
|
2006-11-03 15:23:16 +00:00
|
|
|
struct thread *td;
|
2006-11-07 21:28:12 +00:00
|
|
|
struct sctp_generic_sendmsg_args /* {
|
|
|
|
int sd,
|
|
|
|
caddr_t msg,
|
|
|
|
int mlen,
|
|
|
|
caddr_t to,
|
|
|
|
__socklen_t tolen,
|
|
|
|
struct sctp_sndrcvinfo *sinfo,
|
|
|
|
int flags
|
|
|
|
} */ *uap;
|
2006-11-03 15:23:16 +00:00
|
|
|
{
|
|
|
|
#ifdef SCTP
|
2006-11-07 21:28:12 +00:00
|
|
|
struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
|
2006-11-03 15:23:16 +00:00
|
|
|
struct socket *so;
|
|
|
|
struct file *fp;
|
2006-11-07 21:28:12 +00:00
|
|
|
int use_rcvinfo = 1;
|
|
|
|
int error = 0, len;
|
|
|
|
struct sockaddr *to = NULL;
|
2006-11-03 15:23:16 +00:00
|
|
|
#ifdef KTRACE
|
|
|
|
struct uio *ktruio = NULL;
|
|
|
|
#endif
|
|
|
|
struct uio auio;
|
|
|
|
struct iovec iov[1];
|
|
|
|
|
2006-11-07 21:28:12 +00:00
|
|
|
if (uap->sinfo) {
|
2006-11-03 15:23:16 +00:00
|
|
|
error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
u_sinfo = &sinfo;
|
|
|
|
}
|
2006-11-07 21:28:12 +00:00
|
|
|
if (uap->tolen) {
|
2006-11-03 15:23:16 +00:00
|
|
|
error = getsockaddr(&to, uap->to, uap->tolen);
|
|
|
|
if (error) {
|
|
|
|
to = NULL;
|
|
|
|
goto sctp_bad2;
|
|
|
|
}
|
|
|
|
}
|
2006-11-07 21:28:12 +00:00
|
|
|
|
2006-11-03 15:23:16 +00:00
|
|
|
error = getsock(td->td_proc->p_fd, uap->sd, &fp, NULL);
|
|
|
|
if (error)
|
|
|
|
goto sctp_bad;
|
|
|
|
|
|
|
|
iov[0].iov_base = uap->msg;
|
|
|
|
iov[0].iov_len = uap->mlen;
|
|
|
|
|
|
|
|
so = (struct socket *)fp->f_data;
|
|
|
|
#ifdef MAC
|
|
|
|
SOCK_LOCK(so);
|
|
|
|
error = mac_check_socket_send(td->td_ucred, so);
|
|
|
|
SOCK_UNLOCK(so);
|
|
|
|
if (error)
|
|
|
|
goto sctp_bad;
|
2006-11-07 21:28:12 +00:00
|
|
|
#endif /* MAC */
|
2006-11-03 15:23:16 +00:00
|
|
|
|
|
|
|
auio.uio_iov = iov;
|
|
|
|
auio.uio_iovcnt = 1;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
|
|
|
auio.uio_rw = UIO_WRITE;
|
|
|
|
auio.uio_td = td;
|
|
|
|
auio.uio_offset = 0; /* XXX */
|
|
|
|
auio.uio_resid = 0;
|
|
|
|
len = auio.uio_resid = uap->mlen;
|
2006-11-07 21:28:12 +00:00
|
|
|
error = sctp_lower_sosend(so, to, &auio,
|
|
|
|
(struct mbuf *)NULL, (struct mbuf *)NULL,
|
|
|
|
uap->flags, use_rcvinfo, u_sinfo, td);
|
2006-11-03 15:23:16 +00:00
|
|
|
if (error) {
|
|
|
|
if (auio.uio_resid != len && (error == ERESTART ||
|
|
|
|
error == EINTR || error == EWOULDBLOCK))
|
|
|
|
error = 0;
|
2006-11-07 21:28:12 +00:00
|
|
|
/* Generation of SIGPIPE can be controlled per socket. */
|
2006-11-03 15:23:16 +00:00
|
|
|
if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
|
|
|
|
!(uap->flags & MSG_NOSIGNAL)) {
|
|
|
|
PROC_LOCK(td->td_proc);
|
|
|
|
psignal(td->td_proc, SIGPIPE);
|
|
|
|
PROC_UNLOCK(td->td_proc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (error == 0)
|
|
|
|
td->td_retval[0] = len - auio.uio_resid;
|
|
|
|
#ifdef KTRACE
|
|
|
|
if (ktruio != NULL) {
|
|
|
|
ktruio->uio_resid = td->td_retval[0];
|
|
|
|
ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
|
|
|
|
}
|
2006-11-07 21:28:12 +00:00
|
|
|
#endif /* KTRACE */
|
|
|
|
sctp_bad:
|
2006-11-03 15:23:16 +00:00
|
|
|
fdrop(fp, td);
|
2006-11-07 21:28:12 +00:00
|
|
|
sctp_bad2:
|
2006-11-03 15:23:16 +00:00
|
|
|
if (to)
|
2006-11-07 21:28:12 +00:00
|
|
|
free(to, M_SONAME);
|
2006-11-03 15:23:16 +00:00
|
|
|
return (error);
|
2006-11-07 21:28:12 +00:00
|
|
|
#else /* SCTP */
|
2006-11-03 15:23:16 +00:00
|
|
|
return (EOPNOTSUPP);
|
2006-11-07 21:28:12 +00:00
|
|
|
#endif /* SCTP */
|
2006-11-03 15:23:16 +00:00
|
|
|
}
|
|
|
|
|
2006-11-07 21:28:12 +00:00
|
|
|
int
|
|
|
|
sctp_generic_sendmsg_iov(td, uap)
|
2006-11-03 15:23:16 +00:00
|
|
|
struct thread *td;
|
2006-11-07 21:28:12 +00:00
|
|
|
struct sctp_generic_sendmsg_iov_args /* {
|
|
|
|
int sd,
|
|
|
|
struct iovec *iov,
|
|
|
|
int iovlen,
|
|
|
|
caddr_t to,
|
|
|
|
__socklen_t tolen,
|
|
|
|
struct sctp_sndrcvinfo *sinfo,
|
|
|
|
int flags
|
|
|
|
} */ *uap;
|
2006-11-03 15:23:16 +00:00
|
|
|
{
|
|
|
|
#ifdef SCTP
|
2006-11-07 21:28:12 +00:00
|
|
|
struct sctp_sndrcvinfo sinfo, *u_sinfo = NULL;
|
2006-11-03 15:23:16 +00:00
|
|
|
struct socket *so;
|
|
|
|
struct file *fp;
|
2006-11-07 21:28:12 +00:00
|
|
|
int use_rcvinfo = 1;
|
2006-11-03 15:23:16 +00:00
|
|
|
int error=0, len, i;
|
2006-11-07 21:28:12 +00:00
|
|
|
struct sockaddr *to = NULL;
|
2006-11-03 15:23:16 +00:00
|
|
|
#ifdef KTRACE
|
|
|
|
struct uio *ktruio = NULL;
|
|
|
|
#endif
|
|
|
|
struct uio auio;
|
|
|
|
struct iovec *iov, *tiov;
|
|
|
|
|
2006-11-07 21:28:12 +00:00
|
|
|
if (uap->sinfo) {
|
2006-11-03 15:23:16 +00:00
|
|
|
error = copyin(uap->sinfo, &sinfo, sizeof (sinfo));
|
|
|
|
if (error)
|
|
|
|
return (error);
|
|
|
|
u_sinfo = &sinfo;
|
|
|
|
}
|
2006-11-07 21:28:12 +00:00
|
|
|
if (uap->tolen) {
|
2006-11-03 15:23:16 +00:00
|
|
|
error = getsockaddr(&to, uap->to, uap->tolen);
|
|
|
|
if (error) {
|
|
|
|
to = NULL;
|
|
|
|
goto sctp_bad2;
|
|
|
|
}
|
|
|
|
}
|
2006-11-07 21:28:12 +00:00
|
|
|
|
2006-11-03 15:23:16 +00:00
|
|
|
error = getsock(td->td_proc->p_fd, uap->sd, &fp, NULL);
|
|
|
|
if (error)
|
|
|
|
goto sctp_bad1;
|
|
|
|
|
|
|
|
error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
|
|
|
|
if (error)
|
|
|
|
goto sctp_bad1;
|
|
|
|
|
|
|
|
so = (struct socket *)fp->f_data;
|
|
|
|
#ifdef MAC
|
|
|
|
SOCK_LOCK(so);
|
|
|
|
error = mac_check_socket_send(td->td_ucred, so);
|
|
|
|
SOCK_UNLOCK(so);
|
|
|
|
if (error)
|
|
|
|
goto sctp_bad;
|
2006-11-07 21:28:12 +00:00
|
|
|
#endif /* MAC */
|
2006-11-03 15:23:16 +00:00
|
|
|
|
|
|
|
auio.uio_iov = iov;
|
|
|
|
auio.uio_iovcnt = uap->iovlen;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
|
|
|
auio.uio_rw = UIO_WRITE;
|
|
|
|
auio.uio_td = td;
|
|
|
|
auio.uio_offset = 0; /* XXX */
|
|
|
|
auio.uio_resid = 0;
|
|
|
|
tiov = iov;
|
|
|
|
for (i = 0; i <uap->iovlen; i++, tiov++) {
|
|
|
|
if ((auio.uio_resid += tiov->iov_len) < 0) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto sctp_bad;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
len = auio.uio_resid;
|
2006-11-07 21:28:12 +00:00
|
|
|
error = sctp_lower_sosend(so, to, &auio,
|
|
|
|
(struct mbuf *)NULL, (struct mbuf *)NULL,
|
|
|
|
uap->flags, use_rcvinfo, u_sinfo, td);
|
2006-11-03 15:23:16 +00:00
|
|
|
if (error) {
|
|
|
|
if (auio.uio_resid != len && (error == ERESTART ||
|
|
|
|
error == EINTR || error == EWOULDBLOCK))
|
|
|
|
error = 0;
|
|
|
|
/* Generation of SIGPIPE can be controlled per socket */
|
|
|
|
if (error == EPIPE && !(so->so_options & SO_NOSIGPIPE) &&
|
|
|
|
!(uap->flags & MSG_NOSIGNAL)) {
|
|
|
|
PROC_LOCK(td->td_proc);
|
|
|
|
psignal(td->td_proc, SIGPIPE);
|
|
|
|
PROC_UNLOCK(td->td_proc);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (error == 0)
|
|
|
|
td->td_retval[0] = len - auio.uio_resid;
|
|
|
|
#ifdef KTRACE
|
|
|
|
if (ktruio != NULL) {
|
|
|
|
ktruio->uio_resid = td->td_retval[0];
|
|
|
|
ktrgenio(uap->sd, UIO_WRITE, ktruio, error);
|
|
|
|
}
|
2006-11-07 21:28:12 +00:00
|
|
|
#endif /* KTRACE */
|
|
|
|
sctp_bad:
|
2006-11-03 15:23:16 +00:00
|
|
|
free(iov, M_IOV);
|
2006-11-07 21:28:12 +00:00
|
|
|
sctp_bad1:
|
2006-11-03 15:23:16 +00:00
|
|
|
fdrop(fp, td);
|
2006-11-07 21:28:12 +00:00
|
|
|
sctp_bad2:
|
2006-11-03 15:23:16 +00:00
|
|
|
if (to)
|
2006-11-07 21:28:12 +00:00
|
|
|
free(to, M_SONAME);
|
2006-11-03 15:23:16 +00:00
|
|
|
return (error);
|
2006-11-07 21:28:12 +00:00
|
|
|
#else /* SCTP */
|
2006-11-03 15:23:16 +00:00
|
|
|
return (EOPNOTSUPP);
|
2006-11-07 21:28:12 +00:00
|
|
|
#endif /* SCTP */
|
2006-11-03 15:23:16 +00:00
|
|
|
}
|
|
|
|
|
2006-11-07 21:28:12 +00:00
|
|
|
int
|
|
|
|
sctp_generic_recvmsg(td, uap)
|
2006-11-03 15:23:16 +00:00
|
|
|
struct thread *td;
|
2006-11-07 21:28:12 +00:00
|
|
|
struct sctp_generic_recvmsg_args /* {
|
|
|
|
int sd,
|
|
|
|
struct iovec *iov,
|
|
|
|
int iovlen,
|
|
|
|
struct sockaddr *from,
|
|
|
|
__socklen_t *fromlenaddr,
|
|
|
|
struct sctp_sndrcvinfo *sinfo,
|
|
|
|
int *msg_flags
|
|
|
|
} */ *uap;
|
2006-11-03 15:23:16 +00:00
|
|
|
{
|
|
|
|
#ifdef SCTP
|
|
|
|
u_int8_t sockbufstore[256];
|
|
|
|
struct uio auio;
|
|
|
|
struct iovec *iov, *tiov;
|
|
|
|
struct sctp_sndrcvinfo sinfo;
|
|
|
|
struct socket *so;
|
|
|
|
struct file *fp;
|
|
|
|
struct sockaddr *fromsa;
|
|
|
|
int fromlen;
|
2007-01-24 12:59:56 +00:00
|
|
|
int len, i, msg_flags;
|
2006-11-07 21:28:12 +00:00
|
|
|
int error = 0;
|
2006-11-03 15:23:16 +00:00
|
|
|
#ifdef KTRACE
|
|
|
|
struct uio *ktruio = NULL;
|
|
|
|
#endif
|
|
|
|
error = getsock(td->td_proc->p_fd, uap->sd, &fp, NULL);
|
|
|
|
if (error) {
|
|
|
|
return (error);
|
|
|
|
}
|
|
|
|
error = copyiniov(uap->iov, uap->iovlen, &iov, EMSGSIZE);
|
|
|
|
if (error) {
|
|
|
|
goto out1;
|
|
|
|
}
|
2006-11-07 21:28:12 +00:00
|
|
|
|
2006-11-03 15:23:16 +00:00
|
|
|
so = fp->f_data;
|
|
|
|
#ifdef MAC
|
|
|
|
SOCK_LOCK(so);
|
|
|
|
error = mac_check_socket_receive(td->td_ucred, so);
|
|
|
|
SOCK_UNLOCK(so);
|
|
|
|
if (error) {
|
|
|
|
goto out;
|
|
|
|
return (error);
|
|
|
|
}
|
2006-11-07 21:28:12 +00:00
|
|
|
#endif /* MAC */
|
|
|
|
|
2006-11-03 15:23:16 +00:00
|
|
|
if (uap->fromlenaddr) {
|
|
|
|
error = copyin(uap->fromlenaddr,
|
|
|
|
&fromlen, sizeof (fromlen));
|
|
|
|
if (error) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
fromlen = 0;
|
|
|
|
}
|
2007-01-24 12:59:56 +00:00
|
|
|
if(uap->msg_flags) {
|
|
|
|
error = copyin(uap->msg_flags, &msg_flags, sizeof (int));
|
|
|
|
if (error) {
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
msg_flags = 0;
|
|
|
|
}
|
2006-11-03 15:23:16 +00:00
|
|
|
auio.uio_iov = iov;
|
|
|
|
auio.uio_iovcnt = uap->iovlen;
|
|
|
|
auio.uio_segflg = UIO_USERSPACE;
|
|
|
|
auio.uio_rw = UIO_READ;
|
|
|
|
auio.uio_td = td;
|
|
|
|
auio.uio_offset = 0; /* XXX */
|
|
|
|
auio.uio_resid = 0;
|
|
|
|
tiov = iov;
|
|
|
|
for (i = 0; i <uap->iovlen; i++, tiov++) {
|
|
|
|
if ((auio.uio_resid += tiov->iov_len) < 0) {
|
|
|
|
error = EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
len = auio.uio_resid;
|
|
|
|
fromsa = (struct sockaddr *)sockbufstore;
|
2007-01-24 12:59:56 +00:00
|
|
|
|
2006-11-03 15:23:16 +00:00
|
|
|
#ifdef KTRACE
|
|
|
|
if (KTRPOINT(td, KTR_GENIO))
|
|
|
|
ktruio = cloneuio(&auio);
|
2006-11-07 21:28:12 +00:00
|
|
|
#endif /* KTRACE */
|
2006-11-03 15:23:16 +00:00
|
|
|
error = sctp_sorecvmsg(so, &auio, (struct mbuf **)NULL,
|
2006-11-07 21:28:12 +00:00
|
|
|
fromsa, fromlen, &msg_flags,
|
|
|
|
(struct sctp_sndrcvinfo *)&sinfo, 1);
|
2006-11-03 15:23:16 +00:00
|
|
|
if (error) {
|
|
|
|
if (auio.uio_resid != (int)len && (error == ERESTART ||
|
|
|
|
error == EINTR || error == EWOULDBLOCK))
|
|
|
|
error = 0;
|
|
|
|
} else {
|
2006-11-07 21:28:12 +00:00
|
|
|
if (uap->sinfo)
|
2006-11-03 15:23:16 +00:00
|
|
|
error = copyout(&sinfo, uap->sinfo, sizeof (sinfo));
|
|
|
|
}
|
|
|
|
#ifdef KTRACE
|
|
|
|
if (ktruio != NULL) {
|
|
|
|
ktruio->uio_resid = (int)len - auio.uio_resid;
|
|
|
|
ktrgenio(uap->sd, UIO_READ, ktruio, error);
|
|
|
|
}
|
2006-11-07 21:28:12 +00:00
|
|
|
#endif /* KTRACE */
|
2006-11-03 15:23:16 +00:00
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
td->td_retval[0] = (int)len - auio.uio_resid;
|
2006-11-07 21:28:12 +00:00
|
|
|
|
2006-11-03 15:23:16 +00:00
|
|
|
if (fromlen && uap->from) {
|
|
|
|
len = fromlen;
|
|
|
|
if (len <= 0 || fromsa == 0)
|
|
|
|
len = 0;
|
|
|
|
else {
|
|
|
|
len = MIN(len, fromsa->sa_len);
|
|
|
|
error = copyout(fromsa, uap->from, (unsigned)len);
|
|
|
|
if (error)
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
error = copyout(&len, uap->fromlenaddr, sizeof (socklen_t));
|
2006-11-07 21:28:12 +00:00
|
|
|
if (error) {
|
2006-11-03 15:23:16 +00:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (uap->msg_flags) {
|
|
|
|
error = copyout(&msg_flags, uap->msg_flags, sizeof (int));
|
2006-11-07 21:28:12 +00:00
|
|
|
if (error) {
|
2006-11-03 15:23:16 +00:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
free(iov, M_IOV);
|
|
|
|
out1:
|
|
|
|
fdrop(fp, td);
|
|
|
|
return (error);
|
2006-11-07 21:28:12 +00:00
|
|
|
#else /* SCTP */
|
2006-11-03 15:23:16 +00:00
|
|
|
return (EOPNOTSUPP);
|
2006-11-07 21:28:12 +00:00
|
|
|
#endif /* SCTP */
|
2006-11-03 15:23:16 +00:00
|
|
|
}
|