2005-01-06 23:35:40 +00:00
|
|
|
/*-
|
2017-11-20 19:43:44 +00:00
|
|
|
* SPDX-License-Identifier: BSD-3-Clause
|
|
|
|
*
|
1994-05-24 10:09:53 +00:00
|
|
|
* Copyright (c) 1982, 1986, 1990, 1993
|
|
|
|
* The Regents of the University of California. All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
|
|
* documentation and/or other materials provided with the distribution.
|
2016-09-15 13:16:20 +00:00
|
|
|
* 3. Neither the name of the University nor the names of its contributors
|
1994-05-24 10:09:53 +00:00
|
|
|
* may be used to endorse or promote products derived from this software
|
|
|
|
* without specific prior written permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
|
|
* SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* @(#)sys_socket.c 8.1 (Berkeley) 6/10/93
|
|
|
|
*/
|
|
|
|
|
2003-06-11 00:56:59 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/param.h>
|
|
|
|
#include <sys/systm.h>
|
2016-03-01 18:12:14 +00:00
|
|
|
#include <sys/aio.h>
|
2014-09-22 16:20:47 +00:00
|
|
|
#include <sys/domain.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/file.h>
|
2003-01-01 01:56:19 +00:00
|
|
|
#include <sys/filedesc.h>
|
2016-03-01 18:12:14 +00:00
|
|
|
#include <sys/kernel.h>
|
|
|
|
#include <sys/kthread.h>
|
2014-09-22 16:20:47 +00:00
|
|
|
#include <sys/malloc.h>
|
The SO_NOSIGPIPE socket option allows a user process to mark a socket
so that the socket does not generate SIGPIPE, only EPIPE, when a write
is attempted after socket shutdown. When the option was introduced in
2002, this required the logic for determining whether SIGPIPE was
generated to be pushed down from dofilewrite() to the socket layer so
that the socket options could be considered. However, the change in
2002 omitted modification to soo_write() required to add that logic,
resulting in SIGPIPE not being generated even without SO_NOSIGPIPE when
the socket was written to using write() or related generic system calls.
This change adds the EPIPE logic to soo_write(), generating a SIGPIPE
signal to the process associated with the passed uio in the event that
the SO_NOSIGPIPE option is not set.
Notes:
- There are upsides and downsides to placing this logic in the socket
layer as opposed to the file descriptor layer. This is really fd
layer logic, but because we need so_options, we have a choice of
layering violations and pick this one.
- SIGPIPE possibly should be delivered to the thread performing the
write, not the process performing the write.
- uio->uio_td and the td argument to soo_write() might potentially
differ; we use the thread in the uio argument.
- The "sigpipe" regression test in src/tools/regression/sockets/sigpipe
tests for the bug.
Submitted by: Mikko Tyolajarvi <mbsd at pacbell dot net>
Talked with: glebius, alfred
PR: 78478
MFC after: 1 week
2005-03-11 15:06:16 +00:00
|
|
|
#include <sys/proc.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/protosw.h>
|
2003-01-01 01:56:19 +00:00
|
|
|
#include <sys/sigio.h>
|
The SO_NOSIGPIPE socket option allows a user process to mark a socket
so that the socket does not generate SIGPIPE, only EPIPE, when a write
is attempted after socket shutdown. When the option was introduced in
2002, this required the logic for determining whether SIGPIPE was
generated to be pushed down from dofilewrite() to the socket layer so
that the socket options could be considered. However, the change in
2002 omitted modification to soo_write() required to add that logic,
resulting in SIGPIPE not being generated even without SO_NOSIGPIPE when
the socket was written to using write() or related generic system calls.
This change adds the EPIPE logic to soo_write(), generating a SIGPIPE
signal to the process associated with the passed uio in the event that
the SO_NOSIGPIPE option is not set.
Notes:
- There are upsides and downsides to placing this logic in the socket
layer as opposed to the file descriptor layer. This is really fd
layer logic, but because we need so_options, we have a choice of
layering violations and pick this one.
- SIGPIPE possibly should be delivered to the thread performing the
write, not the process performing the write.
- uio->uio_td and the td argument to soo_write() might potentially
differ; we use the thread in the uio argument.
- The "sigpipe" regression test in src/tools/regression/sockets/sigpipe
tests for the bug.
Submitted by: Mikko Tyolajarvi <mbsd at pacbell dot net>
Talked with: glebius, alfred
PR: 78478
MFC after: 1 week
2005-03-11 15:06:16 +00:00
|
|
|
#include <sys/signal.h>
|
|
|
|
#include <sys/signalvar.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <sys/socketvar.h>
|
1997-03-24 11:52:29 +00:00
|
|
|
#include <sys/filio.h> /* XXX */
|
|
|
|
#include <sys/sockio.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/stat.h>
|
2016-03-01 18:12:14 +00:00
|
|
|
#include <sys/sysctl.h>
|
|
|
|
#include <sys/sysproto.h>
|
|
|
|
#include <sys/taskqueue.h>
|
1998-03-28 10:33:27 +00:00
|
|
|
#include <sys/uio.h>
|
2000-05-11 22:08:57 +00:00
|
|
|
#include <sys/ucred.h>
|
2014-09-22 16:20:47 +00:00
|
|
|
#include <sys/un.h>
|
|
|
|
#include <sys/unpcb.h>
|
|
|
|
#include <sys/user.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
#include <net/if.h>
|
2013-10-26 17:58:36 +00:00
|
|
|
#include <net/if_var.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <net/route.h>
|
2009-08-01 19:26:27 +00:00
|
|
|
#include <net/vnet.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2014-09-22 16:20:47 +00:00
|
|
|
#include <netinet/in.h>
|
|
|
|
#include <netinet/in_pcb.h>
|
|
|
|
|
2006-10-22 11:52:19 +00:00
|
|
|
#include <security/mac/mac_framework.h>
|
|
|
|
|
2016-03-01 18:12:14 +00:00
|
|
|
#include <vm/vm.h>
|
|
|
|
#include <vm/pmap.h>
|
|
|
|
#include <vm/vm_extern.h>
|
|
|
|
#include <vm/vm_map.h>
|
|
|
|
|
2020-02-26 14:26:36 +00:00
|
|
|
/*
 * sysctl subtree kern.ipc.aio: read-only statistics about asynchronous
 * I/O (aio(4)) requests issued against sockets.
 */
static SYSCTL_NODE(_kern_ipc, OID_AUTO, aio, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "socket AIO stats");

/* Count of socket AIO operations that returned EAGAIN (nothing ready). */
static int empty_results;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, empty_results, CTLFLAG_RD, &empty_results,
    0, "socket operation returned EAGAIN");

/* Count of times a socket AIO operation was retried after coming up empty. */
static int empty_retries;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, empty_retries, CTLFLAG_RD, &empty_retries,
    0, "socket operation retries");
|
|
|
|
|
2014-08-26 14:44:08 +00:00
|
|
|
/*
 * Forward declarations of the fileops methods for sockets.  These are
 * installed in the socketops vector below; all are file-local except
 * soo_kqfilter, which is defined elsewhere (hence the extern).
 */
static fo_rdwr_t soo_read;
static fo_rdwr_t soo_write;
static fo_ioctl_t soo_ioctl;
static fo_poll_t soo_poll;
extern fo_kqfilter_t soo_kqfilter;	/* defined outside this file */
static fo_stat_t soo_stat;
static fo_close_t soo_close;
static fo_fill_kinfo_t soo_fill_kinfo;
static fo_aio_queue_t soo_aio_queue;

/* Cancellation callback for in-flight socket AIO jobs. */
static void soo_aio_cancel(struct kaiocb *job);
|
2014-08-26 14:44:08 +00:00
|
|
|
|
2007-03-04 17:50:46 +00:00
|
|
|
/*
 * fileops vector for socket file descriptors: routes generic descriptor
 * operations (read(2), write(2), ioctl(2), poll(2), ...) to the soo_*
 * handlers in this file.  Operations that make no sense on a socket
 * (truncate, chmod, chown, sendfile) are wired to the invfo_* stubs.
 * DFLAG_PASSABLE permits passing these descriptors over unix(4) sockets.
 */
struct fileops socketops = {
	.fo_read = soo_read,
	.fo_write = soo_write,
	.fo_truncate = invfo_truncate,
	.fo_ioctl = soo_ioctl,
	.fo_poll = soo_poll,
	.fo_kqfilter = soo_kqfilter,
	.fo_stat = soo_stat,
	.fo_close = soo_close,
	.fo_chmod = invfo_chmod,
	.fo_chown = invfo_chown,
	.fo_sendfile = invfo_sendfile,
	.fo_fill_kinfo = soo_fill_kinfo,
	.fo_aio_queue = soo_aio_queue,
	.fo_flags = DFLAG_PASSABLE
};
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2014-08-26 14:44:08 +00:00
|
|
|
static int
|
2007-03-04 17:50:46 +00:00
|
|
|
soo_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
|
|
|
|
int flags, struct thread *td)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2003-01-13 00:33:17 +00:00
|
|
|
struct socket *so = fp->f_data;
|
2002-03-15 08:03:46 +00:00
|
|
|
int error;
|
|
|
|
|
Change the curvnet variable from a global const struct vnet *,
previously always pointing to the default vnet context, to a
dynamically changing thread-local one. The currvnet context
should be set on entry to networking code via CURVNET_SET() macros,
and reverted to previous state via CURVNET_RESTORE(). Recursions
on curvnet are permitted, though strongly discuouraged.
This change should have no functional impact on nooptions VIMAGE
kernel builds, where CURVNET_* macros expand to whitespace.
The curthread->td_vnet (aka curvnet) variable's purpose is to be an
indicator of the vnet context in which the current network-related
operation takes place, in case we cannot deduce the current vnet
context from any other source, such as by looking at mbuf's
m->m_pkthdr.rcvif->if_vnet, sockets's so->so_vnet etc. Moreover, so
far curvnet has turned out to be an invaluable consistency checking
aid: it helps to catch cases when sockets, ifnets or any other
vnet-aware structures may have leaked from one vnet to another.
The exact placement of the CURVNET_SET() / CURVNET_RESTORE() macros
was a result of an empirical iterative process, whith an aim to
reduce recursions on CURVNET_SET() to a minimum, while still reducing
the scope of CURVNET_SET() to networking only operations - the
alternative would be calling CURVNET_SET() on each system call entry.
In general, curvnet has to be set in three typicall cases: when
processing socket-related requests from userspace or from within the
kernel; when processing inbound traffic flowing from device drivers
to upper layers of the networking stack, and when executing
timer-driven networking functions.
This change also introduces a DDB subcommand to show the list of all
vnet instances.
Approved by: julian (mentor)
2009-05-05 10:56:12 +00:00
|
|
|
#ifdef MAC
|
2007-10-24 19:04:04 +00:00
|
|
|
error = mac_socket_check_receive(active_cred, so);
|
2007-08-06 14:26:03 +00:00
|
|
|
if (error)
|
2002-10-06 14:39:15 +00:00
|
|
|
return (error);
|
|
|
|
#endif
|
Change the curvnet variable from a global const struct vnet *,
previously always pointing to the default vnet context, to a
dynamically changing thread-local one. The currvnet context
should be set on entry to networking code via CURVNET_SET() macros,
and reverted to previous state via CURVNET_RESTORE(). Recursions
on curvnet are permitted, though strongly discuouraged.
This change should have no functional impact on nooptions VIMAGE
kernel builds, where CURVNET_* macros expand to whitespace.
The curthread->td_vnet (aka curvnet) variable's purpose is to be an
indicator of the vnet context in which the current network-related
operation takes place, in case we cannot deduce the current vnet
context from any other source, such as by looking at mbuf's
m->m_pkthdr.rcvif->if_vnet, sockets's so->so_vnet etc. Moreover, so
far curvnet has turned out to be an invaluable consistency checking
aid: it helps to catch cases when sockets, ifnets or any other
vnet-aware structures may have leaked from one vnet to another.
The exact placement of the CURVNET_SET() / CURVNET_RESTORE() macros
was a result of an empirical iterative process, whith an aim to
reduce recursions on CURVNET_SET() to a minimum, while still reducing
the scope of CURVNET_SET() to networking only operations - the
alternative would be calling CURVNET_SET() on each system call entry.
In general, curvnet has to be set in three typicall cases: when
processing socket-related requests from userspace or from within the
kernel; when processing inbound traffic flowing from device drivers
to upper layers of the networking stack, and when executing
timer-driven networking functions.
This change also introduces a DDB subcommand to show the list of all
vnet instances.
Approved by: julian (mentor)
2009-05-05 10:56:12 +00:00
|
|
|
error = soreceive(so, 0, uio, 0, 0, 0);
|
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
2014-08-26 14:44:08 +00:00
|
|
|
static int
|
2007-03-04 17:50:46 +00:00
|
|
|
soo_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
|
|
|
|
int flags, struct thread *td)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2003-01-13 00:33:17 +00:00
|
|
|
struct socket *so = fp->f_data;
|
2002-03-15 08:03:46 +00:00
|
|
|
int error;
|
|
|
|
|
2002-10-06 14:39:15 +00:00
|
|
|
#ifdef MAC
|
2007-10-24 19:04:04 +00:00
|
|
|
error = mac_socket_check_send(active_cred, so);
|
2007-08-06 14:26:03 +00:00
|
|
|
if (error)
|
2002-10-06 14:39:15 +00:00
|
|
|
return (error);
|
|
|
|
#endif
|
soreceive_generic(), and sopoll_generic(). Add new functions sosend(),
soreceive(), and sopoll(), which are wrappers for pru_sosend,
pru_soreceive, and pru_sopoll, and are now used univerally by socket
consumers rather than either directly invoking the old so*() functions
or directly invoking the protocol switch method (about an even split
prior to this commit).
This completes an architectural change that was begun in 1996 to permit
protocols to provide substitute implementations, as now used by UDP.
Consumers now uniformly invoke sosend(), soreceive(), and sopoll() to
perform these operations on sockets -- in particular, distributed file
systems and socket system calls.
Architectural head nod: sam, gnn, wollman
2006-07-24 15:20:08 +00:00
|
|
|
error = sosend(so, 0, uio, 0, 0, 0, uio->uio_td);
|
The SO_NOSIGPIPE socket option allows a user process to mark a socket
so that the socket does not generate SIGPIPE, only EPIPE, when a write
is attempted after socket shutdown. When the option was introduced in
2002, this required the logic for determining whether SIGPIPE was
generated to be pushed down from dofilewrite() to the socket layer so
that the socket options could be considered. However, the change in
2002 omitted modification to soo_write() required to add that logic,
resulting in SIGPIPE not being generated even without SO_NOSIGPIPE when
the socket was written to using write() or related generic system calls.
This change adds the EPIPE logic to soo_write(), generating a SIGPIPE
signal to the process associated with the passed uio in the event that
the SO_NOSIGPIPE option is not set.
Notes:
- The are upsides and downsides to placing this logic in the socket
layer as opposed to the file descriptor layer. This is really fd
layer logic, but because we need so_options, we have a choice of
layering violations and pick this one.
- SIGPIPE possibly should be delivered to the thread performing the
write, not the process performing the write.
- uio->uio_td and the td argument to soo_write() might potentially
differ; we use the thread in the uio argument.
- The "sigpipe" regression test in src/tools/regression/sockets/sigpipe
tests for the bug.
Submitted by: Mikko Tyolajarvi <mbsd at pacbell dot net>
Talked with: glebius, alfred
PR: 78478
MFC after: 1 week
2005-03-11 15:06:16 +00:00
|
|
|
if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
|
|
|
|
PROC_LOCK(uio->uio_td->td_proc);
|
2010-06-29 20:44:19 +00:00
|
|
|
tdsignal(uio->uio_td, SIGPIPE);
|
The SO_NOSIGPIPE socket option allows a user process to mark a socket
so that the socket does not generate SIGPIPE, only EPIPE, when a write
is attempted after socket shutdown. When the option was introduced in
2002, this required the logic for determining whether SIGPIPE was
generated to be pushed down from dofilewrite() to the socket layer so
that the socket options could be considered. However, the change in
2002 omitted modification to soo_write() required to add that logic,
resulting in SIGPIPE not being generated even without SO_NOSIGPIPE when
the socket was written to using write() or related generic system calls.
This change adds the EPIPE logic to soo_write(), generating a SIGPIPE
signal to the process associated with the passed uio in the event that
the SO_NOSIGPIPE option is not set.
Notes:
- The are upsides and downsides to placing this logic in the socket
layer as opposed to the file descriptor layer. This is really fd
layer logic, but because we need so_options, we have a choice of
layering violations and pick this one.
- SIGPIPE possibly should be delivered to the thread performing the
write, not the process performing the write.
- uio->uio_td and the td argument to soo_write() might potentially
differ; we use the thread in the uio argument.
- The "sigpipe" regression test in src/tools/regression/sockets/sigpipe
tests for the bug.
Submitted by: Mikko Tyolajarvi <mbsd at pacbell dot net>
Talked with: glebius, alfred
PR: 78478
MFC after: 1 week
2005-03-11 15:06:16 +00:00
|
|
|
PROC_UNLOCK(uio->uio_td->td_proc);
|
|
|
|
}
|
2002-03-15 08:03:46 +00:00
|
|
|
return (error);
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
2014-08-26 14:44:08 +00:00
|
|
|
/*
 * soo_ioctl(): fileops ioctl handler for sockets.
 *
 * Handles the generic file/socket ioctls (FIONBIO, FIOASYNC, FIONREAD,
 * FIONWRITE, FIONSPACE, FIOSETOWN/FIOGETOWN, SIOCSPGRP/SIOCGPGRP,
 * SIOCATMARK) directly, and forwards everything else to the interface
 * layer ('i' group), the routing layer ('r' group), or the protocol's
 * pru_control method.
 *
 * Returns 0 on success or an errno value.  Several informational ioctls
 * return EINVAL on a listening socket, which has no data buffers.
 */
static int
soo_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *active_cred,
    struct thread *td)
{
	struct socket *so = fp->f_data;
	int error = 0;

	switch (cmd) {
	case FIONBIO:
		/* Toggle non-blocking I/O; SS_NBIO lives in so_state. */
		SOCK_LOCK(so);
		if (*(int *)data)
			so->so_state |= SS_NBIO;
		else
			so->so_state &= ~SS_NBIO;
		SOCK_UNLOCK(so);
		break;

	case FIOASYNC:
		/*
		 * Toggle async (SIGIO) notification.  SS_ASYNC is mirrored
		 * into the buffer flags: a listening socket keeps them in
		 * sol_sbrcv_flags/sol_sbsnd_flags (no real buffers, socket
		 * lock suffices), while a dataflow socket must take each
		 * socket-buffer lock to update sb_flags.
		 */
		if (*(int *)data) {
			SOCK_LOCK(so);
			so->so_state |= SS_ASYNC;
			if (SOLISTENING(so)) {
				so->sol_sbrcv_flags |= SB_ASYNC;
				so->sol_sbsnd_flags |= SB_ASYNC;
			} else {
				SOCK_RECVBUF_LOCK(so);
				so->so_rcv.sb_flags |= SB_ASYNC;
				SOCK_RECVBUF_UNLOCK(so);
				SOCK_SENDBUF_LOCK(so);
				so->so_snd.sb_flags |= SB_ASYNC;
				SOCK_SENDBUF_UNLOCK(so);
			}
			SOCK_UNLOCK(so);
		} else {
			SOCK_LOCK(so);
			so->so_state &= ~SS_ASYNC;
			if (SOLISTENING(so)) {
				so->sol_sbrcv_flags &= ~SB_ASYNC;
				so->sol_sbsnd_flags &= ~SB_ASYNC;
			} else {
				SOCK_RECVBUF_LOCK(so);
				so->so_rcv.sb_flags &= ~SB_ASYNC;
				SOCK_RECVBUF_UNLOCK(so);
				SOCK_SENDBUF_LOCK(so);
				so->so_snd.sb_flags &= ~SB_ASYNC;
				SOCK_SENDBUF_UNLOCK(so);
			}
			SOCK_UNLOCK(so);
		}
		break;

	case FIONREAD:
		/*
		 * Bytes available for reading.  Taken under the receive
		 * buffer lock; sb_ctl (control data) is excluded from the
		 * count.  Listening sockets have no receive buffer.
		 */
		SOCK_RECVBUF_LOCK(so);
		if (SOLISTENING(so)) {
			error = EINVAL;
		} else {
			*(int *)data = sbavail(&so->so_rcv) - so->so_rcv.sb_ctl;
		}
		SOCK_RECVBUF_UNLOCK(so);
		break;

	case FIONWRITE:
		/*
		 * Bytes written but not yet disposed of (e.g. not yet
		 * transmitted and acknowledged).
		 */
		/* Unlocked read. */
		if (SOLISTENING(so)) {
			error = EINVAL;
		} else {
			*(int *)data = sbavail(&so->so_snd);
		}
		break;

	case FIONSPACE:
		/*
		 * Free space in the send buffer.  sbspace() can go
		 * negative when the buffer is over limit, so clamp the
		 * over-committed cases to 0 explicitly.
		 */
		/* Unlocked read. */
		if (SOLISTENING(so)) {
			error = EINVAL;
		} else {
			if ((so->so_snd.sb_hiwat < sbused(&so->so_snd)) ||
			    (so->so_snd.sb_mbmax < so->so_snd.sb_mbcnt)) {
				*(int *)data = 0;
			} else {
				*(int *)data = sbspace(&so->so_snd);
			}
		}
		break;

	case FIOSETOWN:
		/* Set SIGIO/SIGURG recipient (pid > 0, pgid < 0). */
		error = fsetown(*(int *)data, &so->so_sigio);
		break;

	case FIOGETOWN:
		*(int *)data = fgetown(&so->so_sigio);
		break;

	case SIOCSPGRP:
		/*
		 * Historical BSD interface: the argument is a process
		 * group, hence the sign flip relative to FIOSETOWN.
		 */
		error = fsetown(-(*(int *)data), &so->so_sigio);
		break;

	case SIOCGPGRP:
		*(int *)data = -fgetown(&so->so_sigio);
		break;

	case SIOCATMARK:
		/* Is the read pointer at the OOB mark? */
		/* Unlocked read. */
		if (SOLISTENING(so)) {
			error = EINVAL;
		} else {
			*(int *)data = (so->so_rcv.sb_state & SBS_RCVATMARK) != 0;
		}
		break;
	default:
		/*
		 * Interface/routing/protocol specific ioctls: interface and
		 * routing ioctls should have a different entry since a
		 * socket is unnecessary.
		 */
		if (IOCGROUP(cmd) == 'i')
			error = ifioctl(so, cmd, data, td);
		else if (IOCGROUP(cmd) == 'r') {
			/* Routing ioctls run in the socket's vnet context. */
			CURVNET_SET(so->so_vnet);
			error = rtioctl_fib(cmd, data, so->so_fibnum);
			CURVNET_RESTORE();
		} else {
			/* Everything else goes to the protocol layer. */
			CURVNET_SET(so->so_vnet);
			error = ((*so->so_proto->pr_usrreqs->pru_control)
			    (so, cmd, data, 0, td));
			CURVNET_RESTORE();
		}
		break;
	}
	return (error);
}
|
|
|
|
|
2014-08-26 14:44:08 +00:00
|
|
|
static int
|
2007-03-04 17:50:46 +00:00
|
|
|
soo_poll(struct file *fp, int events, struct ucred *active_cred,
|
|
|
|
struct thread *td)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
2003-01-13 00:33:17 +00:00
|
|
|
struct socket *so = fp->f_data;
|
2007-08-06 14:26:03 +00:00
|
|
|
#ifdef MAC
|
2004-11-13 17:21:26 +00:00
|
|
|
int error;
|
|
|
|
|
2007-10-24 19:04:04 +00:00
|
|
|
error = mac_socket_check_poll(active_cred, so);
|
2007-08-06 14:26:03 +00:00
|
|
|
if (error)
|
2005-04-16 18:46:29 +00:00
|
|
|
return (error);
|
|
|
|
#endif
|
2007-08-06 14:26:03 +00:00
|
|
|
return (sopoll(so, events, fp->f_cred, td));
|
1994-05-24 10:09:53 +00:00
|
|
|
}
|
|
|
|
|
/*
 * Fill in a struct stat for a socket: S_IFSOCK mode, read/write
 * permission bits reflecting whether the socket can still receive or
 * send, the unread byte count as st_size, and the owning credential's
 * uid/gid.  The protocol may refine the result via pru_sense(), which
 * is called with the socket lock held.
 */
static int
soo_stat(struct file *fp, struct stat *ub, struct ucred *active_cred)
{
	struct socket *so = fp->f_data;
	int error;

	bzero((caddr_t)ub, sizeof (*ub));
	ub->st_mode = S_IFSOCK;
#ifdef MAC
	error = mac_socket_check_stat(active_cred, so);
	if (error)
		return (error);
#endif
	SOCK_LOCK(so);
	/* Listening sockets have no data buffers to report on. */
	if (!SOLISTENING(so)) {
		struct sockbuf *sb;

		/*
		 * If SBS_CANTRCVMORE is set, but there's still data left
		 * in the receive buffer, the socket is still readable.
		 */
		sb = &so->so_rcv;
		SOCKBUF_LOCK(sb);
		if ((sb->sb_state & SBS_CANTRCVMORE) == 0 || sbavail(sb))
			ub->st_mode |= S_IRUSR | S_IRGRP | S_IROTH;
		/* Unread payload, excluding control data. */
		ub->st_size = sbavail(sb) - sb->sb_ctl;
		SOCKBUF_UNLOCK(sb);

		sb = &so->so_snd;
		SOCKBUF_LOCK(sb);
		if ((sb->sb_state & SBS_CANTSENDMORE) == 0)
			ub->st_mode |= S_IWUSR | S_IWGRP | S_IWOTH;
		SOCKBUF_UNLOCK(sb);
	}
	ub->st_uid = so->so_cred->cr_uid;
	ub->st_gid = so->so_cred->cr_gid;
	error = so->so_proto->pr_usrreqs->pru_sense(so, ub);
	SOCK_UNLOCK(so);
	return (error);
}
|
|
|
|
|
2001-11-17 03:07:11 +00:00
|
|
|
/*
|
2007-03-04 17:50:46 +00:00
|
|
|
* API socket close on file pointer. We call soclose() to close the socket
|
|
|
|
* (including initiating closing protocols). soclose() will sorele() the
|
|
|
|
* file reference but the actual socket will not go away until the socket's
|
|
|
|
* ref count hits 0.
|
2001-11-17 03:07:11 +00:00
|
|
|
*/
|
2014-08-26 14:44:08 +00:00
|
|
|
static int
|
2007-03-04 17:50:46 +00:00
|
|
|
soo_close(struct file *fp, struct thread *td)
|
1994-05-24 10:09:53 +00:00
|
|
|
{
|
|
|
|
int error = 0;
|
2001-11-17 03:07:11 +00:00
|
|
|
struct socket *so;
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2003-01-13 00:33:17 +00:00
|
|
|
so = fp->f_data;
|
1999-08-04 18:53:50 +00:00
|
|
|
fp->f_ops = &badfileops;
|
2003-01-13 00:33:17 +00:00
|
|
|
fp->f_data = NULL;
|
2002-01-13 11:58:06 +00:00
|
|
|
|
|
|
|
if (so)
|
2001-11-17 03:07:11 +00:00
|
|
|
error = soclose(so);
|
1994-05-24 10:09:53 +00:00
|
|
|
return (error);
|
|
|
|
}
|
2014-09-22 16:20:47 +00:00
|
|
|
|
|
|
|
static int
|
|
|
|
soo_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp)
|
|
|
|
{
|
|
|
|
struct sockaddr *sa;
|
|
|
|
struct inpcb *inpcb;
|
|
|
|
struct unpcb *unpcb;
|
|
|
|
struct socket *so;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
kif->kf_type = KF_TYPE_SOCKET;
|
|
|
|
so = fp->f_data;
|
2018-01-11 20:26:17 +00:00
|
|
|
CURVNET_SET(so->so_vnet);
|
Commit the 64-bit inode project.
Extend the ino_t, dev_t, nlink_t types to 64-bit ints. Modify
struct dirent layout to add d_off, increase the size of d_fileno
to 64-bits, increase the size of d_namlen to 16-bits, and change
the required alignment. Increase struct statfs f_mntfromname[] and
f_mntonname[] array length MNAMELEN to 1024.
ABI breakage is mitigated by providing compatibility using versioned
symbols, ingenious use of the existing padding in structures, and
by employing other tricks. Unfortunately, not everything can be
fixed, especially outside the base system. For instance, third-party
APIs which pass struct stat around are broken in backward and
forward incompatible ways.
Kinfo sysctl MIBs ABI is changed in backward-compatible way, but
there is no general mechanism to handle other sysctl MIBS which
return structures where the layout has changed. It was considered
that the breakage is either in the management interfaces, where we
usually allow ABI slip, or is not important.
Struct xvnode changed layout, no compat shims are provided.
For struct xtty, dev_t tty device member was reduced to uint32_t.
It was decided that keeping ABI compat in this case is more useful
than reporting 64-bit dev_t, for the sake of pstat.
Update note: strictly follow the instructions in UPDATING. Build
and install the new kernel with COMPAT_FREEBSD11 option enabled,
then reboot, and only then install new world.
Credits: The 64-bit inode project, also known as ino64, started life
many years ago as a project by Gleb Kurtsou (gleb). Kirk McKusick
(mckusick) then picked up and updated the patch, and acted as a
flag-waver. Feedback, suggestions, and discussions were carried
by Ed Maste (emaste), John Baldwin (jhb), Jilles Tjoelker (jilles),
and Rick Macklem (rmacklem). Kris Moore (kris) performed an initial
ports investigation followed by an exp-run by Antoine Brodin (antoine).
Essential and all-embracing testing was done by Peter Holm (pho).
The heavy lifting of coordinating all these efforts and bringing the
project to completion were done by Konstantin Belousov (kib).
Sponsored by: The FreeBSD Foundation (emaste, kib)
Differential revision: https://reviews.freebsd.org/D10439
2017-05-23 09:29:05 +00:00
|
|
|
kif->kf_un.kf_sock.kf_sock_domain0 =
|
|
|
|
so->so_proto->pr_domain->dom_family;
|
|
|
|
kif->kf_un.kf_sock.kf_sock_type0 = so->so_type;
|
|
|
|
kif->kf_un.kf_sock.kf_sock_protocol0 = so->so_proto->pr_protocol;
|
2014-09-22 16:20:47 +00:00
|
|
|
kif->kf_un.kf_sock.kf_sock_pcb = (uintptr_t)so->so_pcb;
|
Commit the 64-bit inode project.
Extend the ino_t, dev_t, nlink_t types to 64-bit ints. Modify
struct dirent layout to add d_off, increase the size of d_fileno
to 64-bits, increase the size of d_namlen to 16-bits, and change
the required alignment. Increase struct statfs f_mntfromname[] and
f_mntonname[] array length MNAMELEN to 1024.
ABI breakage is mitigated by providing compatibility using versioned
symbols, ingenious use of the existing padding in structures, and
by employing other tricks. Unfortunately, not everything can be
fixed, especially outside the base system. For instance, third-party
APIs which pass struct stat around are broken in backward and
forward incompatible ways.
Kinfo sysctl MIBs ABI is changed in backward-compatible way, but
there is no general mechanism to handle other sysctl MIBS which
return structures where the layout has changed. It was considered
that the breakage is either in the management interfaces, where we
usually allow ABI slip, or is not important.
Struct xvnode changed layout, no compat shims are provided.
For struct xtty, dev_t tty device member was reduced to uint32_t.
It was decided that keeping ABI compat in this case is more useful
than reporting 64-bit dev_t, for the sake of pstat.
Update note: strictly follow the instructions in UPDATING. Build
and install the new kernel with COMPAT_FREEBSD11 option enabled,
then reboot, and only then install new world.
Credits: The 64-bit inode project, also known as ino64, started life
many years ago as a project by Gleb Kurtsou (gleb). Kirk McKusick
(mckusick) then picked up and updated the patch, and acted as a
flag-waver. Feedback, suggestions, and discussions were carried
by Ed Maste (emaste), John Baldwin (jhb), Jilles Tjoelker (jilles),
and Rick Macklem (rmacklem). Kris Moore (kris) performed an initial
ports investigation followed by an exp-run by Antoine Brodin (antoine).
Essential and all-embracing testing was done by Peter Holm (pho).
The heavy lifting of coordinating all these efforts and bringing the
project to completion were done by Konstantin Belousov (kib).
Sponsored by: The FreeBSD Foundation (emaste, kib)
Differential revision: https://reviews.freebsd.org/D10439
2017-05-23 09:29:05 +00:00
|
|
|
switch (kif->kf_un.kf_sock.kf_sock_domain0) {
|
2014-09-22 16:20:47 +00:00
|
|
|
case AF_INET:
|
|
|
|
case AF_INET6:
|
Commit the 64-bit inode project.
Extend the ino_t, dev_t, nlink_t types to 64-bit ints. Modify
struct dirent layout to add d_off, increase the size of d_fileno
to 64-bits, increase the size of d_namlen to 16-bits, and change
the required alignment. Increase struct statfs f_mntfromname[] and
f_mntonname[] array length MNAMELEN to 1024.
ABI breakage is mitigated by providing compatibility using versioned
symbols, ingenious use of the existing padding in structures, and
by employing other tricks. Unfortunately, not everything can be
fixed, especially outside the base system. For instance, third-party
APIs which pass struct stat around are broken in backward and
forward incompatible ways.
Kinfo sysctl MIBs ABI is changed in backward-compatible way, but
there is no general mechanism to handle other sysctl MIBS which
return structures where the layout has changed. It was considered
that the breakage is either in the management interfaces, where we
usually allow ABI slip, or is not important.
Struct xvnode changed layout, no compat shims are provided.
For struct xtty, dev_t tty device member was reduced to uint32_t.
It was decided that keeping ABI compat in this case is more useful
than reporting 64-bit dev_t, for the sake of pstat.
Update note: strictly follow the instructions in UPDATING. Build
and install the new kernel with COMPAT_FREEBSD11 option enabled,
then reboot, and only then install new world.
Credits: The 64-bit inode project, also known as ino64, started life
many years ago as a project by Gleb Kurtsou (gleb). Kirk McKusick
(mckusick) then picked up and updated the patch, and acted as a
flag-waver. Feedback, suggestions, and discussions were carried
by Ed Maste (emaste), John Baldwin (jhb), Jilles Tjoelker (jilles),
and Rick Macklem (rmacklem). Kris Moore (kris) performed an initial
ports investigation followed by an exp-run by Antoine Brodin (antoine).
Essential and all-embracing testing was done by Peter Holm (pho).
The heavy lifting of coordinating all these efforts and bringing the
project to completion were done by Konstantin Belousov (kib).
Sponsored by: The FreeBSD Foundation (emaste, kib)
Differential revision: https://reviews.freebsd.org/D10439
2017-05-23 09:29:05 +00:00
|
|
|
if (kif->kf_un.kf_sock.kf_sock_protocol0 == IPPROTO_TCP) {
|
2014-09-22 16:20:47 +00:00
|
|
|
if (so->so_pcb != NULL) {
|
|
|
|
inpcb = (struct inpcb *)(so->so_pcb);
|
|
|
|
kif->kf_un.kf_sock.kf_sock_inpcb =
|
|
|
|
(uintptr_t)inpcb->inp_ppcb;
|
2017-05-26 22:17:44 +00:00
|
|
|
kif->kf_un.kf_sock.kf_sock_sendq =
|
|
|
|
sbused(&so->so_snd);
|
|
|
|
kif->kf_un.kf_sock.kf_sock_recvq =
|
|
|
|
sbused(&so->so_rcv);
|
2014-09-22 16:20:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case AF_UNIX:
|
|
|
|
if (so->so_pcb != NULL) {
|
|
|
|
unpcb = (struct unpcb *)(so->so_pcb);
|
|
|
|
if (unpcb->unp_conn) {
|
|
|
|
kif->kf_un.kf_sock.kf_sock_unpconn =
|
|
|
|
(uintptr_t)unpcb->unp_conn;
|
|
|
|
kif->kf_un.kf_sock.kf_sock_rcv_sb_state =
|
|
|
|
so->so_rcv.sb_state;
|
|
|
|
kif->kf_un.kf_sock.kf_sock_snd_sb_state =
|
|
|
|
so->so_snd.sb_state;
|
2017-05-26 22:17:44 +00:00
|
|
|
kif->kf_un.kf_sock.kf_sock_sendq =
|
|
|
|
sbused(&so->so_snd);
|
|
|
|
kif->kf_un.kf_sock.kf_sock_recvq =
|
|
|
|
sbused(&so->so_rcv);
|
2014-09-22 16:20:47 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
error = so->so_proto->pr_usrreqs->pru_sockaddr(so, &sa);
|
Commit the 64-bit inode project.
Extend the ino_t, dev_t, nlink_t types to 64-bit ints. Modify
struct dirent layout to add d_off, increase the size of d_fileno
to 64-bits, increase the size of d_namlen to 16-bits, and change
the required alignment. Increase struct statfs f_mntfromname[] and
f_mntonname[] array length MNAMELEN to 1024.
ABI breakage is mitigated by providing compatibility using versioned
symbols, ingenious use of the existing padding in structures, and
by employing other tricks. Unfortunately, not everything can be
fixed, especially outside the base system. For instance, third-party
APIs which pass struct stat around are broken in backward and
forward incompatible ways.
Kinfo sysctl MIBs ABI is changed in backward-compatible way, but
there is no general mechanism to handle other sysctl MIBS which
return structures where the layout has changed. It was considered
that the breakage is either in the management interfaces, where we
usually allow ABI slip, or is not important.
Struct xvnode changed layout, no compat shims are provided.
For struct xtty, dev_t tty device member was reduced to uint32_t.
It was decided that keeping ABI compat in this case is more useful
than reporting 64-bit dev_t, for the sake of pstat.
Update note: strictly follow the instructions in UPDATING. Build
and install the new kernel with COMPAT_FREEBSD11 option enabled,
then reboot, and only then install new world.
Credits: The 64-bit inode project, also known as ino64, started life
many years ago as a project by Gleb Kurtsou (gleb). Kirk McKusick
(mckusick) then picked up and updated the patch, and acted as a
flag-waver. Feedback, suggestions, and discussions were carried
by Ed Maste (emaste), John Baldwin (jhb), Jilles Tjoelker (jilles),
and Rick Macklem (rmacklem). Kris Moore (kris) performed an initial
ports investigation followed by an exp-run by Antoine Brodin (antoine).
Essential and all-embracing testing was done by Peter Holm (pho).
The heavy lifting of coordinating all these efforts and bringing the
project to completion were done by Konstantin Belousov (kib).
Sponsored by: The FreeBSD Foundation (emaste, kib)
Differential revision: https://reviews.freebsd.org/D10439
2017-05-23 09:29:05 +00:00
|
|
|
if (error == 0 &&
|
|
|
|
sa->sa_len <= sizeof(kif->kf_un.kf_sock.kf_sa_local)) {
|
|
|
|
bcopy(sa, &kif->kf_un.kf_sock.kf_sa_local, sa->sa_len);
|
2014-09-22 16:20:47 +00:00
|
|
|
free(sa, M_SONAME);
|
|
|
|
}
|
|
|
|
error = so->so_proto->pr_usrreqs->pru_peeraddr(so, &sa);
|
Commit the 64-bit inode project.
Extend the ino_t, dev_t, nlink_t types to 64-bit ints. Modify
struct dirent layout to add d_off, increase the size of d_fileno
to 64-bits, increase the size of d_namlen to 16-bits, and change
the required alignment. Increase struct statfs f_mntfromname[] and
f_mntonname[] array length MNAMELEN to 1024.
ABI breakage is mitigated by providing compatibility using versioned
symbols, ingenious use of the existing padding in structures, and
by employing other tricks. Unfortunately, not everything can be
fixed, especially outside the base system. For instance, third-party
APIs which pass struct stat around are broken in backward and
forward incompatible ways.
Kinfo sysctl MIBs ABI is changed in backward-compatible way, but
there is no general mechanism to handle other sysctl MIBS which
return structures where the layout has changed. It was considered
that the breakage is either in the management interfaces, where we
usually allow ABI slip, or is not important.
Struct xvnode changed layout, no compat shims are provided.
For struct xtty, dev_t tty device member was reduced to uint32_t.
It was decided that keeping ABI compat in this case is more useful
than reporting 64-bit dev_t, for the sake of pstat.
Update note: strictly follow the instructions in UPDATING. Build
and install the new kernel with COMPAT_FREEBSD11 option enabled,
then reboot, and only then install new world.
Credits: The 64-bit inode project, also known as ino64, started life
many years ago as a project by Gleb Kurtsou (gleb). Kirk McKusick
(mckusick) then picked up and updated the patch, and acted as a
flag-waver. Feedback, suggestions, and discussions were carried
by Ed Maste (emaste), John Baldwin (jhb), Jilles Tjoelker (jilles),
and Rick Macklem (rmacklem). Kris Moore (kris) performed an initial
ports investigation followed by an exp-run by Antoine Brodin (antoine).
Essential and all-embracing testing was done by Peter Holm (pho).
The heavy lifting of coordinating all these efforts and bringing the
project to completion were done by Konstantin Belousov (kib).
Sponsored by: The FreeBSD Foundation (emaste, kib)
Differential revision: https://reviews.freebsd.org/D10439
2017-05-23 09:29:05 +00:00
|
|
|
if (error == 0 &&
|
|
|
|
sa->sa_len <= sizeof(kif->kf_un.kf_sock.kf_sa_peer)) {
|
|
|
|
bcopy(sa, &kif->kf_un.kf_sock.kf_sa_peer, sa->sa_len);
|
2014-09-22 16:20:47 +00:00
|
|
|
free(sa, M_SONAME);
|
|
|
|
}
|
|
|
|
strncpy(kif->kf_path, so->so_proto->pr_domain->dom_name,
|
|
|
|
sizeof(kif->kf_path));
|
2018-01-11 20:26:17 +00:00
|
|
|
CURVNET_RESTORE();
|
2014-09-22 16:20:47 +00:00
|
|
|
return (0);
|
|
|
|
}
|
/*
 * Use the 'backend3' field in AIO jobs to store the amount of data
 * completed by the AIO job so far.
 */
#define aio_done backend3

/* Queue of tasks awaiting a socket AIO daemon, and the lock guarding it. */
static STAILQ_HEAD(, task) soaio_jobs;
static struct mtx soaio_jobs_lock;
/* Task scheduled on taskqueue_thread to create additional daemons. */
static struct task soaio_kproc_task;
/* Daemon/job bookkeeping; all protected by soaio_jobs_lock. */
static int soaio_starting, soaio_idle, soaio_queued;
/* Unit number allocator naming the "soaiod%d" kernel processes. */
static struct unrhdr *soaio_kproc_unr;

/* Hard cap on the number of socket AIO daemons. */
static int soaio_max_procs = MAX_AIO_PROCS;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, max_procs, CTLFLAG_RW, &soaio_max_procs, 0,
    "Maximum number of kernel processes to use for async socket IO");

/* Current number of daemons (read-only via sysctl). */
static int soaio_num_procs;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, num_procs, CTLFLAG_RD, &soaio_num_procs, 0,
    "Number of active kernel processes for async socket IO");

/* Daemons are kept alive down to this count even when idle. */
static int soaio_target_procs = TARGET_AIO_PROCS;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, target_procs, CTLFLAG_RD,
    &soaio_target_procs, 0,
    "Preferred number of ready kernel processes for async socket IO");

/* Idle timeout (in ticks, passed to mtx_sleep()) before a surplus
 * daemon exits; initialized in soaio_init(). */
static int soaio_lifetime;
SYSCTL_INT(_kern_ipc_aio, OID_AUTO, lifetime, CTLFLAG_RW, &soaio_lifetime, 0,
    "Maximum lifetime for idle aiod");
/*
 * Main loop of a socket AIO kernel daemon.  Drains queued tasks from
 * soaio_jobs, running each with the job lock dropped, then sleeps up to
 * soaio_lifetime waiting for more work.  A daemon exits when its sleep
 * times out with nothing queued and more than soaio_target_procs
 * daemons exist.
 */
static void
soaio_kproc_loop(void *arg)
{
	struct proc *p;
	struct vmspace *myvm;
	struct task *task;
	int error, id, pending;

	/* Unit number passed by soaio_kproc_create(); released on exit. */
	id = (intptr_t)arg;

	/*
	 * Grab an extra reference on the daemon's vmspace so that it
	 * doesn't get freed by jobs that switch to a different
	 * vmspace.
	 */
	p = curproc;
	myvm = vmspace_acquire_ref(p);

	mtx_lock(&soaio_jobs_lock);
	MPASS(soaio_starting > 0);
	soaio_starting--;
	for (;;) {
		/* Run every queued task, unlocking around the callback. */
		while (!STAILQ_EMPTY(&soaio_jobs)) {
			task = STAILQ_FIRST(&soaio_jobs);
			STAILQ_REMOVE_HEAD(&soaio_jobs, ta_link);
			soaio_queued--;
			pending = task->ta_pending;
			task->ta_pending = 0;
			mtx_unlock(&soaio_jobs_lock);

			task->ta_func(task->ta_context, pending);

			mtx_lock(&soaio_jobs_lock);
		}
		MPASS(soaio_queued == 0);

		/* Switch back to our own vmspace if a job changed it. */
		if (p->p_vmspace != myvm) {
			mtx_unlock(&soaio_jobs_lock);
			vmspace_switch_aio(myvm);
			mtx_lock(&soaio_jobs_lock);
			continue;
		}

		soaio_idle++;
		error = mtx_sleep(&soaio_idle, &soaio_jobs_lock, 0, "-",
		    soaio_lifetime);
		soaio_idle--;
		/*
		 * Timed out idle with no work and a surplus of daemons:
		 * let this one die.
		 */
		if (error == EWOULDBLOCK && STAILQ_EMPTY(&soaio_jobs) &&
		    soaio_num_procs > soaio_target_procs)
			break;
	}
	soaio_num_procs--;
	mtx_unlock(&soaio_jobs_lock);
	free_unr(soaio_kproc_unr, id);
	kproc_exit(0);
}
|
|
|
|
|
|
|
|
/*
 * Taskqueue callback that creates socket AIO daemons on demand.
 * Daemons are created until the target count is met; beyond that,
 * one is created per pass only while queued jobs exceed the idle and
 * starting daemons, never exceeding soaio_max_procs.
 */
static void
soaio_kproc_create(void *context, int pending)
{
	struct proc *p;
	int error, id;

	mtx_lock(&soaio_jobs_lock);
	for (;;) {
		if (soaio_num_procs < soaio_target_procs) {
			/* Must create */
		} else if (soaio_num_procs >= soaio_max_procs) {
			/*
			 * Hit the limit on kernel processes, don't
			 * create another one.
			 */
			break;
		} else if (soaio_queued <= soaio_idle + soaio_starting) {
			/*
			 * No more AIO jobs waiting for a process to be
			 * created, so stop.
			 */
			break;
		}
		soaio_starting++;
		mtx_unlock(&soaio_jobs_lock);

		id = alloc_unr(soaio_kproc_unr);
		error = kproc_create(soaio_kproc_loop, (void *)(intptr_t)id,
		    &p, 0, 0, "soaiod%d", id);
		if (error != 0) {
			/* Creation failed: undo bookkeeping and stop. */
			free_unr(soaio_kproc_unr, id);
			mtx_lock(&soaio_jobs_lock);
			soaio_starting--;
			break;
		}

		mtx_lock(&soaio_jobs_lock);
		soaio_num_procs++;
	}
	mtx_unlock(&soaio_jobs_lock);
}
|
|
|
|
|
2016-04-29 20:12:45 +00:00
|
|
|
void
|
2016-03-01 18:12:14 +00:00
|
|
|
soaio_enqueue(struct task *task)
|
|
|
|
{
|
|
|
|
|
|
|
|
mtx_lock(&soaio_jobs_lock);
|
|
|
|
MPASS(task->ta_pending == 0);
|
|
|
|
task->ta_pending++;
|
|
|
|
STAILQ_INSERT_TAIL(&soaio_jobs, task, ta_link);
|
|
|
|
soaio_queued++;
|
|
|
|
if (soaio_queued <= soaio_idle)
|
|
|
|
wakeup_one(&soaio_idle);
|
|
|
|
else if (soaio_num_procs < soaio_max_procs)
|
|
|
|
taskqueue_enqueue(taskqueue_thread, &soaio_kproc_task);
|
|
|
|
mtx_unlock(&soaio_jobs_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
soaio_init(void)
|
|
|
|
{
|
|
|
|
|
|
|
|
soaio_lifetime = AIOD_LIFETIME_DEFAULT;
|
|
|
|
STAILQ_INIT(&soaio_jobs);
|
|
|
|
mtx_init(&soaio_jobs_lock, "soaio jobs", NULL, MTX_DEF);
|
|
|
|
soaio_kproc_unr = new_unrhdr(1, INT_MAX, NULL);
|
|
|
|
TASK_INIT(&soaio_kproc_task, 0, soaio_kproc_create, NULL);
|
|
|
|
}
|
|
|
|
SYSINIT(soaio, SI_SUB_VFS, SI_ORDER_ANY, soaio_init, NULL);
|
|
|
|
|
|
|
|
static __inline int
|
|
|
|
soaio_ready(struct socket *so, struct sockbuf *sb)
|
|
|
|
{
|
|
|
|
return (sb == &so->so_rcv ? soreadable(so) : sowriteable(so));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Run a single socket AIO request.  Called from soaio_process_sb()
 * with the socket buffer locked; the lock is dropped around the actual
 * I/O and reacquired before every return.  On EWOULDBLOCK with no
 * usable progress the job is retried immediately (if the socket is
 * ready again) or requeued at the head of the buffer's job queue.
 */
static void
soaio_process_job(struct socket *so, struct sockbuf *sb, struct kaiocb *job)
{
	struct ucred *td_savedcred;
	struct thread *td;
#ifdef MAC
	struct file *fp = job->fd_file;
#endif
	size_t cnt, done, job_total_nbytes __diagused;
	long ru_before;
	int error, flags;

	SOCKBUF_UNLOCK(sb);
	/* Run in the submitting process's address space. */
	aio_switch_vmspace(job);
	td = curthread;
retry:
	/* Perform the I/O with the job owner's credentials. */
	td_savedcred = td->td_ucred;
	td->td_ucred = job->cred;

	job_total_nbytes = job->uiop->uio_resid + job->aio_done;
	done = job->aio_done;
	cnt = job->uiop->uio_resid;
	job->uiop->uio_offset = 0;
	job->uiop->uio_td = td;
	flags = MSG_NBIO;

	/*
	 * For resource usage accounting, only count a completed request
	 * as a single message to avoid counting multiple calls to
	 * sosend/soreceive on a blocking socket.
	 */

	if (sb == &so->so_rcv) {
		ru_before = td->td_ru.ru_msgrcv;
#ifdef MAC
		error = mac_socket_check_receive(fp->f_cred, so);
		if (error == 0)

#endif
			error = soreceive(so, NULL, job->uiop, NULL, NULL,
			    &flags);
		if (td->td_ru.ru_msgrcv != ru_before)
			job->msgrcv = 1;
	} else {
		/* Hint to the protocol that more writes are queued. */
		if (!TAILQ_EMPTY(&sb->sb_aiojobq))
			flags |= MSG_MORETOCOME;
		ru_before = td->td_ru.ru_msgsnd;
#ifdef MAC
		error = mac_socket_check_send(fp->f_cred, so);
		if (error == 0)
#endif
			error = sosend(so, NULL, job->uiop, NULL, NULL, flags,
			    td);
		if (td->td_ru.ru_msgsnd != ru_before)
			job->msgsnd = 1;
		/* Mirror write(2) semantics: deliver SIGPIPE on EPIPE. */
		if (error == EPIPE && (so->so_options & SO_NOSIGPIPE) == 0) {
			PROC_LOCK(job->userproc);
			kern_psignal(job->userproc, SIGPIPE);
			PROC_UNLOCK(job->userproc);
		}
	}

	/* Accumulate progress made by this pass into the job. */
	done += cnt - job->uiop->uio_resid;
	job->aio_done = done;
	td->td_ucred = td_savedcred;

	if (error == EWOULDBLOCK) {
		/*
		 * The request was either partially completed or not
		 * completed at all due to racing with a read() or
		 * write() on the socket.  If the socket is
		 * non-blocking, return with any partial completion.
		 * If the socket is blocking or if no progress has
		 * been made, requeue this request at the head of the
		 * queue to try again when the socket is ready.
		 */
		MPASS(done != job_total_nbytes);
		SOCKBUF_LOCK(sb);
		if (done == 0 || !(so->so_state & SS_NBIO)) {
			empty_results++;
			if (soaio_ready(so, sb)) {
				/* Became ready again: try once more now. */
				empty_retries++;
				SOCKBUF_UNLOCK(sb);
				goto retry;
			}

			if (!aio_set_cancel_function(job, soo_aio_cancel)) {
				/*
				 * The job was cancelled while we ran;
				 * report any partial completion.
				 */
				SOCKBUF_UNLOCK(sb);
				if (done != 0)
					aio_complete(job, done, 0);
				else
					aio_cancel(job);
				SOCKBUF_LOCK(sb);
			} else {
				TAILQ_INSERT_HEAD(&sb->sb_aiojobq, job, list);
			}
			return;
		}
		SOCKBUF_UNLOCK(sb);
	}
	/* Partial progress masks interruption-style errors. */
	if (done != 0 && (error == ERESTART || error == EINTR ||
	    error == EWOULDBLOCK))
		error = 0;
	if (error)
		aio_complete(job, -1, error);
	else
		aio_complete(job, done, 0);
	SOCKBUF_LOCK(sb);
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
soaio_process_sb(struct socket *so, struct sockbuf *sb)
|
|
|
|
{
|
|
|
|
struct kaiocb *job;
|
|
|
|
|
2017-07-17 16:59:22 +00:00
|
|
|
CURVNET_SET(so->so_vnet);
|
2016-03-01 18:12:14 +00:00
|
|
|
SOCKBUF_LOCK(sb);
|
|
|
|
while (!TAILQ_EMPTY(&sb->sb_aiojobq) && soaio_ready(so, sb)) {
|
|
|
|
job = TAILQ_FIRST(&sb->sb_aiojobq);
|
|
|
|
TAILQ_REMOVE(&sb->sb_aiojobq, job, list);
|
|
|
|
if (!aio_clear_cancel_function(job))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
soaio_process_job(so, sb, job);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If there are still pending requests, the socket must not be
|
|
|
|
* ready so set SB_AIO to request a wakeup when the socket
|
|
|
|
* becomes ready.
|
|
|
|
*/
|
|
|
|
if (!TAILQ_EMPTY(&sb->sb_aiojobq))
|
|
|
|
sb->sb_flags |= SB_AIO;
|
|
|
|
sb->sb_flags &= ~SB_AIO_RUNNING;
|
|
|
|
SOCKBUF_UNLOCK(sb);
|
|
|
|
|
|
|
|
sorele(so);
|
2017-07-17 16:59:22 +00:00
|
|
|
CURVNET_RESTORE();
|
2016-03-01 18:12:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
soaio_rcv(void *context, int pending)
|
|
|
|
{
|
|
|
|
struct socket *so;
|
|
|
|
|
|
|
|
so = context;
|
|
|
|
soaio_process_sb(so, &so->so_rcv);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
|
|
|
soaio_snd(void *context, int pending)
|
|
|
|
{
|
|
|
|
struct socket *so;
|
|
|
|
|
|
|
|
so = context;
|
|
|
|
soaio_process_sb(so, &so->so_snd);
|
|
|
|
}
|
|
|
|
|
|
|
|
void
|
2022-05-12 13:22:12 -07:00
|
|
|
sowakeup_aio(struct socket *so, sb_which which)
|
2016-03-01 18:12:14 +00:00
|
|
|
{
|
2022-05-12 13:22:12 -07:00
|
|
|
struct sockbuf *sb = sobuf(so, which);
|
|
|
|
|
|
|
|
SOCK_BUF_LOCK_ASSERT(so, which);
|
2016-03-01 18:12:14 +00:00
|
|
|
|
|
|
|
sb->sb_flags &= ~SB_AIO;
|
|
|
|
if (sb->sb_flags & SB_AIO_RUNNING)
|
|
|
|
return;
|
|
|
|
sb->sb_flags |= SB_AIO_RUNNING;
|
|
|
|
soref(so);
|
|
|
|
soaio_enqueue(&sb->sb_aiotask);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
soo_aio_cancel(struct kaiocb *job)
|
|
|
|
{
|
|
|
|
struct socket *so;
|
|
|
|
struct sockbuf *sb;
|
2016-05-24 21:09:05 +00:00
|
|
|
long done;
|
2016-03-01 18:12:14 +00:00
|
|
|
int opcode;
|
|
|
|
|
|
|
|
so = job->fd_file->f_data;
|
|
|
|
opcode = job->uaiocb.aio_lio_opcode;
|
2021-01-02 21:25:05 -07:00
|
|
|
if (opcode & LIO_READ)
|
2016-03-01 18:12:14 +00:00
|
|
|
sb = &so->so_rcv;
|
|
|
|
else {
|
2021-01-02 21:25:05 -07:00
|
|
|
MPASS(opcode & LIO_WRITE);
|
2016-03-01 18:12:14 +00:00
|
|
|
sb = &so->so_snd;
|
|
|
|
}
|
|
|
|
|
|
|
|
SOCKBUF_LOCK(sb);
|
|
|
|
if (!aio_cancel_cleared(job))
|
|
|
|
TAILQ_REMOVE(&sb->sb_aiojobq, job, list);
|
|
|
|
if (TAILQ_EMPTY(&sb->sb_aiojobq))
|
|
|
|
sb->sb_flags &= ~SB_AIO;
|
|
|
|
SOCKBUF_UNLOCK(sb);
|
|
|
|
|
2016-06-15 20:56:45 +00:00
|
|
|
done = job->aio_done;
|
2016-05-24 21:09:05 +00:00
|
|
|
if (done != 0)
|
|
|
|
aio_complete(job, done, 0);
|
|
|
|
else
|
|
|
|
aio_cancel(job);
|
2016-03-01 18:12:14 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * fileops hook for queueing an AIO request on a socket.  The protocol
 * gets first chance via pru_aio_queue(); if it declines, the job is
 * appended to the matching socket buffer's queue and processing is
 * kicked off when the buffer is already ready.  Returns EINVAL for
 * unsupported opcodes and for listening sockets.
 */
static int
soo_aio_queue(struct file *fp, struct kaiocb *job)
{
	struct socket *so;
	struct sockbuf *sb;
	sb_which which;
	int error;

	so = fp->f_data;
	/* Let the protocol handle the request itself if it can. */
	error = (*so->so_proto->pr_usrreqs->pru_aio_queue)(so, job);
	if (error == 0)
		return (0);

	/* Lock through the socket, since this may be a listening socket. */
	switch (job->uaiocb.aio_lio_opcode & (LIO_WRITE | LIO_READ)) {
	case LIO_READ:
		SOCK_RECVBUF_LOCK(so);
		sb = &so->so_rcv;
		which = SO_RCV;
		break;
	case LIO_WRITE:
		SOCK_SENDBUF_LOCK(so);
		sb = &so->so_snd;
		which = SO_SND;
		break;
	default:
		/* Not a read or write request. */
		return (EINVAL);
	}

	/* Listening sockets have no data buffers; refuse AIO on them. */
	if (SOLISTENING(so)) {
		if (sb == &so->so_rcv)
			SOCK_RECVBUF_UNLOCK(so);
		else
			SOCK_SENDBUF_UNLOCK(so);
		return (EINVAL);
	}

	if (!aio_set_cancel_function(job, soo_aio_cancel))
		panic("new job was cancelled");
	TAILQ_INSERT_TAIL(&sb->sb_aiojobq, job, list);
	if (!(sb->sb_flags & SB_AIO_RUNNING)) {
		if (soaio_ready(so, sb))
			sowakeup_aio(so, which);
		else
			sb->sb_flags |= SB_AIO;
	}
	/*
	 * NOTE(review): acquired via SOCK_RECVBUF_LOCK/SOCK_SENDBUF_LOCK
	 * above but released with SOCKBUF_UNLOCK(sb); presumably both
	 * resolve to the same underlying mutex — confirm.
	 */
	SOCKBUF_UNLOCK(sb);
	return (0);
}
|