freebsd-skq/sys/kern/uipc_socket2.c

/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)uipc_socket2.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_param.h"

#include <sys/param.h>
#include <sys/domain.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/resourcevar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>

/*
 * Primitive routines for operating on sockets.
 */

/*
 * Procedures to manipulate state flags of socket
 * and do appropriate wakeups.  Normal sequence from the
 * active (originating) side is that soisconnecting() is
 * called during processing of connect() call,
 * resulting in an eventual call to soisconnected() if/when the
 * connection is established.  When the connection is torn down
 * soisdisconnecting() is called during processing of disconnect() call,
 * and soisdisconnected() is called when the connection to the peer
 * is totally severed.  The semantics of these routines are such that
 * connectionless protocols can call soisconnected() and soisdisconnected()
 * only, bypassing the in-progress calls when setting up a ``connection''
 * takes no time.
 *
 * From the passive side, a socket is created with
 * two queues of sockets: so_incomp for connections in progress
 * and so_comp for connections already made and awaiting user acceptance.
 * As a protocol is preparing incoming connections, it creates a socket
 * structure queued on so_incomp by calling sonewconn().  When the connection
 * is established, soisconnected() is called, and transfers the
 * socket structure to so_comp, making it available to accept().
 *
 * If a socket is closed with sockets on either
 * so_incomp or so_comp, these sockets are dropped.
 *
 * If higher level protocols are implemented in
 * the kernel, the wakeups done here will sometimes
 * cause software-interrupt process scheduling.
 */
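
/*
 * Illustrative call sequence (a sketch derived from the description above;
 * informal, not code from this file).  A connection-oriented protocol on the
 * active side runs roughly:
 *
 *	soisconnecting(so);		connect() has been issued
 *	   (protocol completes its handshake)
 *	soisconnected(so);		wakes threads sleeping in connect()
 *	   (later, teardown begins)
 *	soisdisconnecting(so);
 *	soisdisconnected(so);		connection fully severed
 *
 * A connectionless protocol may call soisconnected() and soisdisconnected()
 * alone, since no handshake is in flight.
 */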

void
soisconnecting(so)
	register struct socket *so;
{

	SOCK_LOCK(so);
	so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISCONNECTING;
	SOCK_UNLOCK(so);
}

void
soisconnected(so)
	struct socket *so;
{
	struct socket *head;

	ACCEPT_LOCK();
	SOCK_LOCK(so);
	so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
	so->so_state |= SS_ISCONNECTED;
	head = so->so_head;
	if (head != NULL && (so->so_qstate & SQ_INCOMP)) {
		if ((so->so_options & SO_ACCEPTFILTER) == 0) {
			SOCK_UNLOCK(so);
			TAILQ_REMOVE(&head->so_incomp, so, so_list);
			head->so_incqlen--;
			so->so_qstate &= ~SQ_INCOMP;
			TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
			head->so_qlen++;
			so->so_qstate |= SQ_COMP;
			ACCEPT_UNLOCK();
			sorwakeup(head);
			wakeup_one(&head->so_timeo);
		} else {
			ACCEPT_UNLOCK();
			so->so_upcall =
			    head->so_accf->so_accept_filter->accf_callback;
			so->so_upcallarg = head->so_accf->so_accept_filter_arg;
			so->so_rcv.sb_flags |= SB_UPCALL;
			so->so_options &= ~SO_ACCEPTFILTER;
			SOCK_UNLOCK(so);
			so->so_upcall(so, so->so_upcallarg, M_DONTWAIT);
		}
		return;
	}
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
}

void
soisdisconnecting(so)
	register struct socket *so;
{

	/*
	 * Note: This code assumes that SOCK_LOCK(so) and
	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_state &= ~SS_ISCONNECTING;
	so->so_state |= SS_ISDISCONNECTING;
	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
	sorwakeup_locked(so);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_state |= SBS_CANTSENDMORE;
	sowwakeup_locked(so);
	wakeup(&so->so_timeo);
}

void
soisdisconnected(so)
	register struct socket *so;
{

	/*
	 * Note: This code assumes that SOCK_LOCK(so) and
	 * SOCKBUF_LOCK(&so->so_rcv) are the same.
	 */
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
	so->so_state |= SS_ISDISCONNECTED;
	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
	sorwakeup_locked(so);
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_state |= SBS_CANTSENDMORE;
	sbdrop_locked(&so->so_snd, so->so_snd.sb_cc);
	sowwakeup_locked(so);
	wakeup(&so->so_timeo);
}

/*
 * Create a "control" mbuf containing the specified data
 * with the specified type for presentation on a socket buffer.
 */
struct mbuf *
sbcreatecontrol(p, size, type, level)
	caddr_t p;
	register int size;
	int type, level;
{
	register struct cmsghdr *cp;
	struct mbuf *m;

	if (CMSG_SPACE((u_int)size) > MCLBYTES)
		return ((struct mbuf *) NULL);
	if (CMSG_SPACE((u_int)size) > MLEN)
		m = m_getcl(M_DONTWAIT, MT_CONTROL, 0);
	else
		m = m_get(M_DONTWAIT, MT_CONTROL);
	if (m == NULL)
		return ((struct mbuf *) NULL);
	cp = mtod(m, struct cmsghdr *);
	m->m_len = 0;
	KASSERT(CMSG_SPACE((u_int)size) <= M_TRAILINGSPACE(m),
	    ("sbcreatecontrol: short mbuf"));
	if (p != NULL)
		(void)memcpy(CMSG_DATA(cp), p, size);
	m->m_len = CMSG_SPACE(size);
	cp->cmsg_len = CMSG_LEN(size);
	cp->cmsg_level = level;
	cp->cmsg_type = type;
	return (m);
}
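
/*
 * Usage sketch (illustrative; modelled on the SO_TIMESTAMP handling in the
 * datagram input paths, not code from this file): a protocol that wants to
 * pass a receive timestamp up with a packet can do
 *
 *	struct timeval tv;
 *	struct mbuf *control;
 *
 *	microtime(&tv);
 *	control = sbcreatecontrol((caddr_t)&tv, sizeof(tv),
 *	    SCM_TIMESTAMP, SOL_SOCKET);
 *	if (control == NULL)
 *		(fall back to queueing the data without ancillary data)
 *
 * and then append data and control together with sbappendaddr() or
 * sbappendcontrol().
 */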

/*
 * Some routines that return EOPNOTSUPP for entry points that are not
 * supported by a protocol.  Fill in as needed.
 */
int
pru_accept_notsupp(struct socket *so, struct sockaddr **nam)
{
	return EOPNOTSUPP;
}

int
pru_attach_notsupp(struct socket *so, int proto, struct thread *td)
{
	return EOPNOTSUPP;
}

int
pru_bind_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	return EOPNOTSUPP;
}

int
pru_connect_notsupp(struct socket *so, struct sockaddr *nam, struct thread *td)
{
	return EOPNOTSUPP;
}

int
pru_connect2_notsupp(struct socket *so1, struct socket *so2)
{
	return EOPNOTSUPP;
}

int
pru_control_notsupp(struct socket *so, u_long cmd, caddr_t data,
    struct ifnet *ifp, struct thread *td)
{
	return EOPNOTSUPP;
}

int
pru_disconnect_notsupp(struct socket *so)
{
	return EOPNOTSUPP;
}

int
pru_listen_notsupp(struct socket *so, int backlog, struct thread *td)
{
	return EOPNOTSUPP;
}

int
pru_peeraddr_notsupp(struct socket *so, struct sockaddr **nam)
{
	return EOPNOTSUPP;
}

int
pru_rcvd_notsupp(struct socket *so, int flags)
{
	return EOPNOTSUPP;
}

int
pru_rcvoob_notsupp(struct socket *so, struct mbuf *m, int flags)
{
	return EOPNOTSUPP;
}

int
pru_send_notsupp(struct socket *so, int flags, struct mbuf *m,
    struct sockaddr *addr, struct mbuf *control, struct thread *td)
{
	return EOPNOTSUPP;
}

/*
 * This isn't really a ``null'' operation, but it's the default one
 * and doesn't do anything destructive.
 */
int
pru_sense_null(struct socket *so, struct stat *sb)
{
	sb->st_blksize = so->so_snd.sb_hiwat;
	return 0;
}

int
pru_shutdown_notsupp(struct socket *so)
{
	return EOPNOTSUPP;
}

int
pru_sockaddr_notsupp(struct socket *so, struct sockaddr **nam)
{
	return EOPNOTSUPP;
}

int
pru_sosend_notsupp(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *top, struct mbuf *control, int flags, struct thread *td)
{
	return EOPNOTSUPP;
}

int
pru_soreceive_notsupp(struct socket *so, struct sockaddr **paddr,
    struct uio *uio, struct mbuf **mp0, struct mbuf **controlp,
    int *flagsp)
{
	return EOPNOTSUPP;
}

int
pru_sopoll_notsupp(struct socket *so, int events, struct ucred *cred,
    struct thread *td)
{
	return EOPNOTSUPP;
}
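
/*
 * Illustrative sketch (hypothetical "foo" protocol, not taken from this
 * file): a protocol registers the stubs above for the entry points it does
 * not implement, so every pr_usrreqs slot holds a valid function pointer and
 * callers get EOPNOTSUPP back instead of dereferencing a null pointer:
 *
 *	struct pr_usrreqs foo_usrreqs = {
 *		.pru_attach =	foo_attach,
 *		.pru_send =	foo_send,
 *		.pru_accept =	pru_accept_notsupp,
 *		.pru_rcvoob =	pru_rcvoob_notsupp,
 *		...
 *	};
 */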

/*
 * Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
 */
struct sockaddr *
sodupsockaddr(const struct sockaddr *sa, int mflags)
{
	struct sockaddr *sa2;

	sa2 = malloc(sa->sa_len, M_SONAME, mflags);
	if (sa2)
		bcopy(sa, sa2, sa->sa_len);
	return sa2;
}
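
/*
 * A minimal sketch of a caller (assumed context, not from this file): pass
 * M_WAITOK where sleeping is acceptable, M_NOWAIT otherwise, and check for
 * NULL in the latter case:
 *
 *	struct sockaddr *name;
 *
 *	name = sodupsockaddr(nam, M_NOWAIT);
 *	if (name == NULL)
 *		return (ENOMEM);
 */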

/*
 * Create an external-format (``xsocket'') structure using the information
 * in the kernel-format socket structure pointed to by so.  This is done
 * to reduce the spew of irrelevant information over this interface,
 * to isolate user code from changes in the kernel structure, and
 * potentially to provide information-hiding if we decide that
 * some of this information should be hidden from users.
 */
void
sotoxsocket(struct socket *so, struct xsocket *xso)
{
	xso->xso_len = sizeof *xso;
	xso->xso_so = so;
	xso->so_type = so->so_type;
	xso->so_options = so->so_options;
	xso->so_linger = so->so_linger;
	xso->so_state = so->so_state;
	xso->so_pcb = so->so_pcb;
	xso->xso_protocol = so->so_proto->pr_protocol;
	xso->xso_family = so->so_proto->pr_domain->dom_family;
	xso->so_qlen = so->so_qlen;
	xso->so_incqlen = so->so_incqlen;
	xso->so_qlimit = so->so_qlimit;
	xso->so_timeo = so->so_timeo;
	xso->so_error = so->so_error;
	xso->so_pgid = so->so_sigio ? so->so_sigio->sio_pgid : 0;
	xso->so_oobmark = so->so_oobmark;
	sbtoxsockbuf(&so->so_snd, &xso->so_snd);
	sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
	xso->so_uid = so->so_cred->cr_uid;
}
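
/*
 * Typical consumer (sketch, not from this file): a protocol's pcblist
 * sysctl handler exports one xsocket per connection for tools such as
 * netstat(1), roughly
 *
 *	struct xsocket xso;
 *
 *	sotoxsocket(so, &xso);
 *	error = SYSCTL_OUT(req, &xso, sizeof(xso));
 */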

/*
 * This does the same for sockbufs.  Note that the xsockbuf structure,
 * since it is always embedded in a socket, does not include a self
 * pointer nor a length.  We make this entry point public in case
 * some other mechanism needs it.
 */
void
sbtoxsockbuf(struct sockbuf *sb, struct xsockbuf *xsb)
{
	xsb->sb_cc = sb->sb_cc;
	xsb->sb_hiwat = sb->sb_hiwat;
	xsb->sb_mbcnt = sb->sb_mbcnt;
	xsb->sb_mbmax = sb->sb_mbmax;
	xsb->sb_lowat = sb->sb_lowat;
	xsb->sb_flags = sb->sb_flags;
	xsb->sb_timeo = sb->sb_timeo;
}