/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */
|
|
|
|
|
2007-10-07 20:44:24 +00:00
|
|
|
#include <sys/cdefs.h>
|
|
|
|
__FBSDID("$FreeBSD$");
|
|
|
|
|
Initial import of RFC 2385 (TCP-MD5) digest support.
This is the first of two commits; bringing in the kernel support first.
This can be enabled by compiling a kernel with options TCP_SIGNATURE
and FAST_IPSEC.
For the uninitiated, this is a TCP option which provides for a means of
authenticating TCP sessions which came into being before IPSEC. It is
still relevant today, however, as it is used by many commercial router
vendors, particularly with BGP, and as such has become a requirement for
interconnect at many major Internet points of presence.
Several parts of the TCP and IP headers, including the segment payload,
are digested with MD5, including a shared secret. The PF_KEY interface
is used to manage the secrets using security associations in the SADB.
There is a limitation here in that as there is no way to map a TCP flow
per-port back to an SPI without polluting tcpcb or using the SPD; the
code to do the latter is unstable at this time. Therefore this code only
supports per-host keying granularity.
Whilst FAST_IPSEC is mutually exclusive with KAME IPSEC (and thus IPv6),
TCP_SIGNATURE applies only to IPv4. For the vast majority of prospective
users of this feature, this will not pose any problem.
This implementation is output-only; that is, the option is honoured when
responding to a host initiating a TCP session, but no effort is made
[yet] to authenticate inbound traffic. This is, however, sufficient to
interwork with Cisco equipment.
Tested with a Cisco 2501 running IOS 12.0(27), and Quagga 0.96.4 with
local patches. Patches for tcpdump to validate TCP-MD5 sessions are also
available from me upon request.
Sponsored by: sentex.net
2004-02-11 04:26:04 +00:00
|
|
|
#include "opt_inet.h"
|
2000-01-09 19:17:30 +00:00
|
|
|
#include "opt_inet6.h"
|
1997-09-16 18:36:06 +00:00
|
|
|
#include "opt_tcpdebug.h"
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/param.h>
|
1995-11-09 20:23:09 +00:00
|
|
|
#include <sys/kernel.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <sys/malloc.h>
|
|
|
|
#include <sys/mbuf.h>
|
|
|
|
#include <sys/socket.h>
|
|
|
|
#include <sys/socketvar.h>
|
2002-04-30 01:54:54 +00:00
|
|
|
#include <sys/sysctl.h>
|
1996-04-04 10:46:44 +00:00
|
|
|
#include <sys/syslog.h>
|
2002-04-30 01:54:54 +00:00
|
|
|
#include <sys/systm.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
|
2004-02-24 15:27:41 +00:00
|
|
|
#include <vm/uma.h>
|
|
|
|
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <net/if.h>
|
|
|
|
#include <net/route.h>
|
2009-08-01 19:26:27 +00:00
|
|
|
#include <net/vnet.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
|
|
|
|
#include <netinet/in.h>
|
2002-04-30 01:54:54 +00:00
|
|
|
#include <netinet/in_pcb.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <netinet/in_systm.h>
|
2002-04-30 01:54:54 +00:00
|
|
|
#include <netinet/in_var.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <netinet/ip.h>
|
|
|
|
#include <netinet/ip_var.h>
|
2005-11-18 20:12:40 +00:00
|
|
|
#include <netinet/ip_options.h>
|
2000-07-04 16:35:15 +00:00
|
|
|
#include <netinet/ip6.h>
|
|
|
|
#include <netinet6/in6_pcb.h>
|
2002-04-30 01:54:54 +00:00
|
|
|
#include <netinet6/ip6_var.h>
|
|
|
|
#include <netinet6/nd6.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <netinet/tcp.h>
|
|
|
|
#include <netinet/tcp_fsm.h>
|
|
|
|
#include <netinet/tcp_seq.h>
|
|
|
|
#include <netinet/tcp_timer.h>
|
|
|
|
#include <netinet/tcp_var.h>
|
2000-01-09 19:17:30 +00:00
|
|
|
#include <netinet6/tcp6_var.h>
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <netinet/tcpip.h>
|
1994-09-15 10:36:56 +00:00
|
|
|
#ifdef TCPDEBUG
|
1994-05-24 10:09:53 +00:00
|
|
|
#include <netinet/tcp_debug.h>
|
2000-01-09 19:17:30 +00:00
|
|
|
#endif /* TCPDEBUG */
|
|
|
|
|
/* Forward declarations for the sysctl handlers defined below. */
static int tcp_reass_sysctl_maxseg(SYSCTL_HANDLER_ARGS);
static int tcp_reass_sysctl_qsize(SYSCTL_HANDLER_ARGS);

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
    "TCP Segment Reassembly Queue");

/*
 * Upper bound on reassembly segments, per vnet.  Read-only tunable
 * (CTLFLAG_RDTUN): settable from the loader, not at runtime; reads go
 * through tcp_reass_sysctl_maxseg() to reflect the live zone limit.
 */
static VNET_DEFINE(int, tcp_reass_maxseg) = 0;
#define	V_tcp_reass_maxseg		VNET(tcp_reass_maxseg)
SYSCTL_VNET_PROC(_net_inet_tcp_reass, OID_AUTO, maxsegments,
    CTLTYPE_INT | CTLFLAG_RDTUN,
    &VNET_NAME(tcp_reass_maxseg), 0, &tcp_reass_sysctl_maxseg, "I",
    "Global maximum number of TCP Segments in Reassembly Queue");

/*
 * Current number of queued segments, per vnet.  Read-only; refreshed
 * from the UMA zone by tcp_reass_sysctl_qsize() on each read.
 */
static VNET_DEFINE(int, tcp_reass_qsize) = 0;
#define	V_tcp_reass_qsize		VNET(tcp_reass_qsize)
SYSCTL_VNET_PROC(_net_inet_tcp_reass, OID_AUTO, cursegments,
    CTLTYPE_INT | CTLFLAG_RD,
    &VNET_NAME(tcp_reass_qsize), 0, &tcp_reass_sysctl_qsize, "I",
    "Global number of TCP Segments currently in Reassembly Queue");

/*
 * Count of segments dropped because a connection's queue hit its
 * per-socket limit; bumped in tcp_reass().
 */
static VNET_DEFINE(int, tcp_reass_overflows) = 0;
#define	V_tcp_reass_overflows	VNET(tcp_reass_overflows)
SYSCTL_VNET_INT(_net_inet_tcp_reass, OID_AUTO, overflows,
    CTLTYPE_INT | CTLFLAG_RD,
    &VNET_NAME(tcp_reass_overflows), 0,
    "Global number of TCP Segment Reassembly Queue Overflows");

/* UMA zone backing struct tseg_qent allocations, per vnet. */
static VNET_DEFINE(uma_zone_t, tcp_reass_zone);
#define	V_tcp_reass_zone		VNET(tcp_reass_zone)
|
2004-02-24 15:27:41 +00:00
|
|
|
/* Initialize TCP reassembly queue */
|
2006-04-21 09:25:40 +00:00
|
|
|
static void
|
|
|
|
tcp_reass_zone_change(void *tag)
|
|
|
|
{
|
|
|
|
|
Commit step 1 of the vimage project, (network stack)
virtualization work done by Marko Zec (zec@).
This is the first in a series of commits over the course
of the next few weeks.
Mark all uses of global variables to be virtualized
with a V_ prefix.
Use macros to map them back to their global names for
now, so this is a NOP change only.
We hope to have caught at least 85-90% of what is needed
so we do not invalidate a lot of outstanding patches again.
Obtained from: //depot/projects/vimage-commit2/...
Reviewed by: brooks, des, ed, mav, julian,
jamie, kris, rwatson, zec, ...
(various people I forgot, different versions)
md5 (with a bit of help)
Sponsored by: NLnet Foundation, The FreeBSD Foundation
X-MFC after: never
V_Commit_Message_Reviewed_By: more people than the patch
2008-08-17 23:27:27 +00:00
|
|
|
V_tcp_reass_maxseg = nmbclusters / 16;
|
First pass at separating per-vnet initializer functions
from existing functions for initializing global state.
At this stage, the new per-vnet initializer functions are
directly called from the existing global initialization code,
which should in most cases result in compiler inlining those
new functions, hence yielding a near-zero functional change.
Modify the existing initializer functions which are invoked via
protosw, like ip_init() et. al., to allow them to be invoked
multiple times, i.e. per each vnet. Global state, if any,
is initialized only if such functions are called within the
context of vnet0, which will be determined via the
IS_DEFAULT_VNET(curvnet) check (currently always true).
While here, V_irtualize a few remaining global UMA zones
used by net/netinet/netipsec networking code. While it is
not yet clear to me or anybody else whether this is the right
thing to do, at this stage this makes the code more readable,
and makes it easier to track uncollected UMA-zone-backed
objects on vnet removal. In the long run, it's quite possible
that some form of shared use of UMA zone pools among multiple
vnets should be considered.
Bump __FreeBSD_version due to changes in layout of structs
vnet_ipfw, vnet_inet and vnet_net.
Approved by: julian (mentor)
2009-04-06 22:29:41 +00:00
|
|
|
uma_zone_set_max(V_tcp_reass_zone, V_tcp_reass_maxseg);
|
2006-04-21 09:25:40 +00:00
|
|
|
}
|
|
|
|
|
/*
 * Initialize the TCP reassembly machinery: size the segment-entry zone
 * from nmbclusters, honour the loader tunable override, and register for
 * future nmbclusters changes.  Ordering matters: the default must be set
 * before the tunable fetch, and the zone must exist before its limit is
 * applied.
 */
void
tcp_reass_init(void)
{

	/* Default limit, possibly overridden by the loader tunable. */
	V_tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
	    &V_tcp_reass_maxseg);
	V_tcp_reass_zone = uma_zcreate("tcpreass", sizeof (struct tseg_qent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(V_tcp_reass_zone, V_tcp_reass_maxseg);
	/* Track nmbclusters so the zone limit scales with mbuf resources. */
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    tcp_reass_zone_change, NULL, EVENTHANDLER_PRI_ANY);
}
|
#ifdef VIMAGE
/*
 * Per-vnet teardown: release the reassembly zone when a virtual network
 * stack is destroyed.  Callers are expected to have drained all queues
 * (via tcp_reass_flush()) beforehand — TODO confirm against vnet teardown
 * path.
 */
void
tcp_reass_destroy(void)
{

	uma_zdestroy(V_tcp_reass_zone);
}
#endif
|
2010-09-25 04:58:46 +00:00
|
|
|
void
|
|
|
|
tcp_reass_flush(struct tcpcb *tp)
|
|
|
|
{
|
|
|
|
struct tseg_qent *qe;
|
|
|
|
|
|
|
|
INP_WLOCK_ASSERT(tp->t_inpcb);
|
|
|
|
|
|
|
|
while ((qe = LIST_FIRST(&tp->t_segq)) != NULL) {
|
|
|
|
LIST_REMOVE(qe, tqe_q);
|
|
|
|
m_freem(qe->tqe_m);
|
|
|
|
uma_zfree(V_tcp_reass_zone, qe);
|
|
|
|
tp->t_segqlen--;
|
|
|
|
}
|
|
|
|
|
|
|
|
KASSERT((tp->t_segqlen == 0),
|
|
|
|
("TCP reass queue %p segment count is %d instead of 0 after flush.",
|
|
|
|
tp, tp->t_segqlen));
|
|
|
|
}
|
|
|
|
|
2010-10-16 05:37:45 +00:00
|
|
|
static int
|
|
|
|
tcp_reass_sysctl_maxseg(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
V_tcp_reass_maxseg = uma_zone_get_max(V_tcp_reass_zone);
|
|
|
|
return (sysctl_handle_int(oidp, arg1, arg2, req));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
tcp_reass_sysctl_qsize(SYSCTL_HANDLER_ARGS)
|
|
|
|
{
|
|
|
|
V_tcp_reass_qsize = uma_zone_get_cur(V_tcp_reass_zone);
|
|
|
|
return (sysctl_handle_int(oidp, arg1, arg2, req));
|
|
|
|
}
|
|
|
|
|
/*
 * Insert an out-of-order segment (mbuf chain "m", header "th", payload
 * length "*tlenp") into tp's reassembly queue, trimming any overlap with
 * already-queued data, then deliver any now-contiguous data at rcv_nxt to
 * the socket.  May also be called with th == NULL purely to drain queued
 * pre-ESTABLISHED data to the user.  Returns the TH_FIN flag of the last
 * segment delivered, or 0 if nothing was delivered.  On drop, *tlenp is
 * zeroed.  Caller holds the inpcb write lock.
 */
int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
{
	struct tseg_qent *q;
	struct tseg_qent *p = NULL;	/* entry preceding the new segment */
	struct tseg_qent *nq;
	struct tseg_qent *te = NULL;	/* entry for the new segment */
	struct socket *so = tp->t_inpcb->inp_socket;
	char *s = NULL;
	int flags;
	struct tseg_qent tqs;	/* stack fallback when the zone is exhausted */

	INP_WLOCK_ASSERT(tp->t_inpcb);

	/*
	 * XXX: tcp_reass() is rather inefficient with its data structures
	 * and should be rewritten (see NetBSD for optimizations).
	 */

	/*
	 * Call with th==NULL after become established to
	 * force pre-ESTABLISHED data up to user socket.
	 */
	if (th == NULL)
		goto present;

	/*
	 * Limit the number of segments that can be queued to reduce the
	 * potential for mbuf exhaustion. For best performance, we want to be
	 * able to queue a full window's worth of segments. The size of the
	 * socket receive buffer determines our advertised window and grows
	 * automatically when socket buffer autotuning is enabled. Use it as the
	 * basis for our queue limit.
	 * Always let the missing segment through which caused this queue.
	 * NB: Access to the socket buffer is left intentionally unlocked as we
	 * can tolerate stale information here.
	 *
	 * XXXLAS: Using sbspace(so->so_rcv) instead of so->so_rcv.sb_hiwat
	 * should work but causes packets to be dropped when they shouldn't.
	 * Investigate why and re-evaluate the below limit after the behaviour
	 * is understood.
	 */
	if (th->th_seq != tp->rcv_nxt &&
	    tp->t_segqlen >= (so->so_rcv.sb_hiwat / tp->t_maxseg) + 1) {
		V_tcp_reass_overflows++;
		TCPSTAT_INC(tcps_rcvmemdrop);
		m_freem(m);
		*tlenp = 0;
		if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: queue limit reached, "
			    "segment dropped\n", s, __func__);
			free(s, M_TCPLOG);
		}
		return (0);
	}

	/*
	 * Allocate a new queue entry. If we can't, or hit the zone limit
	 * just drop the pkt.
	 *
	 * Use a temporary structure on the stack for the missing segment
	 * when the zone is exhausted. Otherwise we may get stuck.
	 */
	te = uma_zalloc(V_tcp_reass_zone, M_NOWAIT);
	if (te == NULL) {
		if (th->th_seq != tp->rcv_nxt) {
			/* Not the missing segment: safe to drop it. */
			TCPSTAT_INC(tcps_rcvmemdrop);
			m_freem(m);
			*tlenp = 0;
			if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL,
			    NULL))) {
				log(LOG_DEBUG, "%s; %s: global zone limit "
				    "reached, segment dropped\n", s, __func__);
				free(s, M_TCPLOG);
			}
			return (0);
		} else {
			/*
			 * The missing segment itself: queue it via the
			 * stack entry so delivery can proceed below.
			 */
			bzero(&tqs, sizeof(struct tseg_qent));
			te = &tqs;
			if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL,
			    NULL))) {
				log(LOG_DEBUG,
				    "%s; %s: global zone limit reached, using "
				    "stack for missing segment\n", s, __func__);
				free(s, M_TCPLOG);
			}
		}
	}
	tp->t_segqlen++;

	/*
	 * Find a segment which begins after this one does.
	 */
	LIST_FOREACH(q, &tp->t_segq, tqe_q) {
		if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
			break;
		p = q;
	}

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already. If so, drop the data from the incoming
	 * segment. If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		int i;
		/* conversion to int (in i) handles seq wraparound */
		i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
		if (i > 0) {
			if (i >= *tlenp) {
				/* Entirely duplicate data: count and drop. */
				TCPSTAT_INC(tcps_rcvduppack);
				TCPSTAT_ADD(tcps_rcvdupbyte, *tlenp);
				m_freem(m);
				uma_zfree(V_tcp_reass_zone, te);
				tp->t_segqlen--;
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes.
				 */
				goto present;	/* ??? */
			}
			/* Partial overlap: trim the duplicated prefix. */
			m_adj(m, i);
			*tlenp -= i;
			th->th_seq += i;
		}
	}
	tp->t_rcvoopack++;
	TCPSTAT_INC(tcps_rcvoopack);
	TCPSTAT_ADD(tcps_rcvoobyte, *tlenp);

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (q) {
		int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
		if (i <= 0)
			break;
		if (i < q->tqe_len) {
			/* Partial overlap: trim the queued segment's head. */
			q->tqe_th->th_seq += i;
			q->tqe_len -= i;
			m_adj(q->tqe_m, i);
			break;
		}

		/* Fully covered: unlink and free the queued segment. */
		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		uma_zfree(V_tcp_reass_zone, q);
		tp->t_segqlen--;
		q = nq;
	}

	/* Insert the new segment queue entry into place. */
	te->tqe_m = m;
	te->tqe_th = th;
	te->tqe_len = *tlenp;

	if (p == NULL) {
		LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
	} else {
		/*
		 * The stack entry is only valid for the missing segment,
		 * which by definition sorts to the head of the queue.
		 */
		KASSERT(te != &tqs, ("%s: temporary stack based entry not "
		    "first element in queue", __func__));
		LIST_INSERT_AFTER(p, te, tqe_q);
	}

present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (!TCPS_HAVEESTABLISHED(tp->t_state))
		return (0);
	q = LIST_FIRST(&tp->t_segq);
	if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
		return (0);
	SOCKBUF_LOCK(&so->so_rcv);
	do {
		tp->rcv_nxt += q->tqe_len;
		flags = q->tqe_th->th_flags & TH_FIN;
		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			m_freem(q->tqe_m);
		else
			/* Append in sequence; sorwakeup_locked() unlocks. */
			sbappendstream_locked(&so->so_rcv, q->tqe_m);
		if (q != &tqs)
			uma_zfree(V_tcp_reass_zone, q);
		tp->t_segqlen--;
		q = nq;
	} while (q && q->tqe_th->th_seq == tp->rcv_nxt);
	ND6_HINT(tp);
	sorwakeup_locked(so);
	return (flags);
}