/*-
 * Copyright (c) 2001 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jonathan Lemon
 * and NAI Labs, the Security Research Division of Network Associates, Inc.
 * under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the
 * DARPA CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_random_ip_id.h"
#include "opt_tcpdebug.h"
#include "opt_tcp_sack.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mac.h>
#include <sys/mbuf.h>
#include <sys/md5.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/nd6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/tcp.h>
#ifdef TCPDEBUG
#include <netinet/tcpip.h>
#endif
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#endif /*FAST_IPSEC*/

#include <machine/in_cksum.h>
#include <vm/uma.h>

static int tcp_syncookies = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, syncookies, CTLFLAG_RW,
    &tcp_syncookies, 0,
    "Use TCP SYN cookies if the syncache overflows");

static void	 syncache_drop(struct syncache *, struct syncache_head *);
static void	 syncache_free(struct syncache *);
static void	 syncache_insert(struct syncache *, struct syncache_head *);
struct syncache *syncache_lookup(struct in_conninfo *, struct syncache_head **);
#ifdef TCPDEBUG
static int	 syncache_respond(struct syncache *, struct mbuf *, struct socket *);
#else
static int	 syncache_respond(struct syncache *, struct mbuf *);
#endif
static struct	 socket *syncache_socket(struct syncache *, struct socket *,
		    struct mbuf *m);
static void	 syncache_timer(void *);
static u_int32_t syncookie_generate(struct syncache *, u_int32_t *);
static struct syncache *syncookie_lookup(struct in_conninfo *,
		    struct tcphdr *, struct socket *);

/*
 * Transmit the SYN,ACK fewer times than TCP_MAXRXTSHIFT specifies.
 * 3 retransmits corresponds to a timeout of (1 + 2 + 4 + 8 == 15) seconds,
 * the odds are that the user has given up attempting to connect by then.
 */
#define SYNCACHE_MAXREXMTS		3

/* Arbitrary values */
#define TCP_SYNCACHE_HASHSIZE		512
#define TCP_SYNCACHE_BUCKETLIMIT	30

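/*
 * Global syncache state: the hash table of buckets holding embryonic
 * connections, the UMA zone they are allocated from, the tunable size
 * limits, and one timer queue plus callout per SYN,ACK retransmit slot.
 */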
struct tcp_syncache {
	struct	syncache_head *hashbase;
	uma_zone_t zone;
	u_int	hashsize;
	u_int	hashmask;
	u_int	bucket_limit;
	u_int	cache_count;
	u_int	cache_limit;
	u_int	rexmt_limit;
	u_int	hash_secret;
	TAILQ_HEAD(, syncache) timerq[SYNCACHE_MAXREXMTS + 1];
	struct	callout tt_timerq[SYNCACHE_MAXREXMTS + 1];
};
static struct tcp_syncache tcp_syncache;

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, syncache, CTLFLAG_RW, 0, "TCP SYN cache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, bucketlimit, CTLFLAG_RDTUN,
    &tcp_syncache.bucket_limit, 0, "Per-bucket hash limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, cachelimit, CTLFLAG_RDTUN,
    &tcp_syncache.cache_limit, 0, "Overall entry limit for syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, count, CTLFLAG_RD,
    &tcp_syncache.cache_count, 0, "Current number of entries in syncache");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, hashsize, CTLFLAG_RDTUN,
    &tcp_syncache.hashsize, 0, "Size of TCP syncache hashtable");

SYSCTL_INT(_net_inet_tcp_syncache, OID_AUTO, rexmtlimit, CTLFLAG_RW,
    &tcp_syncache.rexmt_limit, 0, "Limit on SYN/ACK retransmissions");

static MALLOC_DEFINE(M_SYNCACHE, "syncache", "TCP syncache");

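/*
 * Bucket selection hashes the foreign address and the port pair together
 * with the boot-time random hash_secret, so that remote hosts cannot
 * easily predict which bucket a given connection will land in.
 */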
#define SYNCACHE_HASH(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc_faddr.s_addr ^					\
	  ((inc)->inc_faddr.s_addr >> 16) ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

#define SYNCACHE_HASH6(inc, mask)					\
	((tcp_syncache.hash_secret ^					\
	  (inc)->inc6_faddr.s6_addr32[0] ^				\
	  (inc)->inc6_faddr.s6_addr32[3] ^				\
	  (inc)->inc_fport ^ (inc)->inc_lport) & mask)

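/*
 * Endpoint comparisons used by syncache_lookup(); the IPv6 variant simply
 * compares the two in_endpoints structures byte for byte.
 */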
#define ENDPTS_EQ(a, b) (						\
	(a)->ie_fport == (b)->ie_fport &&				\
	(a)->ie_lport == (b)->ie_lport &&				\
	(a)->ie_faddr.s_addr == (b)->ie_faddr.s_addr &&			\
	(a)->ie_laddr.s_addr == (b)->ie_laddr.s_addr			\
)

#define ENDPTS6_EQ(a, b) (memcmp(a, b, sizeof(*a)) == 0)

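/*
 * Move an entry onto timer queue 'slot' and make sure that slot's callout
 * is armed, using the exponentially backed-off retransmit interval
 * TCPTV_RTOBASE * tcp_backoff[slot].
 */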
#define SYNCACHE_TIMEOUT(sc, slot) do {					\
	sc->sc_rxtslot = (slot);					\
	sc->sc_rxttime = ticks + TCPTV_RTOBASE * tcp_backoff[(slot)];	\
	TAILQ_INSERT_TAIL(&tcp_syncache.timerq[(slot)], sc, sc_timerq);	\
	if (!callout_active(&tcp_syncache.tt_timerq[(slot)]))		\
		callout_reset(&tcp_syncache.tt_timerq[(slot)],		\
		    TCPTV_RTOBASE * tcp_backoff[(slot)],		\
		    syncache_timer, (void *)((intptr_t)(slot)));	\
} while (0)

static void
syncache_free(struct syncache *sc)
{
	if (sc->sc_ipopts)
		(void) m_free(sc->sc_ipopts);

	uma_zfree(tcp_syncache.zone, sc);
}

void
syncache_init(void)
{
	int i;

	tcp_syncache.cache_count = 0;
	tcp_syncache.hashsize = TCP_SYNCACHE_HASHSIZE;
	tcp_syncache.bucket_limit = TCP_SYNCACHE_BUCKETLIMIT;
	tcp_syncache.cache_limit =
	    tcp_syncache.hashsize * tcp_syncache.bucket_limit;
	tcp_syncache.rexmt_limit = SYNCACHE_MAXREXMTS;
	tcp_syncache.hash_secret = arc4random();

	TUNABLE_INT_FETCH("net.inet.tcp.syncache.hashsize",
	    &tcp_syncache.hashsize);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.cachelimit",
	    &tcp_syncache.cache_limit);
	TUNABLE_INT_FETCH("net.inet.tcp.syncache.bucketlimit",
	    &tcp_syncache.bucket_limit);
	if (!powerof2(tcp_syncache.hashsize)) {
		printf("WARNING: syncache hash size is not a power of 2.\n");
		tcp_syncache.hashsize = 512;	/* safe default */
	}
	tcp_syncache.hashmask = tcp_syncache.hashsize - 1;

	/* Allocate the hash table. */
	MALLOC(tcp_syncache.hashbase, struct syncache_head *,
	    tcp_syncache.hashsize * sizeof(struct syncache_head),
	    M_SYNCACHE, M_WAITOK);

	/* Initialize the hash buckets. */
	for (i = 0; i < tcp_syncache.hashsize; i++) {
		TAILQ_INIT(&tcp_syncache.hashbase[i].sch_bucket);
		tcp_syncache.hashbase[i].sch_length = 0;
	}

	/* Initialize the timer queues. */
	for (i = 0; i <= SYNCACHE_MAXREXMTS; i++) {
		TAILQ_INIT(&tcp_syncache.timerq[i]);
		callout_init(&tcp_syncache.tt_timerq[i],
		    debug_mpsafenet ? CALLOUT_MPSAFE : 0);
	}

	/*
	 * Allocate the syncache entries.  Allow the zone to allocate one
	 * more entry than cache limit, so a new entry can bump out an
	 * older one.
	 */
	tcp_syncache.zone = uma_zcreate("syncache", sizeof(struct syncache),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcp_syncache.zone, tcp_syncache.cache_limit);
	tcp_syncache.cache_limit -= 1;
}

static void
syncache_insert(sc, sch)
	struct syncache *sc;
	struct syncache_head *sch;
{
	struct syncache *sc2;
	int i;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	/*
	 * Make sure that we don't overflow the per-bucket
	 * limit or the total cache size limit.
	 */
	if (sch->sch_length >= tcp_syncache.bucket_limit) {
		/*
		 * The bucket is full, toss the oldest element.
		 */
		sc2 = TAILQ_FIRST(&sch->sch_bucket);
		sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, sch);
		tcpstat.tcps_sc_bucketoverflow++;
	} else if (tcp_syncache.cache_count >= tcp_syncache.cache_limit) {
		/*
		 * The cache is full.  Toss the oldest entry in the
		 * entire cache.  This is the front entry in the
		 * first non-empty timer queue with the largest
		 * timeout value.
		 */
		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
			sc2 = TAILQ_FIRST(&tcp_syncache.timerq[i]);
			if (sc2 != NULL)
				break;
		}
		sc2->sc_tp->ts_recent = ticks;
		syncache_drop(sc2, NULL);
		tcpstat.tcps_sc_cacheoverflow++;
	}

	/* Initialize the entry's timer. */
	SYNCACHE_TIMEOUT(sc, 0);

	/* Put it into the bucket. */
	TAILQ_INSERT_TAIL(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length++;
	tcp_syncache.cache_count++;
	tcpstat.tcps_sc_added++;
}

static void
syncache_drop(sc, sch)
	struct syncache *sc;
	struct syncache_head *sch;
{
	INP_INFO_WLOCK_ASSERT(&tcbinfo);

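	/* The caller may pass sch as NULL; recover the bucket from the entry. */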
	if (sch == NULL) {
#ifdef INET6
		if (sc->sc_inc.inc_isipv6) {
			sch = &tcp_syncache.hashbase[
			    SYNCACHE_HASH6(&sc->sc_inc, tcp_syncache.hashmask)];
		} else
#endif
		{
			sch = &tcp_syncache.hashbase[
			    SYNCACHE_HASH(&sc->sc_inc, tcp_syncache.hashmask)];
		}
	}

	TAILQ_REMOVE(&sch->sch_bucket, sc, sc_hash);
	sch->sch_length--;
	tcp_syncache.cache_count--;

	TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot], sc, sc_timerq);
	if (TAILQ_EMPTY(&tcp_syncache.timerq[sc->sc_rxtslot]))
		callout_stop(&tcp_syncache.tt_timerq[sc->sc_rxtslot]);

	syncache_free(sc);
}

/*
 * Walk the timer queues, looking for SYN,ACKs that need to be retransmitted.
 * If we have retransmitted an entry the maximum number of times, expire it.
 */
static void
syncache_timer(xslot)
	void *xslot;
{
	intptr_t slot = (intptr_t)xslot;
	struct syncache *sc, *nsc;
	struct inpcb *inp;

	INP_INFO_WLOCK(&tcbinfo);
	if (callout_pending(&tcp_syncache.tt_timerq[slot]) ||
	    !callout_active(&tcp_syncache.tt_timerq[slot])) {
		/* XXX can this happen? */
		INP_INFO_WUNLOCK(&tcbinfo);
		return;
	}
	callout_deactivate(&tcp_syncache.tt_timerq[slot]);

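	/*
	 * Entries on each slot's queue are kept in increasing sc_rxttime
	 * order, so the walk can stop at the first entry that has not yet
	 * expired.
	 */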
	nsc = TAILQ_FIRST(&tcp_syncache.timerq[slot]);
	while (nsc != NULL) {
		if (ticks < nsc->sc_rxttime)
			break;
		sc = nsc;
		inp = sc->sc_tp->t_inpcb;
		if (slot == SYNCACHE_MAXREXMTS ||
		    slot >= tcp_syncache.rexmt_limit ||
		    inp == NULL || inp->inp_gencnt != sc->sc_inp_gencnt) {
			nsc = TAILQ_NEXT(sc, sc_timerq);
			syncache_drop(sc, NULL);
			tcpstat.tcps_sc_stale++;
			continue;
		}
		/*
		 * syncache_respond() may call back into the syncache to
		 * modify another entry, so do not obtain the next
		 * entry on the timer chain until it has completed.
		 */
#ifdef TCPDEBUG
		(void) syncache_respond(sc, NULL, NULL);
#else
		(void) syncache_respond(sc, NULL);
#endif
		nsc = TAILQ_NEXT(sc, sc_timerq);
		tcpstat.tcps_sc_retransmitted++;
		TAILQ_REMOVE(&tcp_syncache.timerq[slot], sc, sc_timerq);
		SYNCACHE_TIMEOUT(sc, slot + 1);
	}
	if (nsc != NULL)
		callout_reset(&tcp_syncache.tt_timerq[slot],
		    nsc->sc_rxttime - ticks, syncache_timer, (void *)(slot));
	INP_INFO_WUNLOCK(&tcbinfo);
}

/*
 * Find an entry in the syncache.
 */
struct syncache *
syncache_lookup(inc, schp)
	struct in_conninfo *inc;
	struct syncache_head **schp;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

#ifdef INET6
	if (inc->inc_isipv6) {
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH6(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
			if (ENDPTS6_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	} else
#endif
	{
		sch = &tcp_syncache.hashbase[
		    SYNCACHE_HASH(inc, tcp_syncache.hashmask)];
		*schp = sch;
		TAILQ_FOREACH(sc, &sch->sch_bucket, sc_hash) {
#ifdef INET6
			if (sc->sc_inc.inc_isipv6)
				continue;
#endif
			if (ENDPTS_EQ(&inc->inc_ie, &sc->sc_inc.inc_ie))
				return (sc);
		}
	}
	return (NULL);
}

/*
 * This function is called when we get a RST for a
 * non-existent connection, so that we can see if the
 * connection is in the syn cache.  If it is, zap it.
 */
void
syncache_chkrst(inc, th)
	struct in_conninfo *inc;
	struct tcphdr *th;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;
	/*
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 *
	 * The sequence number in the reset segment is normally an
	 * echo of our outgoing acknowledgement numbers, but some hosts
	 * send a reset with the sequence number at the rightmost edge
	 * of our receive window, and we have to handle this case.
	 */
	if (SEQ_GEQ(th->th_seq, sc->sc_irs) &&
	    SEQ_LEQ(th->th_seq, sc->sc_irs + sc->sc_wnd)) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_reset++;
	}
}

void
syncache_badack(inc)
	struct in_conninfo *inc;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		syncache_drop(sc, sch);
		tcpstat.tcps_sc_badack++;
	}
}

void
syncache_unreach(inc, th)
	struct in_conninfo *inc;
	struct tcphdr *th;
{
	struct syncache *sc;
	struct syncache_head *sch;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	/* we are called at splnet() here */
	sc = syncache_lookup(inc, &sch);
	if (sc == NULL)
		return;

	/* If the sequence number != sc_iss, then it's a bogus ICMP msg */
	if (ntohl(th->th_seq) != sc->sc_iss)
		return;

	/*
	 * If we've retransmitted 3 times and this is our second error,
	 * we remove the entry.  Otherwise, we allow it to continue on.
	 * This prevents us from incorrectly nuking an entry during a
	 * spurious network outage.
	 *
	 * See tcp_notify().
	 */
	if ((sc->sc_flags & SCF_UNREACH) == 0 || sc->sc_rxtslot < 3) {
		sc->sc_flags |= SCF_UNREACH;
		return;
	}
	syncache_drop(sc, sch);
	tcpstat.tcps_sc_unreach++;
}

/*
 * Build a new TCP socket structure from a syncache entry.
 */
static struct socket *
syncache_socket(sc, lso, m)
	struct syncache *sc;
	struct socket *lso;
	struct mbuf *m;
{
	struct inpcb *inp = NULL;
	struct socket *so;
	struct tcpcb *tp;

	NET_ASSERT_GIANT();
	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	/*
	 * Ok, create the full blown connection, and set things up
	 * as they would have been set up if we had created the
	 * connection when the SYN arrived.  If we can't create
	 * the connection, abort it.
	 */
	so = sonewconn(lso, SS_ISCONNECTED);
	if (so == NULL) {
		/*
		 * Drop the connection; we will send a RST if the peer
		 * retransmits the ACK.
		 */
		tcpstat.tcps_listendrop++;
		goto abort2;
	}
#ifdef MAC
	SOCK_LOCK(so);
	mac_set_socket_peer_from_mbuf(m, so);
	SOCK_UNLOCK(so);
#endif

	inp = sotoinpcb(so);
	INP_LOCK(inp);

	/*
	 * Insert new socket into hash list.
	 */
	inp->inp_inc.inc_isipv6 = sc->sc_inc.inc_isipv6;
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		inp->in6p_laddr = sc->sc_inc.inc6_laddr;
	} else {
		inp->inp_vflag &= ~INP_IPV6;
		inp->inp_vflag |= INP_IPV4;
#endif
		inp->inp_laddr = sc->sc_inc.inc_laddr;
#ifdef INET6
	}
#endif
	inp->inp_lport = sc->sc_inc.inc_lport;
	if (in_pcbinshash(inp) != 0) {
		/*
		 * Undo the assignments above if we failed to
		 * put the PCB on the hash lists.
		 */
#ifdef INET6
		if (sc->sc_inc.inc_isipv6)
			inp->in6p_laddr = in6addr_any;
		else
#endif
			inp->inp_laddr.s_addr = INADDR_ANY;
		inp->inp_lport = 0;
		goto abort;
	}
#ifdef IPSEC
	/* copy old policy into new socket's */
	if (ipsec_copy_pcbpolicy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_expand: could not copy policy\n");
#endif
#ifdef FAST_IPSEC
	/* copy old policy into new socket's */
	if (ipsec_copy_policy(sotoinpcb(lso)->inp_sp, inp->inp_sp))
		printf("syncache_expand: could not copy policy\n");
#endif
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		struct inpcb *oinp = sotoinpcb(lso);
		struct in6_addr laddr6;
		struct sockaddr_in6 sin6;
		/*
		 * Inherit socket options from the listening socket.
		 * Note that in6p_inputopts are not (and should not be)
		 * copied, since it stores previously received options and is
		 * used to detect if each new option is different than the
		 * previous one and hence should be passed to a user.
		 * If we copied in6p_inputopts, a user would not be able to
		 * receive options just after calling the accept system call.
		 */
		inp->inp_flags |= oinp->inp_flags & INP_CONTROLOPTS;
		if (oinp->in6p_outputopts)
			inp->in6p_outputopts =
			    ip6_copypktopts(oinp->in6p_outputopts, M_NOWAIT);

		sin6.sin6_family = AF_INET6;
		sin6.sin6_len = sizeof(sin6);
		sin6.sin6_addr = sc->sc_inc.inc6_faddr;
		sin6.sin6_port = sc->sc_inc.inc_fport;
		sin6.sin6_flowinfo = sin6.sin6_scope_id = 0;
		laddr6 = inp->in6p_laddr;
		if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr))
			inp->in6p_laddr = sc->sc_inc.inc6_laddr;
		if (in6_pcbconnect(inp, (struct sockaddr *)&sin6,
		    thread0.td_ucred)) {
			inp->in6p_laddr = laddr6;
			goto abort;
		}
		/* Override flowlabel from in6_pcbconnect. */
		inp->in6p_flowinfo &= ~IPV6_FLOWLABEL_MASK;
		inp->in6p_flowinfo |= sc->sc_flowlabel;
	} else
#endif
	{
		struct in_addr laddr;
		struct sockaddr_in sin;

		inp->inp_options = ip_srcroute();
		if (inp->inp_options == NULL) {
			inp->inp_options = sc->sc_ipopts;
			sc->sc_ipopts = NULL;
		}

		sin.sin_family = AF_INET;
		sin.sin_len = sizeof(sin);
		sin.sin_addr = sc->sc_inc.inc_faddr;
		sin.sin_port = sc->sc_inc.inc_fport;
		bzero((caddr_t)sin.sin_zero, sizeof(sin.sin_zero));
		laddr = inp->inp_laddr;
		if (inp->inp_laddr.s_addr == INADDR_ANY)
			inp->inp_laddr = sc->sc_inc.inc_laddr;
		if (in_pcbconnect(inp, (struct sockaddr *)&sin,
		    thread0.td_ucred)) {
			inp->inp_laddr = laddr;
			goto abort;
		}
	}

	tp = intotcpcb(inp);
	tp->t_state = TCPS_SYN_RECEIVED;
	tp->iss = sc->sc_iss;
	tp->irs = sc->sc_irs;
	tcp_rcvseqinit(tp);
	tcp_sendseqinit(tp);
	tp->snd_wl1 = sc->sc_irs;
	tp->rcv_up = sc->sc_irs + 1;
	tp->rcv_wnd = sc->sc_wnd;
	tp->rcv_adv += tp->rcv_wnd;

	tp->t_flags = sototcpcb(lso)->t_flags & (TF_NOPUSH|TF_NODELAY);
	if (sc->sc_flags & SCF_NOOPT)
		tp->t_flags |= TF_NOOPT;
	if (sc->sc_flags & SCF_WINSCALE) {
		tp->t_flags |= TF_REQ_SCALE|TF_RCVD_SCALE;
		tp->requested_s_scale = sc->sc_requested_s_scale;
		tp->request_r_scale = sc->sc_request_r_scale;
	}
	if (sc->sc_flags & SCF_TIMESTAMP) {
		tp->t_flags |= TF_REQ_TSTMP|TF_RCVD_TSTMP;
		tp->ts_recent = sc->sc_tsrecent;
		tp->ts_recent_age = ticks;
	}
	if (sc->sc_flags & SCF_CC) {
		/*
		 * Initialization of the tcpcb for transaction;
		 *   set SND.WND = SEG.WND,
		 *   initialize CCsend and CCrecv.
		 */
		tp->t_flags |= TF_REQ_CC|TF_RCVD_CC;
		tp->cc_send = sc->sc_cc_send;
		tp->cc_recv = sc->sc_cc_recv;
	}
#ifdef TCP_SIGNATURE
	if (sc->sc_flags & SCF_SIGNATURE)
		tp->t_flags |= TF_SIGNATURE;
#endif
	if (sc->sc_flags & SCF_SACK) {
		tp->sack_enable = 1;
		tp->t_flags |= TF_SACK_PERMIT;
	}
	/*
	 * Set up MSS and get cached values from tcp_hostcache.
	 * This might overwrite some of the defaults we just set.
	 */
	tcp_mss(tp, sc->sc_peer_mss);

	/*
	 * If the SYN,ACK was retransmitted, reset cwnd to 1 segment.
	 */
	if (sc->sc_rxtslot != 0)
		tp->snd_cwnd = tp->t_maxseg;
	callout_reset(tp->tt_keep, tcp_keepinit, tcp_timer_keep, tp);

	INP_UNLOCK(inp);

	tcpstat.tcps_accepts++;
	return (so);

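	/*
	 * abort is taken once the new PCB has been locked; abort2 is taken
	 * before that, so it only has to tear down the half-built socket.
	 */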
abort:
	INP_UNLOCK(inp);
abort2:
	if (so != NULL)
		(void) soabort(so);
	return (NULL);
}

/*
 * This function gets called when we receive an ACK for a
 * socket in the LISTEN state.  We look up the connection
 * in the syncache, and if it's there, we pull it out of
 * the cache and turn it into a full-blown connection in
 * the SYN-RECEIVED state.
 */
int
syncache_expand(inc, th, sop, m)
	struct in_conninfo *inc;
	struct tcphdr *th;
	struct socket **sop;
	struct mbuf *m;
{
	struct syncache *sc;
	struct syncache_head *sch;
	struct socket *so;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	sc = syncache_lookup(inc, &sch);
	if (sc == NULL) {
		/*
		 * There is no syncache entry, so see if this ACK is
		 * a returning syncookie.  To do this, first:
		 *  A. See if this socket has had a syncache entry dropped in
		 *     the past.  We don't want to accept a bogus syncookie
		 *     if we've never received a SYN.
		 *  B. check that the syncookie is valid.  If it is, then
		 *     cobble up a fake syncache entry, and return.
		 */
		if (!tcp_syncookies)
			return (0);
		sc = syncookie_lookup(inc, th, *sop);
		if (sc == NULL)
			return (0);
		sch = NULL;
		tcpstat.tcps_sc_recvcookie++;
	}

	/*
	 * If seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	if (th->th_ack != sc->sc_iss + 1)
		return (0);

	so = syncache_socket(sc, *sop, m);
	if (so == NULL) {
#if 0
resetandabort:
		/* XXXjlemon check this - is this correct? */
		(void) tcp_respond(NULL, m, m, th,
		    th->th_seq + tlen, (tcp_seq)0, TH_RST|TH_ACK);
#endif
		m_freem(m);			/* XXX only needed for above */
		tcpstat.tcps_sc_aborted++;
	} else
		tcpstat.tcps_sc_completed++;

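	/*
	 * A NULL bucket pointer means the entry was reconstructed from a
	 * returning syncookie and was never linked into the cache, so it is
	 * simply freed; entries found in the cache are unlinked and dropped.
	 */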
	if (sch == NULL)
		syncache_free(sc);
	else
		syncache_drop(sc, sch);
	*sop = so;
	return (1);
}

/*
 * Given a LISTEN socket and an inbound SYN request, add
 * this to the syn cache, and send back a segment:
 *	<SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
 * to the source.
 *
 * IMPORTANT NOTE: We do _NOT_ ACK data that might accompany the SYN.
 * Doing so would require that we hold onto the data and deliver it
 * to the application.  However, if we are the target of a SYN-flood
 * DoS attack, an attacker could send data which would eventually
 * consume all available buffer space if it were ACKed.  By not ACKing
 * the data, we avoid this DoS scenario.
 */
int
syncache_add(inc, to, th, sop, m)
	struct in_conninfo *inc;
	struct tcpopt *to;
	struct tcphdr *th;
	struct socket **sop;
	struct mbuf *m;
{
	struct tcpcb *tp;
	struct socket *so;
	struct syncache *sc = NULL;
	struct syncache_head *sch;
	struct mbuf *ipopts = NULL;
	struct rmxp_tao tao;
	u_int32_t flowtmp;
	int i, win;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);

	so = *sop;
	tp = sototcpcb(so);
	bzero(&tao, sizeof(tao));

	/*
	 * Remember the IP options, if any.
	 */
#ifdef INET6
	if (!inc->inc_isipv6)
#endif
		ipopts = ip_srcroute();

	/*
	 * See if we already have an entry for this connection.
	 * If we do, resend the SYN,ACK, and reset the retransmit timer.
	 *
	 * XXX
	 * should the syncache be re-initialized with the contents
	 * of the new SYN here (which may have different options?)
	 */
	sc = syncache_lookup(inc, &sch);
	if (sc != NULL) {
		tcpstat.tcps_sc_dupsyn++;
		if (ipopts) {
			/*
			 * If we were remembering a previous source route,
			 * forget it and use the new one we've been given.
			 */
			if (sc->sc_ipopts)
				(void) m_free(sc->sc_ipopts);
			sc->sc_ipopts = ipopts;
		}
		/*
		 * Update timestamp if present.
		 */
		if (sc->sc_flags & SCF_TIMESTAMP)
			sc->sc_tsrecent = to->to_tsval;
		/*
		 * PCB may have changed, pick up new values.
		 */
		sc->sc_tp = tp;
		sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
#ifdef TCPDEBUG
		if (syncache_respond(sc, m, so) == 0) {
#else
		if (syncache_respond(sc, m) == 0) {
#endif
			/* NB: guarded by INP_INFO_WLOCK(&tcbinfo) */
			TAILQ_REMOVE(&tcp_syncache.timerq[sc->sc_rxtslot],
			    sc, sc_timerq);
			SYNCACHE_TIMEOUT(sc, sc->sc_rxtslot);
			tcpstat.tcps_sndacks++;
			tcpstat.tcps_sndtotal++;
		}
		*sop = NULL;
		return (1);
	}

	sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
	if (sc == NULL) {
		/*
		 * The zone allocator couldn't provide more entries.
		 * Treat this as if the cache was full; drop the oldest
		 * entry and insert the new one.
		 */
		/* NB: guarded by INP_INFO_WLOCK(&tcbinfo) */
		for (i = SYNCACHE_MAXREXMTS; i >= 0; i--) {
			sc = TAILQ_FIRST(&tcp_syncache.timerq[i]);
			if (sc != NULL)
				break;
		}
		sc->sc_tp->ts_recent = ticks;
		syncache_drop(sc, NULL);
		tcpstat.tcps_sc_zonefail++;
		sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
		if (sc == NULL) {
			if (ipopts)
				(void) m_free(ipopts);
			return (0);
		}
	}

	/*
	 * Fill in the syncache values.
	 */
	bzero(sc, sizeof(*sc));
	sc->sc_tp = tp;
	sc->sc_inp_gencnt = tp->t_inpcb->inp_gencnt;
	sc->sc_ipopts = ipopts;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
	}
	sc->sc_irs = th->th_seq;
	sc->sc_flags = 0;
	sc->sc_peer_mss = to->to_flags & TOF_MSS ? to->to_mss : 0;
	sc->sc_flowlabel = 0;
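	/*
	 * The initial send sequence number either carries a SYN cookie, so
	 * the connection can still be validated from the returning ACK if
	 * this entry is later evicted, or is simply chosen at random.
	 */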
	if (tcp_syncookies) {
		sc->sc_iss = syncookie_generate(sc, &flowtmp);
#ifdef INET6
		if (inc->inc_isipv6 &&
		    (sc->sc_tp->t_inpcb->in6p_flags & IN6P_AUTOFLOWLABEL)) {
			sc->sc_flowlabel = flowtmp & IPV6_FLOWLABEL_MASK;
		}
#endif
	} else {
		sc->sc_iss = arc4random();
#ifdef INET6
		if (inc->inc_isipv6 &&
		    (sc->sc_tp->t_inpcb->in6p_flags & IN6P_AUTOFLOWLABEL)) {
			sc->sc_flowlabel =
#ifdef RANDOM_IP_ID
			    (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
#else
			    (htonl(ip6_flow_seq++) & IPV6_FLOWLABEL_MASK);
#endif
		}
#endif
	}

	/* Initial receive window: clip sbspace to [0 .. TCP_MAXWIN] */
	win = sbspace(&so->so_rcv);
	win = imax(win, 0);
	win = imin(win, TCP_MAXWIN);
	sc->sc_wnd = win;

	if (tcp_do_rfc1323) {
		/*
		 * A timestamp received in a SYN makes
		 * it ok to send timestamp requests and replies.
		 */
		if (to->to_flags & TOF_TS) {
			sc->sc_tsrecent = to->to_tsval;
			sc->sc_flags |= SCF_TIMESTAMP;
		}
		if (to->to_flags & TOF_SCALE) {
			int wscale = 0;

			/* Compute proper scaling value from buffer space */
			while (wscale < TCP_MAX_WINSHIFT &&
			    (TCP_MAXWIN << wscale) < so->so_rcv.sb_hiwat)
				wscale++;
			sc->sc_request_r_scale = wscale;
			sc->sc_requested_s_scale = to->to_requested_s_scale;
			sc->sc_flags |= SCF_WINSCALE;
		}
	}
	if (tcp_do_rfc1644) {
		/*
		 * A CC or CC.new option received in a SYN makes
		 * it ok to send CC in subsequent segments.
		 */
		if (to->to_flags & (TOF_CC|TOF_CCNEW)) {
			sc->sc_cc_recv = to->to_cc;
			sc->sc_cc_send = CC_INC(tcp_ccgen);
			sc->sc_flags |= SCF_CC;
		}
	}
	if (tp->t_flags & TF_NOOPT)
		sc->sc_flags = SCF_NOOPT;
#ifdef TCP_SIGNATURE
	/*
	 * If the listening socket requested TCP digests, and the received
	 * SYN contains the option, flag this in the syncache so that
	 * syncache_respond() will do the right thing with the SYN+ACK.
	 * XXX Currently we always record the option by default and will
	 * attempt to use it in syncache_respond().
	 */
	if (to->to_flags & TOF_SIGNATURE)
		sc->sc_flags = SCF_SIGNATURE;
#endif

	if (to->to_flags & TOF_SACK)
		sc->sc_flags |= SCF_SACK;

	/*
	 * XXX
	 * We have the option here of not doing TAO (even if the segment
	 * qualifies) and instead fall back to a normal 3WHS via the syncache.
	 * This allows us to apply synflood protection to TAO-qualifying SYNs
	 * also.  However, there should be a heuristic to determine when to
	 * do this, and one is not present at the moment.
	 */

	/*
	 * Perform TAO test on incoming CC (SEG.CC) option, if any.
	 * - compare SEG.CC against cached CC from the same host, if any.
	 * - if SEG.CC > cached value, SYN must be new and is accepted
	 *	immediately: save new CC in the cache, mark the socket
	 *	connected, enter ESTABLISHED state, turn on flag to
	 *	send a SYN in the next segment.
	 *	A virtual advertised window is set in rcv_adv to
	 *	initialize SWS prevention.  Then enter normal segment
	 *	processing: drop SYN, process data and FIN.
	 * - otherwise do a normal 3-way handshake.
	 */
	if (tcp_do_rfc1644)
		tcp_hc_gettao(&sc->sc_inc, &tao);

	if ((to->to_flags & TOF_CC) != 0) {
		if (((tp->t_flags & TF_NOPUSH) != 0) &&
		    sc->sc_flags & SCF_CC && tao.tao_cc != 0 &&
		    CC_GT(to->to_cc, tao.tao_cc)) {
			sc->sc_rxtslot = 0;
			so = syncache_socket(sc, *sop, m);
			if (so != NULL) {
				tao.tao_cc = to->to_cc;
				tcp_hc_updatetao(&sc->sc_inc, TCP_HC_TAO_CC,
				    tao.tao_cc, 0);
				*sop = so;
			}
			syncache_free(sc);
			return (so != NULL);
		}
	} else {
		/*
		 * No CC option, but maybe CC.NEW: invalidate cached value.
		 */
		if (tcp_do_rfc1644) {
			tao.tao_cc = 0;
			tcp_hc_updatetao(&sc->sc_inc, TCP_HC_TAO_CC,
			    tao.tao_cc, 0);
		}
	}

	/*
	 * TAO test failed or there was no CC option,
	 * do a standard 3-way handshake.
	 */
#ifdef TCPDEBUG
	if (syncache_respond(sc, m, so) == 0) {
#else
	if (syncache_respond(sc, m) == 0) {
#endif
		syncache_insert(sc, sch);
		tcpstat.tcps_sndacks++;
		tcpstat.tcps_sndtotal++;
	} else {
		syncache_free(sc);
		tcpstat.tcps_sc_dropped++;
	}
	*sop = NULL;
	return (1);
}

#ifdef TCPDEBUG
static int
syncache_respond(sc, m, so)
	struct syncache *sc;
	struct mbuf *m;
	struct socket *so;
#else
static int
syncache_respond(sc, m)
	struct syncache *sc;
	struct mbuf *m;
#endif
{
	u_int8_t *optp;
	int optlen, error;
	u_int16_t tlen, hlen, mssopt;
	struct ip *ip = NULL;
	struct tcphdr *th;
	struct inpcb *inp;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
#endif

	hlen =
#ifdef INET6
	    (sc->sc_inc.inc_isipv6) ? sizeof(struct ip6_hdr) :
#endif
	    sizeof(struct ip);

	KASSERT((&sc->sc_inc) != NULL, ("syncache_respond with NULL in_conninfo pointer"));

	/* Determine MSS we advertize to other end of connection */
	mssopt = tcp_mssopt(&sc->sc_inc);

	/* Compute the size of the TCP options. */
	if (sc->sc_flags & SCF_NOOPT) {
		optlen = 0;
	} else {
		optlen = TCPOLEN_MAXSEG +
		    ((sc->sc_flags & SCF_WINSCALE) ? 4 : 0) +
		    ((sc->sc_flags & SCF_TIMESTAMP) ? TCPOLEN_TSTAMP_APPA : 0) +
		    ((sc->sc_flags & SCF_CC) ? TCPOLEN_CC_APPA * 2 : 0);
#ifdef TCP_SIGNATURE
|
2004-02-13 18:21:45 +00:00
|
|
|
optlen += (sc->sc_flags & SCF_SIGNATURE) ?
|
2004-02-14 21:49:48 +00:00
|
|
|
TCPOLEN_SIGNATURE + 2 : 0;
|
2004-02-13 18:21:45 +00:00
|
|
|
#endif
|
2004-06-23 21:04:37 +00:00
|
|
|
optlen += ((sc->sc_flags & SCF_SACK) ? 4 : 0);
|
2001-11-22 04:50:44 +00:00
|
|
|
}
|
|
|
|
tlen = hlen + sizeof(struct tcphdr) + optlen;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX
|
|
|
|
* assume that the entire packet will fit in a header mbuf
|
|
|
|
*/
|
|
|
|
KASSERT(max_linkhdr + tlen <= MHLEN, ("syncache: mbuf too small"));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* XXX shouldn't this reuse the mbuf if possible ?
|
|
|
|
* Create the IP+TCP header from scratch.
|
|
|
|
*/
|
|
|
|
if (m)
|
|
|
|
m_freem(m);
|
|
|
|
|
2003-02-19 05:47:46 +00:00
|
|
|
m = m_gethdr(M_DONTWAIT, MT_HEADER);
|
2001-11-22 04:50:44 +00:00
|
|
|
if (m == NULL)
|
|
|
|
return (ENOBUFS);
|
|
|
|
m->m_data += max_linkhdr;
|
|
|
|
m->m_len = tlen;
|
|
|
|
m->m_pkthdr.len = tlen;
|
|
|
|
m->m_pkthdr.rcvif = NULL;
|
2003-11-11 17:54:47 +00:00
|
|
|
inp = sc->sc_tp->t_inpcb;
|
|
|
|
INP_LOCK(inp);
|
2002-07-31 19:06:49 +00:00
|
|
|
#ifdef MAC
|
2004-05-04 02:11:47 +00:00
|
|
|
mac_create_mbuf_from_inpcb(inp, m);
|
2002-07-31 19:06:49 +00:00
|
|
|
#endif
|
2001-11-22 04:50:44 +00:00
|
|
|
|
|
|
|
#ifdef INET6
|
|
|
|
if (sc->sc_inc.inc_isipv6) {
|
|
|
|
ip6 = mtod(m, struct ip6_hdr *);
|
|
|
|
ip6->ip6_vfc = IPV6_VERSION;
|
|
|
|
ip6->ip6_nxt = IPPROTO_TCP;
|
|
|
|
ip6->ip6_src = sc->sc_inc.inc6_laddr;
|
|
|
|
ip6->ip6_dst = sc->sc_inc.inc6_faddr;
|
|
|
|
ip6->ip6_plen = htons(tlen - hlen);
|
|
|
|
/* ip6_hlim is set after checksum */
|
2004-07-17 19:44:13 +00:00
|
|
|
ip6->ip6_flow &= ~IPV6_FLOWLABEL_MASK;
|
|
|
|
ip6->ip6_flow |= sc->sc_flowlabel;
|
2001-11-22 04:50:44 +00:00
|
|
|
|
|
|
|
th = (struct tcphdr *)(ip6 + 1);
|
|
|
|
} else
|
|
|
|
#endif
|
|
|
|
{
|
|
|
|
ip = mtod(m, struct ip *);
|
|
|
|
ip->ip_v = IPVERSION;
|
|
|
|
ip->ip_hl = sizeof(struct ip) >> 2;
|
|
|
|
ip->ip_len = tlen;
|
|
|
|
ip->ip_id = 0;
|
|
|
|
ip->ip_off = 0;
|
|
|
|
ip->ip_sum = 0;
|
|
|
|
ip->ip_p = IPPROTO_TCP;
|
|
|
|
ip->ip_src = sc->sc_inc.inc_laddr;
|
|
|
|
ip->ip_dst = sc->sc_inc.inc_faddr;
|
2003-11-11 17:54:47 +00:00
|
|
|
ip->ip_ttl = inp->inp_ip_ttl; /* XXX */
|
|
|
|
ip->ip_tos = inp->inp_ip_tos; /* XXX */
|
2002-06-14 03:08:05 +00:00
|
|
|
|
|
|
|
/*
|
2002-12-20 11:24:02 +00:00
|
|
|
* See if we should do MTU discovery. Route lookups are
|
|
|
|
* expensive, so we will only unset the DF bit if:
|
2002-08-05 22:34:15 +00:00
|
|
|
*
|
|
|
|
* 1) path_mtu_discovery is disabled
|
|
|
|
* 2) the SCF_UNREACH flag has been set
|
2002-06-14 03:08:05 +00:00
|
|
|
*/
|
2002-12-20 11:24:02 +00:00
|
|
|
if (path_mtu_discovery && ((sc->sc_flags & SCF_UNREACH) == 0))
|
2002-06-14 03:08:05 +00:00
|
|
|
ip->ip_off |= IP_DF;
|
2001-11-22 04:50:44 +00:00
|
|
|
|
|
|
|
th = (struct tcphdr *)(ip + 1);
|
|
|
|
}
|
|
|
|
th->th_sport = sc->sc_inc.inc_lport;
|
|
|
|
th->th_dport = sc->sc_inc.inc_fport;
|
|
|
|
|
|
|
|
th->th_seq = htonl(sc->sc_iss);
|
|
|
|
th->th_ack = htonl(sc->sc_irs + 1);
|
|
|
|
th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
|
|
|
|
th->th_x2 = 0;
|
|
|
|
th->th_flags = TH_SYN|TH_ACK;
|
|
|
|
th->th_win = htons(sc->sc_wnd);
|
|
|
|
th->th_urp = 0;
|
|
|
|
|
|
|
|
/* Tack on the TCP options. */
|
2002-12-20 11:24:02 +00:00
|
|
|
if (optlen != 0) {
|
|
|
|
optp = (u_int8_t *)(th + 1);
|
|
|
|
*optp++ = TCPOPT_MAXSEG;
|
|
|
|
*optp++ = TCPOLEN_MAXSEG;
|
|
|
|
*optp++ = (mssopt >> 8) & 0xff;
|
|
|
|
*optp++ = mssopt & 0xff;
|
|
|
|
|
|
|
|
if (sc->sc_flags & SCF_WINSCALE) {
|
|
|
|
*((u_int32_t *)optp) = htonl(TCPOPT_NOP << 24 |
|
|
|
|
TCPOPT_WINDOW << 16 | TCPOLEN_WINDOW << 8 |
|
|
|
|
sc->sc_request_r_scale);
|
|
|
|
optp += 4;
|
|
|
|
}

		if (sc->sc_flags & SCF_TIMESTAMP) {
			u_int32_t *lp = (u_int32_t *)(optp);

			/* Form timestamp option per appendix A of RFC 1323. */
			*lp++ = htonl(TCPOPT_TSTAMP_HDR);
			*lp++ = htonl(ticks);
			*lp   = htonl(sc->sc_tsrecent);
			optp += TCPOLEN_TSTAMP_APPA;
		}
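		/*
		 * TCPOPT_TSTAMP_HDR packs NOP, NOP, kind 8, length 10 into
		 * a single word; it is followed by TSval (ticks) and TSecr
		 * (the peer's timestamp), TCPOLEN_TSTAMP_APPA = 12 bytes
		 * in all.
		 */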

		/*
		 * Send CC and CC.echo if we received CC from our peer.
		 */
		if (sc->sc_flags & SCF_CC) {
			u_int32_t *lp = (u_int32_t *)(optp);

			*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
			*lp++ = htonl(sc->sc_cc_send);
			*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CCECHO));
			*lp   = htonl(sc->sc_cc_recv);
			optp += TCPOLEN_CC_APPA * 2;
		}
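		/*
		 * CC and CC.echo are the T/TCP (RFC 1644) connection-count
		 * options; TCPOPT_CC_HDR() likewise prepends two NOPs so
		 * that each 6-byte option starts on a 32-bit boundary,
		 * TCPOLEN_CC_APPA = 8 bytes apiece.
		 */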

#ifdef TCP_SIGNATURE
		/*
		 * Handle TCP-MD5 passive opener response.
		 */
		if (sc->sc_flags & SCF_SIGNATURE) {
			u_int8_t *bp = optp;
			int i;

			*bp++ = TCPOPT_SIGNATURE;
			*bp++ = TCPOLEN_SIGNATURE;
			for (i = 0; i < TCP_SIGLEN; i++)
				*bp++ = 0;
			tcp_signature_compute(m, sizeof(struct ip), 0, optlen,
			    optp + 2, IPSEC_DIR_OUTBOUND);
			*bp++ = TCPOPT_NOP;
			*bp++ = TCPOPT_EOL;
			optp += TCPOLEN_SIGNATURE + 2;
		}
#endif /* TCP_SIGNATURE */
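		/*
		 * The RFC 2385 option is kind 19 (TCPOPT_SIGNATURE) with
		 * length 18: two kind/length bytes followed by a 16-byte
		 * MD5 digest, which tcp_signature_compute() writes at
		 * optp + 2 after the digest bytes are zeroed.  A NOP and
		 * an EOL pad the option out to 20 bytes.
		 */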

		if (sc->sc_flags & SCF_SACK) {
			*(u_int32_t *)optp = htonl(TCPOPT_SACK_PERMIT_HDR);
			optp += 4;
		}
	}

#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		th->th_sum = 0;
		th->th_sum = in6_cksum(m, IPPROTO_TCP, hlen, tlen - hlen);
		ip6->ip6_hlim = in6_selecthlim(NULL, NULL);
		error = ip6_output(m, NULL, NULL, 0, NULL, NULL, inp);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(tlen - hlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
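		/*
		 * Delayed checksum: th_sum holds only the pseudo-header
		 * sum here; CSUM_TCP and csum_data tell the driver (or the
		 * software fallback in ip_output()) where to deposit the
		 * final checksum over the TCP header and payload.
		 */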
#ifdef TCPDEBUG
		/*
		 * Trace.
		 */
		if (so != NULL && so->so_options & SO_DEBUG) {
			struct tcpcb *tp = sototcpcb(so);

			tcp_trace(TA_OUTPUT, tp->t_state, tp,
			    mtod(m, void *), th, 0);
		}
#endif
		error = ip_output(m, sc->sc_ipopts, NULL, 0, NULL, inp);
	}
	INP_UNLOCK(inp);
	return (error);
}

/*
 * cookie layers:
 *
 *	|. . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .|
 *	| peer iss                                                      |
 *	| MD5(laddr,faddr,secret,lport,fport)             |. . . . . . .|
 *	|                     0                       |(A)|             |
 *	(A): peer mss index
 */
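
/*
 * Concretely: the 7-bit data field packs the secret/window index in its
 * low 5 bits and the peer MSS index in the next 2 bits.  The upper bits
 * of an MD5 digest over the addresses, ports, and secret are XORed in as
 * an authenticator, and the result is further XORed with the peer's ISS
 * so the cookie can be handed out as our ISS.
 */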

/*
 * The values below are chosen to minimize the size of the tcp_secret
 * table, as well as providing roughly a 16 second lifetime for the cookie.
 */

#define SYNCOOKIE_WNDBITS	5	/* exposed bits for window indexing */
#define SYNCOOKIE_TIMESHIFT	1	/* scale ticks to window time units */
#define SYNCOOKIE_WNDMASK	((1 << SYNCOOKIE_WNDBITS) - 1)
#define SYNCOOKIE_NSECRETS	(1 << SYNCOOKIE_WNDBITS)
#define SYNCOOKIE_TIMEOUT \
    (hz * (1 << SYNCOOKIE_WNDBITS) / (1 << SYNCOOKIE_TIMESHIFT))
#define SYNCOOKIE_DATAMASK	((3 << SYNCOOKIE_WNDBITS) | SYNCOOKIE_WNDMASK)
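
/*
 * For example, with the common hz = 100, SYNCOOKIE_TIMEOUT works out to
 * 100 * 32 / 2 = 1600 ticks (16 seconds), and each of the 32 secret
 * windows spans half a second, matching the "roughly 16 second" lifetime
 * noted above.
 */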

static struct {
	u_int32_t	ts_secbits[4];
	u_int		ts_expire;
} tcp_secret[SYNCOOKIE_NSECRETS];

static int tcp_msstab[] = { 0, 536, 1460, 8960 };

static MD5_CTX syn_ctx;

#define MD5Add(v)	MD5Update(&syn_ctx, (u_char *)&v, sizeof(v))

struct md5_add {
	u_int32_t laddr, faddr;
	u_int32_t secbits[4];
	u_int16_t lport, fport;
};

#ifdef CTASSERT
CTASSERT(sizeof(struct md5_add) == 28);
#endif
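
/*
 * The CTASSERT documents that struct md5_add contains no padding:
 * 2 * 4 + 4 * 4 + 2 * 2 = 28 bytes, so MD5Add(add) hashes exactly the
 * addresses, secret bits, and ports with no indeterminate fill bytes.
 */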

/*
 * Consider the problem of a recreated (and retransmitted) cookie.  If the
 * original SYN was accepted, the connection is established.  The second
 * SYN is inflight, and if it arrives with an ISN that falls within the
 * receive window, the connection is killed.
 *
 * However, since cookies have other problems, this may not be worth
 * worrying about.
 */

static u_int32_t
syncookie_generate(struct syncache *sc, u_int32_t *flowid)
{
	u_int32_t md5_buffer[4];
	u_int32_t data;
	int idx, i;
	struct md5_add add;

	/* NB: single threaded; could add INP_INFO_WLOCK_ASSERT(&tcbinfo) */

	idx = ((ticks << SYNCOOKIE_TIMESHIFT) / hz) & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks) {
		for (i = 0; i < 4; i++)
			tcp_secret[idx].ts_secbits[i] = arc4random();
		tcp_secret[idx].ts_expire = ticks + SYNCOOKIE_TIMEOUT;
	}
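	/*
	 * The current time selects one of the 32 secret slots, and a
	 * slot's secret is refreshed lazily once it has expired, so a
	 * cookie remains verifiable roughly until its slot comes around
	 * and is rewritten (on the order of SYNCOOKIE_TIMEOUT).
	 */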
	for (data = sizeof(tcp_msstab) / sizeof(int) - 1; data > 0; data--)
		if (tcp_msstab[data] <= sc->sc_peer_mss)
			break;
	data = (data << SYNCOOKIE_WNDBITS) | idx;
	data ^= sc->sc_irs;				/* peer's iss */
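	/*
	 * For example, a peer MSS of 1460 selects index 2, while 1400
	 * rounds down to index 1 (536): only this 2-bit index fits in the
	 * cookie, so the peer's MSS is quantized to the largest tcp_msstab
	 * entry not exceeding it.
	 */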
	MD5Init(&syn_ctx);
#ifdef INET6
	if (sc->sc_inc.inc_isipv6) {
		MD5Add(sc->sc_inc.inc6_laddr);
		MD5Add(sc->sc_inc.inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = sc->sc_inc.inc_laddr.s_addr;
		add.faddr = sc->sc_inc.inc_faddr.s_addr;
	}
	add.lport = sc->sc_inc.inc_lport;
	add.fport = sc->sc_inc.inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= (md5_buffer[0] & ~SYNCOOKIE_WNDMASK);
	*flowid = md5_buffer[1];
	return (data);
}

static struct syncache *
syncookie_lookup(inc, th, so)
	struct in_conninfo *inc;
	struct tcphdr *th;
	struct socket *so;
{
	u_int32_t md5_buffer[4];
	struct syncache *sc;
	u_int32_t data;
	int wnd, idx;
	struct md5_add add;

	/* NB: single threaded; could add INP_INFO_WLOCK_ASSERT(&tcbinfo) */

	data = (th->th_ack - 1) ^ (th->th_seq - 1);	/* remove ISS */
	idx = data & SYNCOOKIE_WNDMASK;
	if (tcp_secret[idx].ts_expire < ticks ||
	    sototcpcb(so)->ts_recent + SYNCOOKIE_TIMEOUT < ticks)
		return (NULL);
	MD5Init(&syn_ctx);
#ifdef INET6
	if (inc->inc_isipv6) {
		MD5Add(inc->inc6_laddr);
		MD5Add(inc->inc6_faddr);
		add.laddr = 0;
		add.faddr = 0;
	} else
#endif
	{
		add.laddr = inc->inc_laddr.s_addr;
		add.faddr = inc->inc_faddr.s_addr;
	}
	add.lport = inc->inc_lport;
	add.fport = inc->inc_fport;
	add.secbits[0] = tcp_secret[idx].ts_secbits[0];
	add.secbits[1] = tcp_secret[idx].ts_secbits[1];
	add.secbits[2] = tcp_secret[idx].ts_secbits[2];
	add.secbits[3] = tcp_secret[idx].ts_secbits[3];
	MD5Add(add);
	MD5Final((u_char *)&md5_buffer, &syn_ctx);
	data ^= md5_buffer[0];
	if ((data & ~SYNCOOKIE_DATAMASK) != 0)
		return (NULL);
	data = data >> SYNCOOKIE_WNDBITS;
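	/*
	 * A genuine cookie leaves only the low 7 data bits set once the
	 * ISS and the recomputed digest have been XORed back out; the 25
	 * bits checked above act as the authenticator, so a blindly
	 * forged ACK passes only with probability about 2^-25.
	 */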

	sc = uma_zalloc(tcp_syncache.zone, M_NOWAIT);
	if (sc == NULL)
		return (NULL);
	/*
	 * Fill in the syncache values.
	 * XXX duplicate code from syncache_add
	 */
	sc->sc_ipopts = NULL;
	sc->sc_inc.inc_fport = inc->inc_fport;
	sc->sc_inc.inc_lport = inc->inc_lport;
	sc->sc_tp = sototcpcb(so);
#ifdef INET6
	sc->sc_inc.inc_isipv6 = inc->inc_isipv6;
	if (inc->inc_isipv6) {
		sc->sc_inc.inc6_faddr = inc->inc6_faddr;
		sc->sc_inc.inc6_laddr = inc->inc6_laddr;
		if (sc->sc_tp->t_inpcb->in6p_flags & IN6P_AUTOFLOWLABEL)
			sc->sc_flowlabel = md5_buffer[1] & IPV6_FLOWLABEL_MASK;
	} else
#endif
	{
		sc->sc_inc.inc_faddr = inc->inc_faddr;
		sc->sc_inc.inc_laddr = inc->inc_laddr;
	}
	sc->sc_irs = th->th_seq - 1;
	sc->sc_iss = th->th_ack - 1;
	wnd = sbspace(&so->so_rcv);
	wnd = imax(wnd, 0);
	wnd = imin(wnd, TCP_MAXWIN);
	sc->sc_wnd = wnd;
	sc->sc_flags = 0;
	sc->sc_rxtslot = 0;
	sc->sc_peer_mss = tcp_msstab[data];
	return (sc);
}