Remove bogus check for accept queue length and associated failure handling
from the incoming SYN handling section of tcp_input().

Enforcement of the accept queue limit is done by sonewconn() after the
3WHS has completed.  An earlier check, before a connection request enters
the SYN cache to await the full handshake, is not necessary.  Worse, it
limits the effectiveness of the syncache: legitimate and illegitimate
connections alike are kept from entering it and being shaken out there
before we hit the real limit, which may well have cleared again by the
time the handshake completes.
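
For illustration only, and not part of this change: below is a minimal
standalone sketch of the accept queue check that sonewconn() applies once
the 3WHS has completed.  The so_qlen/so_qlimit field names and the
traditional 3/2 backlog fudge factor follow the historic behaviour, but
struct lsocket and model_sonewconn() are made-up names and the code is a
simplified userland model, not the kernel implementation.

#include <stdio.h>

struct lsocket {
	int	so_qlen;	/* completed connections awaiting accept() */
	int	so_qlimit;	/* backlog requested via listen(2) */
};

/*
 * Simplified model of sonewconn()'s admission decision: hand out a child
 * connection only while the accept queue is below 3/2 of its backlog.
 */
static int
model_sonewconn(struct lsocket *head)
{

	if (head->so_qlen > 3 * head->so_qlimit / 2)
		return (0);	/* queue full: the connection is dropped */
	head->so_qlen++;	/* queue has room: a child socket is created */
	return (1);
}

int
main(void)
{
	struct lsocket lso = { .so_qlen = 0, .so_qlimit = 4 };
	int i;

	for (i = 0; i < 10; i++)
		printf("connection %d: %s\n", i,
		    model_sonewconn(&lso) ? "queued" : "dropped");
	return (0);
}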

Change return value of syncache_add() to void.  No status communication
is required.
Andre Oppermann 2007-04-20 14:34:54 +00:00
parent e207f80039
commit 4d6e713043
4 changed files with 23 additions and 35 deletions


@@ -966,24 +966,18 @@ tcp_input(struct mbuf *m, int off0)
 		 * SYN appears to be valid. Create compressed TCP state
 		 * for syncache.
 		 */
-		if (so->so_qlen <= so->so_qlimit) {
 #ifdef TCPDEBUG
-			if (so->so_options & SO_DEBUG)
-				tcp_trace(TA_INPUT, ostate, tp,
-				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
+		if (so->so_options & SO_DEBUG)
+			tcp_trace(TA_INPUT, ostate, tp,
+			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
 #endif
-			tcp_dooptions(&to, optp, optlen, TO_SYN);
-			if (!syncache_add(&inc, &to, th, inp, &so, m))
-				goto dropunlock;
-			/*
-			 * Entry added to syncache, mbuf used to
-			 * send SYN-ACK packet. Everything unlocked
-			 * already.
-			 */
-			return;
-		}
-		/* Catch all. Everthing that makes it down here is junk. */
-		goto dropunlock;
+		tcp_dooptions(&to, optp, optlen, TO_SYN);
+		syncache_add(&inc, &to, th, inp, &so, m);
+		/*
+		 * Entry added to syncache and mbuf consumed.
+		 * Everything unlocked already by syncache_add().
+		 */
+		return;
 	}
 	/*


@@ -966,24 +966,18 @@ tcp_input(struct mbuf *m, int off0)
 		 * SYN appears to be valid. Create compressed TCP state
 		 * for syncache.
 		 */
-		if (so->so_qlen <= so->so_qlimit) {
 #ifdef TCPDEBUG
-			if (so->so_options & SO_DEBUG)
-				tcp_trace(TA_INPUT, ostate, tp,
-				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
+		if (so->so_options & SO_DEBUG)
+			tcp_trace(TA_INPUT, ostate, tp,
+			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
 #endif
-			tcp_dooptions(&to, optp, optlen, TO_SYN);
-			if (!syncache_add(&inc, &to, th, inp, &so, m))
-				goto dropunlock;
-			/*
-			 * Entry added to syncache, mbuf used to
-			 * send SYN-ACK packet. Everything unlocked
-			 * already.
-			 */
-			return;
-		}
-		/* Catch all. Everthing that makes it down here is junk. */
-		goto dropunlock;
+		tcp_dooptions(&to, optp, optlen, TO_SYN);
+		syncache_add(&inc, &to, th, inp, &so, m);
+		/*
+		 * Entry added to syncache and mbuf consumed.
+		 * Everything unlocked already by syncache_add().
+		 */
+		return;
 	}
 	/*


@@ -832,7 +832,7 @@ syncache_expand(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
  * consume all available buffer space if it were ACKed. By not ACKing
  * the data, we avoid this DoS scenario.
  */
-int
+void
 syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
     struct inpcb *inp, struct socket **lsop, struct mbuf *m)
 {
@@ -1079,7 +1079,7 @@ syncache_add(struct in_conninfo *inc, struct tcpopt *to, struct tcphdr *th,
 #endif
 	*lsop = NULL;
 	m_freem(m);
-	return (1);
+	return;
 }

 static int


@@ -542,7 +542,7 @@ void syncache_init(void);
 void syncache_unreach(struct in_conninfo *, struct tcphdr *);
 int syncache_expand(struct in_conninfo *, struct tcpopt *,
	     struct tcphdr *, struct socket **, struct mbuf *);
-int syncache_add(struct in_conninfo *, struct tcpopt *,
+void syncache_add(struct in_conninfo *, struct tcpopt *,
	     struct tcphdr *, struct inpcb *, struct socket **, struct mbuf *);
 void syncache_chkrst(struct in_conninfo *, struct tcphdr *);
 void syncache_badack(struct in_conninfo *);