freebsd-skq/sys/netinet/tcp_timer.c


/*-
* Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)tcp_timer.c 8.2 (Berkeley) 5/24/95
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"
#include "opt_rss.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/protosw.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <net/if.h>
#include <net/route.h>
#include <net/rss_config.h>
#include <net/vnet.h>
#include <net/netisr.h>
#include <netinet/cc.h>
#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_rss.h>
#include <netinet/in_systm.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
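
/*
 * Timer-related sysctls.  The backing variables are stored in ticks;
 * the sysctl_msec_to_ticks handler converts to and from milliseconds
 * when the values are read or written.
 */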
int tcp_keepinit;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINIT, keepinit, CTLTYPE_INT|CTLFLAG_RW,
&tcp_keepinit, 0, sysctl_msec_to_ticks, "I", "time to establish connection");
int tcp_keepidle;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPIDLE, keepidle, CTLTYPE_INT|CTLFLAG_RW,
&tcp_keepidle, 0, sysctl_msec_to_ticks, "I", "time before keepalive probes begin");
int tcp_keepintvl;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_KEEPINTVL, keepintvl, CTLTYPE_INT|CTLFLAG_RW,
&tcp_keepintvl, 0, sysctl_msec_to_ticks, "I", "time between keepalive probes");
int tcp_delacktime;
SYSCTL_PROC(_net_inet_tcp, TCPCTL_DELACKTIME, delacktime, CTLTYPE_INT|CTLFLAG_RW,
&tcp_delacktime, 0, sysctl_msec_to_ticks, "I",
"Time before a delayed ACK is sent");
int tcp_msl;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, msl, CTLTYPE_INT|CTLFLAG_RW,
&tcp_msl, 0, sysctl_msec_to_ticks, "I", "Maximum segment lifetime");
int tcp_rexmit_min;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_min, CTLTYPE_INT|CTLFLAG_RW,
&tcp_rexmit_min, 0, sysctl_msec_to_ticks, "I",
"Minimum Retransmission Timeout");
int tcp_rexmit_slop;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, rexmit_slop, CTLTYPE_INT|CTLFLAG_RW,
&tcp_rexmit_slop, 0, sysctl_msec_to_ticks, "I",
"Retransmission Timer Slop");
static int always_keepalive = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, always_keepalive, CTLFLAG_RW,
&always_keepalive, 0, "Assume SO_KEEPALIVE on all TCP connections");
int tcp_fast_finwait2_recycle = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, fast_finwait2_recycle, CTLFLAG_RW,
&tcp_fast_finwait2_recycle, 0,
"Recycle closed FIN_WAIT_2 connections faster");
int tcp_finwait2_timeout;
SYSCTL_PROC(_net_inet_tcp, OID_AUTO, finwait2_timeout, CTLTYPE_INT|CTLFLAG_RW,
&tcp_finwait2_timeout, 0, sysctl_msec_to_ticks, "I", "FIN-WAIT2 timeout");
int tcp_keepcnt = TCPTV_KEEPCNT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, keepcnt, CTLFLAG_RW, &tcp_keepcnt, 0,
"Number of keepalive probes to send");
/* max idle probes */
int tcp_maxpersistidle;
static int tcp_rexmit_drop_options = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rexmit_drop_options, CTLFLAG_RW,
&tcp_rexmit_drop_options, 0,
"Drop TCP options from 3rd and later retransmitted SYN");
static VNET_DEFINE(int, tcp_pmtud_blackhole_detect);
#define V_tcp_pmtud_blackhole_detect VNET(tcp_pmtud_blackhole_detect)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_detection,
CTLFLAG_RW|CTLFLAG_VNET,
&VNET_NAME(tcp_pmtud_blackhole_detect), 0,
"Path MTU Discovery Black Hole Detection Enabled");
static VNET_DEFINE(int, tcp_pmtud_blackhole_activated);
#define V_tcp_pmtud_blackhole_activated \
VNET(tcp_pmtud_blackhole_activated)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_activated,
CTLFLAG_RD|CTLFLAG_VNET,
&VNET_NAME(tcp_pmtud_blackhole_activated), 0,
"Path MTU Discovery Black Hole Detection, Activation Count");
static VNET_DEFINE(int, tcp_pmtud_blackhole_activated_min_mss);
#define V_tcp_pmtud_blackhole_activated_min_mss \
VNET(tcp_pmtud_blackhole_activated_min_mss)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_activated_min_mss,
CTLFLAG_RD|CTLFLAG_VNET,
&VNET_NAME(tcp_pmtud_blackhole_activated_min_mss), 0,
"Path MTU Discovery Black Hole Detection, Activation Count at min MSS");
static VNET_DEFINE(int, tcp_pmtud_blackhole_failed);
#define V_tcp_pmtud_blackhole_failed VNET(tcp_pmtud_blackhole_failed)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_failed,
CTLFLAG_RD|CTLFLAG_VNET,
&VNET_NAME(tcp_pmtud_blackhole_failed), 0,
"Path MTU Discovery Black Hole Detection, Failure Count");
#ifdef INET
static VNET_DEFINE(int, tcp_pmtud_blackhole_mss) = 1200;
#define V_tcp_pmtud_blackhole_mss VNET(tcp_pmtud_blackhole_mss)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, pmtud_blackhole_mss,
CTLFLAG_RW|CTLFLAG_VNET,
&VNET_NAME(tcp_pmtud_blackhole_mss), 0,
"Path MTU Discovery Black Hole Detection lowered MSS");
#endif
#ifdef INET6
static VNET_DEFINE(int, tcp_v6pmtud_blackhole_mss) = 1220;
#define V_tcp_v6pmtud_blackhole_mss VNET(tcp_v6pmtud_blackhole_mss)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, v6pmtud_blackhole_mss,
CTLFLAG_RW|CTLFLAG_VNET,
&VNET_NAME(tcp_v6pmtud_blackhole_mss), 0,
"Path MTU Discovery IPv6 Black Hole Detection lowered MSS");
#endif
#ifdef RSS
static int per_cpu_timers = 1;
#else
static int per_cpu_timers = 0;
#endif
SYSCTL_INT(_net_inet_tcp, OID_AUTO, per_cpu_timers, CTLFLAG_RW,
&per_cpu_timers, 0, "run tcp timers on all cpus");
#if 0
#define INP_CPU(inp) (per_cpu_timers ? (!CPU_ABSENT(((inp)->inp_flowid % (mp_maxid+1))) ? \
((inp)->inp_flowid % (mp_maxid+1)) : curcpu) : 0)
#endif
/*
* Map the given inp to a CPU id.
*
* This queries RSS if it's compiled in, else it defaults to the current
* CPU ID.
*/
static inline int
inp_to_cpuid(struct inpcb *inp)
{
u_int cpuid;
#ifdef RSS
if (per_cpu_timers) {
cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
if (cpuid == NETISR_CPUID_NONE)
return (curcpu); /* XXX */
else
return (cpuid);
}
#else
/* Legacy, pre-RSS behaviour */
if (per_cpu_timers) {
/*
* We don't have a flowid -> cpuid mapping, so cheat and
* just map unknown cpuids to curcpu. Not the best, but
* apparently better than defaulting to swi 0.
*/
cpuid = inp->inp_flowid % (mp_maxid + 1);
if (!CPU_ABSENT(cpuid))
return (cpuid);
return (curcpu);
}
#endif
/* Default for RSS and non-RSS - cpuid 0 */
return (0);
}
/*
 * TCP protocol timeout routine called every 500 ms.
 * Updates timestamps used for TCP and causes finite state
 * machine actions if timers expire.
 */
void
tcp_slowtimo(void)
{
VNET_ITERATOR_DECL(vnet_iter);
VNET_LIST_RLOCK_NOSLEEP();
VNET_FOREACH(vnet_iter) {
CURVNET_SET(vnet_iter);
(void) tcp_tw_2msl_scan(0);
CURVNET_RESTORE();
}
VNET_LIST_RUNLOCK_NOSLEEP();
}
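
/*
 * Exponential backoff tables, indexed by t_rxtshift.  tcp_syn_backoff
 * caps SYN retransmits at 64 times the base timeout, while tcp_backoff
 * caps data retransmits at 512 times the RTO; e.g. the third retransmit
 * (t_rxtshift == 3) of a data segment waits 8 * TCP_REXMTVAL(tp).
 * tcp_totbackoff below must equal the sum of the tcp_backoff[] entries.
 */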
int tcp_syn_backoff[TCP_MAXRXTSHIFT + 1] =
{ 1, 1, 1, 1, 1, 2, 4, 8, 16, 32, 64, 64, 64 };
int tcp_backoff[TCP_MAXRXTSHIFT + 1] =
{ 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 512, 512, 512 };
static int tcp_totbackoff = 2559; /* sum of tcp_backoff[] */
/*
* TCP timer processing.
*/
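
/*
 * Delayed ACK timer: when it fires, any ACK we have been holding back
 * is forced out immediately via TF_ACKNOW and tcp_output().
 */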
void
tcp_timer_delack(void *xtp)
{
struct tcpcb *tp = xtp;
struct inpcb *inp;
CURVNET_SET(tp->t_vnet);
inp = tp->t_inpcb;
KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
INP_WLOCK(inp);
if (callout_pending(&tp->t_timers->tt_delack) ||
!callout_active(&tp->t_timers->tt_delack)) {
INP_WUNLOCK(inp);
CURVNET_RESTORE();
return;
}
callout_deactivate(&tp->t_timers->tt_delack);
if ((inp->inp_flags & INP_DROPPED) != 0) {
INP_WUNLOCK(inp);
CURVNET_RESTORE();
return;
}
KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
("%s: tp %p tcpcb can't be stopped here", __func__, tp));
KASSERT((tp->t_timers->tt_flags & TT_DELACK) != 0,
("%s: tp %p delack callout should be running", __func__, tp));
tp->t_flags |= TF_ACKNOW;
TCPSTAT_INC(tcps_delack);
(void) tcp_output(tp);
INP_WUNLOCK(inp);
CURVNET_RESTORE();
}
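
/*
 * 2MSL timer: times out FIN_WAIT_2 connections (TIME_WAIT entries are
 * handled separately by tcp_tw_2msl_scan()).  An idle FIN_WAIT_2
 * connection is closed once its idle time exceeds TP_MAXIDLE(); with
 * fast_finwait2_recycle enabled it is closed as soon as the receive
 * side has been shut down.
 */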
void
tcp_timer_2msl(void *xtp)
{
struct tcpcb *tp = xtp;
struct inpcb *inp;
CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
int ostate;
ostate = tp->t_state;
#endif
INP_INFO_RLOCK(&V_tcbinfo);
inp = tp->t_inpcb;
KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
INP_WLOCK(inp);
tcp_free_sackholes(tp);
if (callout_pending(&tp->t_timers->tt_2msl) ||
!callout_active(&tp->t_timers->tt_2msl)) {
INP_WUNLOCK(tp->t_inpcb);
INP_INFO_RUNLOCK(&V_tcbinfo);
CURVNET_RESTORE();
return;
}
callout_deactivate(&tp->t_timers->tt_2msl);
if ((inp->inp_flags & INP_DROPPED) != 0) {
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
CURVNET_RESTORE();
return;
}
KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
("%s: tp %p tcpcb can't be stopped here", __func__, tp));
KASSERT((tp->t_timers->tt_flags & TT_2MSL) != 0,
("%s: tp %p 2msl callout should be running", __func__, tp));
/*
 * 2 MSL timeout in shutdown went off.  If we're closed but
 * still waiting for peer to close and connection has been idle
 * too long, delete connection control block.  Otherwise, check
 * again in a bit.
 *
 * If in TIME_WAIT state just ignore as this timeout is handled in
 * tcp_tw_2msl_scan().
 *
 * If fast recycle of FIN_WAIT_2 connections is enabled, we are in
 * FIN_WAIT_2, and the receiver has closed, there's no point in
 * hanging onto a FIN_WAIT_2 socket.  Just close it.  Ignore the
 * fact that there were recent incoming segments.
 */
if ((inp->inp_flags & INP_TIMEWAIT) != 0) {
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
CURVNET_RESTORE();
return;
}
if (tcp_fast_finwait2_recycle && tp->t_state == TCPS_FIN_WAIT_2 &&
tp->t_inpcb && tp->t_inpcb->inp_socket &&
(tp->t_inpcb->inp_socket->so_rcv.sb_state & SBS_CANTRCVMORE)) {
TCPSTAT_INC(tcps_finwait2_drops);
tp = tcp_close(tp);
} else {
if (ticks - tp->t_rcvtime <= TP_MAXIDLE(tp)) {
if (!callout_reset(&tp->t_timers->tt_2msl,
TP_KEEPINTVL(tp), tcp_timer_2msl, tp)) {
tp->t_timers->tt_flags &= ~TT_2MSL_RST;
}
} else
tp = tcp_close(tp);
}
#ifdef TCPDEBUG
if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
PRU_SLOWTIMO);
#endif
TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
if (tp != NULL)
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
CURVNET_RESTORE();
}
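
/*
 * Keepalive timer: if it fires before the connection is established,
 * the connection attempt has timed out and is dropped.  Otherwise an
 * out-of-window probe segment is sent, and the connection is dropped
 * once the idle time reaches TP_KEEPIDLE() + TP_MAXIDLE().
 */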
void
tcp_timer_keep(void *xtp)
{
struct tcpcb *tp = xtp;
struct tcptemp *t_template;
struct inpcb *inp;
CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
int ostate;
ostate = tp->t_state;
#endif
INP_INFO_RLOCK(&V_tcbinfo);
inp = tp->t_inpcb;
KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
INP_WLOCK(inp);
if (callout_pending(&tp->t_timers->tt_keep) ||
!callout_active(&tp->t_timers->tt_keep)) {
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
CURVNET_RESTORE();
return;
}
callout_deactivate(&tp->t_timers->tt_keep);
if ((inp->inp_flags & INP_DROPPED) != 0) {
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
CURVNET_RESTORE();
return;
}
KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
("%s: tp %p tcpcb can't be stopped here", __func__, tp));
KASSERT((tp->t_timers->tt_flags & TT_KEEP) != 0,
("%s: tp %p keep callout should be running", __func__, tp));
/*
* Keep-alive timer went off; send something
* or drop connection if idle for too long.
*/
TCPSTAT_INC(tcps_keeptimeo);
if (tp->t_state < TCPS_ESTABLISHED)
goto dropit;
if ((always_keepalive || inp->inp_socket->so_options & SO_KEEPALIVE) &&
tp->t_state <= TCPS_CLOSING) {
if (ticks - tp->t_rcvtime >= TP_KEEPIDLE(tp) + TP_MAXIDLE(tp))
goto dropit;
/*
* Send a packet designed to force a response
* if the peer is up and reachable:
* either an ACK if the connection is still alive,
* or an RST if the peer has closed the connection
* due to timeout or reboot.
* Using sequence number tp->snd_una-1
* causes the transmitted zero-length segment
* to lie outside the receive window;
* by the protocol spec, this requires the
* correspondent TCP to respond.
*/
TCPSTAT_INC(tcps_keepprobe);
t_template = tcpip_maketemplate(inp);
if (t_template) {
tcp_respond(tp, t_template->tt_ipgen,
&t_template->tt_t, (struct mbuf *)NULL,
tp->rcv_nxt, tp->snd_una - 1, 0);
free(t_template, M_TEMP);
}
if (!callout_reset(&tp->t_timers->tt_keep, TP_KEEPINTVL(tp),
tcp_timer_keep, tp)) {
tp->t_timers->tt_flags &= ~TT_KEEP_RST;
}
} else if (!callout_reset(&tp->t_timers->tt_keep, TP_KEEPIDLE(tp),
tcp_timer_keep, tp)) {
tp->t_timers->tt_flags &= ~TT_KEEP_RST;
}
#ifdef TCPDEBUG
if (inp->inp_socket->so_options & SO_DEBUG)
tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
PRU_SLOWTIMO);
#endif
TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
CURVNET_RESTORE();
return;
dropit:
TCPSTAT_INC(tcps_keepdrops);
tp = tcp_drop(tp, ETIMEDOUT);
#ifdef TCPDEBUG
if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
PRU_SLOWTIMO);
#endif
TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
if (tp != NULL)
INP_WUNLOCK(tp->t_inpcb);
INP_INFO_RUNLOCK(&V_tcbinfo);
CURVNET_RESTORE();
}
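
/*
 * Persist timer: the peer has advertised a zero window while we still
 * have data queued.  Periodically force a single byte out so that a
 * window update from the peer can never be lost.
 */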
void
tcp_timer_persist(void *xtp)
{
struct tcpcb *tp = xtp;
struct inpcb *inp;
CURVNET_SET(tp->t_vnet);
#ifdef TCPDEBUG
int ostate;
ostate = tp->t_state;
#endif
INP_INFO_RLOCK(&V_tcbinfo);
inp = tp->t_inpcb;
KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
INP_WLOCK(inp);
if (callout_pending(&tp->t_timers->tt_persist) ||
!callout_active(&tp->t_timers->tt_persist)) {
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
CURVNET_RESTORE();
return;
}
callout_deactivate(&tp->t_timers->tt_persist);
if ((inp->inp_flags & INP_DROPPED) != 0) {
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
CURVNET_RESTORE();
return;
}
KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
("%s: tp %p tcpcb can't be stopped here", __func__, tp));
KASSERT((tp->t_timers->tt_flags & TT_PERSIST) != 0,
("%s: tp %p persist callout should be running", __func__, tp));
/*
 * Persistence timer into zero window.
 * Force a byte to be output, if possible.
 */
TCPSTAT_INC(tcps_persisttimeo);
/*
* Hack: if the peer is dead/unreachable, we do not
* time out if the window is closed. After a full
* backoff, drop the connection if the idle time
* (no responses to probes) reaches the maximum
* backoff that we would use if retransmitting.
*/
if (tp->t_rxtshift == TCP_MAXRXTSHIFT &&
(ticks - tp->t_rcvtime >= tcp_maxpersistidle ||
ticks - tp->t_rcvtime >= TCP_REXMTVAL(tp) * tcp_totbackoff)) {
TCPSTAT_INC(tcps_persistdrop);
tp = tcp_drop(tp, ETIMEDOUT);
goto out;
}
/*
* If the user has closed the socket then drop a persisting
* connection after a much reduced timeout.
*/
if (tp->t_state > TCPS_CLOSE_WAIT &&
(ticks - tp->t_rcvtime) >= TCPTV_PERSMAX) {
TCPSTAT_INC(tcps_persistdrop);
tp = tcp_drop(tp, ETIMEDOUT);
goto out;
}
tcp_setpersist(tp);
tp->t_flags |= TF_FORCEDATA;
(void) tcp_output(tp);
tp->t_flags &= ~TF_FORCEDATA;
out:
#ifdef TCPDEBUG
if (tp != NULL && tp->t_inpcb->inp_socket->so_options & SO_DEBUG)
tcp_trace(TA_USER, ostate, tp, NULL, NULL, PRU_SLOWTIMO);
#endif
TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
if (tp != NULL)
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
CURVNET_RESTORE();
}
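
/*
 * Retransmission timer: no ACK arrived within the current RTO, so back
 * off the timer (see tcp_backoff[] above), signal the congestion
 * control module via CC_RTO, and retransmit from snd_una.  After
 * TCP_MAXRXTSHIFT consecutive timeouts the connection is dropped.
 */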
void
tcp_timer_rexmt(void *xtp)
{
struct tcpcb *tp = xtp;
CURVNET_SET(tp->t_vnet);
int rexmt;
int headlocked;
struct inpcb *inp;
#ifdef TCPDEBUG
int ostate;
ostate = tp->t_state;
#endif
INP_INFO_RLOCK(&V_tcbinfo);
inp = tp->t_inpcb;
KASSERT(inp != NULL, ("%s: tp %p tp->t_inpcb == NULL", __func__, tp));
INP_WLOCK(inp);
if (callout_pending(&tp->t_timers->tt_rexmt) ||
!callout_active(&tp->t_timers->tt_rexmt)) {
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
CURVNET_RESTORE();
return;
}
callout_deactivate(&tp->t_timers->tt_rexmt);
if ((inp->inp_flags & INP_DROPPED) != 0) {
INP_WUNLOCK(inp);
INP_INFO_RUNLOCK(&V_tcbinfo);
CURVNET_RESTORE();
return;
}
KASSERT((tp->t_timers->tt_flags & TT_STOPPED) == 0,
("%s: tp %p tcpcb can't be stopped here", __func__, tp));
KASSERT((tp->t_timers->tt_flags & TT_REXMT) != 0,
("%s: tp %p rexmt callout should be running", __func__, tp));
tcp_free_sackholes(tp);
/*
* Retransmission timer went off. Message has not
* been acked within retransmit interval. Back off
* to a longer retransmit interval and retransmit one segment.
*/
if (++tp->t_rxtshift > TCP_MAXRXTSHIFT) {
tp->t_rxtshift = TCP_MAXRXTSHIFT;
TCPSTAT_INC(tcps_timeoutdrop);
tp = tcp_drop(tp, tp->t_softerror ?
tp->t_softerror : ETIMEDOUT);
headlocked = 1;
goto out;
}
INP_INFO_RUNLOCK(&V_tcbinfo);
headlocked = 0;
if (tp->t_state == TCPS_SYN_SENT) {
/*
* If the SYN was retransmitted, indicate CWND to be
* limited to 1 segment in cc_conn_init().
*/
tp->snd_cwnd = 1;
} else if (tp->t_rxtshift == 1) {
/*
* first retransmit; record ssthresh and cwnd so they can
* be recovered if this turns out to be a "bad" retransmit.
* A retransmit is considered "bad" if an ACK for this
* segment is received within RTT/2 interval; the assumption
* here is that the ACK was already in flight. See
* "On Estimating End-to-End Network Path Properties" by
* Allman and Paxson for more details.
*/
tp->snd_cwnd_prev = tp->snd_cwnd;
tp->snd_ssthresh_prev = tp->snd_ssthresh;
tp->snd_recover_prev = tp->snd_recover;
if (IN_FASTRECOVERY(tp->t_flags))
tp->t_flags |= TF_WASFRECOVERY;
else
tp->t_flags &= ~TF_WASFRECOVERY;
if (IN_CONGRECOVERY(tp->t_flags))
tp->t_flags |= TF_WASCRECOVERY;
else
tp->t_flags &= ~TF_WASCRECOVERY;
tp->t_badrxtwin = ticks + (tp->t_srtt >> (TCP_RTT_SHIFT + 1));
tp->t_flags |= TF_PREVVALID;
} else
tp->t_flags &= ~TF_PREVVALID;
TCPSTAT_INC(tcps_rexmttimeo);
if (tp->t_state == TCPS_SYN_SENT)
rexmt = TCPTV_RTOBASE * tcp_syn_backoff[tp->t_rxtshift];
else
rexmt = TCP_REXMTVAL(tp) * tcp_backoff[tp->t_rxtshift];
TCPT_RANGESET(tp->t_rxtcur, rexmt,
tp->t_rttmin, TCPTV_REXMTMAX);
/*
 * Enter the Path MTU Black-hole Detection (PLPMTUD) path if the
 * connection is in the ESTABLISHED or FIN_WAIT_1 state.  The latter
 * is needed because, if the amount of data we send is very small, we
 * could send it in a couple of packets and proceed straight to FIN
 * without ever being in ESTABLISHED when the retransmit timer fires.
 */
if (V_tcp_pmtud_blackhole_detect &&
    (tp->t_state == TCPS_ESTABLISHED ||
    tp->t_state == TCPS_FIN_WAIT_1)) {
int optlen;
#ifdef INET6
int isipv6;
#endif
if (((tp->t_flags2 & (TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) ==
(TF2_PLPMTU_PMTUD|TF2_PLPMTU_MAXSEGSNT)) &&
(tp->t_rxtshift <= 2)) {
/*
* Enter Path MTU Black-hole Detection mechanism:
* - Disable Path MTU Discovery (IP "DF" bit).
* - Reduce MTU to lower value than what we
* negotiated with peer.
*/
/* Record that we may have found a black hole. */
tp->t_flags2 |= TF2_PLPMTU_BLACKHOLE;
/* Keep track of previous MSS. */
optlen = tp->t_maxopd - tp->t_maxseg;
tp->t_pmtud_saved_maxopd = tp->t_maxopd;
/*
* Reduce the MSS to blackhole value or to the default
* in an attempt to retransmit.
*/
#ifdef INET6
isipv6 = (tp->t_inpcb->inp_vflag & INP_IPV6) ? 1 : 0;
if (isipv6 &&
tp->t_maxopd > V_tcp_v6pmtud_blackhole_mss) {
/* Use the sysctl tuneable blackhole MSS. */
tp->t_maxopd = V_tcp_v6pmtud_blackhole_mss;
V_tcp_pmtud_blackhole_activated++;
} else if (isipv6) {
/* Use the default MSS. */
tp->t_maxopd = V_tcp_v6mssdflt;
/*
* Disable Path MTU Discovery when we switch to
* minmss.
*/
tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
V_tcp_pmtud_blackhole_activated_min_mss++;
}
#endif
#if defined(INET6) && defined(INET)
else
#endif
#ifdef INET
if (tp->t_maxopd > V_tcp_pmtud_blackhole_mss) {
/* Use the sysctl tuneable blackhole MSS. */
tp->t_maxopd = V_tcp_pmtud_blackhole_mss;
V_tcp_pmtud_blackhole_activated++;
} else {
/* Use the default MSS. */
tp->t_maxopd = V_tcp_mssdflt;
/*
* Disable Path MTU Discovery when we switch to
* minmss.
*/
tp->t_flags2 &= ~TF2_PLPMTU_PMTUD;
V_tcp_pmtud_blackhole_activated_min_mss++;
}
#endif
tp->t_maxseg = tp->t_maxopd - optlen;
/*
* Reset the slow-start flight size
* as it may depend on the new MSS.
*/
if (CC_ALGO(tp)->conn_init != NULL)
CC_ALGO(tp)->conn_init(tp->ccv);
} else {
/*
* If further retransmissions are still unsuccessful
* with a lowered MTU, maybe this isn't a blackhole and
* we restore the previous MSS and blackhole detection
* flags.
*/
if ((tp->t_flags2 & TF2_PLPMTU_BLACKHOLE) &&
(tp->t_rxtshift > 4)) {
tp->t_flags2 |= TF2_PLPMTU_PMTUD;
tp->t_flags2 &= ~TF2_PLPMTU_BLACKHOLE;
optlen = tp->t_maxopd - tp->t_maxseg;
tp->t_maxopd = tp->t_pmtud_saved_maxopd;
tp->t_maxseg = tp->t_maxopd - optlen;
V_tcp_pmtud_blackhole_failed++;
/*
* Reset the slow-start flight size as it
* may depend on the new MSS.
*/
if (CC_ALGO(tp)->conn_init != NULL)
CC_ALGO(tp)->conn_init(tp->ccv);
}
}
}
/*
* Disable RFC1323 and SACK if we haven't got any response to
* our third SYN to work around some broken terminal servers
* (most of which have hopefully been retired) that have bad VJ
* header compression code which trashes TCP segments containing
* unknown-to-them TCP options.
*/
if (tcp_rexmit_drop_options && (tp->t_state == TCPS_SYN_SENT) &&
(tp->t_rxtshift == 3))
tp->t_flags &= ~(TF_REQ_SCALE|TF_REQ_TSTMP|TF_SACK_PERMIT);
/*
* If we backed off this far, our srtt estimate is probably bogus.
* Clobber it so we'll take the next rtt measurement as our srtt;
* move the current srtt into rttvar to keep the current
* retransmit times until then.
*/
if (tp->t_rxtshift > TCP_MAXRXTSHIFT / 4) {
#ifdef INET6
if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0)
in6_losing(tp->t_inpcb);
#endif
tp->t_rttvar += (tp->t_srtt >> TCP_RTT_SHIFT);
tp->t_srtt = 0;
}
tp->snd_nxt = tp->snd_una;
tp->snd_recover = tp->snd_max;
/*
* Force a segment to be sent.
*/
tp->t_flags |= TF_ACKNOW;
/*
* If timing a segment in this window, stop the timer.
*/
tp->t_rtttime = 0;
cc_cong_signal(tp, NULL, CC_RTO);
(void) tcp_output(tp);
out:
#ifdef TCPDEBUG
if (tp != NULL && (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
tcp_trace(TA_USER, ostate, tp, (void *)0, (struct tcphdr *)0,
PRU_SLOWTIMO);
#endif
TCP_PROBE2(debug__user, tp, PRU_SLOWTIMO);
if (tp != NULL)
INP_WUNLOCK(inp);
if (headlocked)
INP_INFO_RUNLOCK(&V_tcbinfo);
CURVNET_RESTORE();
}
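
/*
 * Arm or rearm one of the five per-connection timers.  A delta of 0
 * stops the timer; otherwise its callout is (re)scheduled delta ticks
 * in the future, on the CPU selected by inp_to_cpuid().
 */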
void
tcp_timer_activate(struct tcpcb *tp, uint32_t timer_type, u_int delta)
{
struct callout *t_callout;
timeout_t *f_callout;
struct inpcb *inp = tp->t_inpcb;
int cpu = inp_to_cpuid(inp);
uint32_t f_reset;
#ifdef TCP_OFFLOAD
if (tp->t_flags & TF_TOE)
return;
#endif
if (tp->t_timers->tt_flags & TT_STOPPED)
return;
switch (timer_type) {
case TT_DELACK:
t_callout = &tp->t_timers->tt_delack;
f_callout = tcp_timer_delack;
f_reset = TT_DELACK_RST;
break;
case TT_REXMT:
t_callout = &tp->t_timers->tt_rexmt;
f_callout = tcp_timer_rexmt;
f_reset = TT_REXMT_RST;
break;
case TT_PERSIST:
t_callout = &tp->t_timers->tt_persist;
f_callout = tcp_timer_persist;
f_reset = TT_PERSIST_RST;
break;
case TT_KEEP:
t_callout = &tp->t_timers->tt_keep;
f_callout = tcp_timer_keep;
f_reset = TT_KEEP_RST;
break;
case TT_2MSL:
t_callout = &tp->t_timers->tt_2msl;
f_callout = tcp_timer_2msl;
f_reset = TT_2MSL_RST;
break;
default:
panic("tp %p bad timer_type %#x", tp, timer_type);
}
if (delta == 0) {
if ((tp->t_timers->tt_flags & timer_type) &&
callout_stop(t_callout) &&
(tp->t_timers->tt_flags & f_reset)) {
tp->t_timers->tt_flags &= ~(timer_type | f_reset);
}
} else {
if ((tp->t_timers->tt_flags & timer_type) == 0) {
tp->t_timers->tt_flags |= (timer_type | f_reset);
callout_reset_on(t_callout, delta, f_callout, tp, cpu);
} else {
/* Reset already running callout on the same CPU. */
if (!callout_reset(t_callout, delta, f_callout, tp)) {
/*
 * Callout not cancelled, consider it as not
 * properly restarted.
 */
tp->t_timers->tt_flags &= ~f_reset;
}
}
}
}
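
/*
 * Report whether the given timer's callout is currently active.
 */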
int
tcp_timer_active(struct tcpcb *tp, uint32_t timer_type)
{
struct callout *t_callout;
switch (timer_type) {
case TT_DELACK:
t_callout = &tp->t_timers->tt_delack;
break;
case TT_REXMT:
t_callout = &tp->t_timers->tt_rexmt;
break;
case TT_PERSIST:
t_callout = &tp->t_timers->tt_persist;
break;
case TT_KEEP:
t_callout = &tp->t_timers->tt_keep;
break;
case TT_2MSL:
t_callout = &tp->t_timers->tt_2msl;
break;
default:
panic("tp %p bad timer_type %#x", tp, timer_type);
}
return callout_active(t_callout);
}
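
/*
 * Permanently stop a timer as part of tcpcb teardown.  TT_STOPPED
 * prevents any rearming; if the running callout cannot be stopped, it
 * is redirected to the matching discard handler, which performs the
 * final tcpcb cleanup.
 */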
void
tcp_timer_stop(struct tcpcb *tp, uint32_t timer_type)
{
struct callout *t_callout;
timeout_t *f_callout;
uint32_t f_reset;
tp->t_timers->tt_flags |= TT_STOPPED;
switch (timer_type) {
case TT_DELACK:
t_callout = &tp->t_timers->tt_delack;
f_callout = tcp_timer_delack_discard;
f_reset = TT_DELACK_RST;
break;
case TT_REXMT:
t_callout = &tp->t_timers->tt_rexmt;
f_callout = tcp_timer_rexmt_discard;
f_reset = TT_REXMT_RST;
break;
case TT_PERSIST:
t_callout = &tp->t_timers->tt_persist;
f_callout = tcp_timer_persist_discard;
f_reset = TT_PERSIST_RST;
break;
case TT_KEEP:
t_callout = &tp->t_timers->tt_keep;
f_callout = tcp_timer_keep_discard;
f_reset = TT_KEEP_RST;
break;
case TT_2MSL:
t_callout = &tp->t_timers->tt_2msl;
f_callout = tcp_timer_2msl_discard;
f_reset = TT_2MSL_RST;
break;
default:
panic("tp %p bad timer_type %#x", tp, timer_type);
}
if (tp->t_timers->tt_flags & timer_type) {
if (callout_stop(t_callout) &&
(tp->t_timers->tt_flags & f_reset)) {
tp->t_timers->tt_flags &= ~(timer_type | f_reset);
} else {
/*
* Can't stop the callout, defer tcpcb actual deletion
* to the last tcp timer discard callout.
* The TT_STOPPED flag will ensure that no tcp timer
* callouts can be restarted on our behalf, and
* past this point currently running callouts waiting
* on inp lock will return right away after the
* classical check for callout reset/stop events:
* callout_pending() || !callout_active()
*/
callout_reset(t_callout, 1, f_callout, tp);
}
}
}
#define ticks_to_msecs(t) (1000*(t) / hz)
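
/*
 * Fill in an xtcp_timer structure with the time, in milliseconds,
 * remaining on each active timer, for export to userland monitoring
 * tools.
 */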
void
tcp_timer_to_xtimer(struct tcpcb *tp, struct tcp_timer *timer,
struct xtcp_timer *xtimer)
{
sbintime_t now;
bzero(xtimer, sizeof(*xtimer));
if (timer == NULL)
return;
now = getsbinuptime();
if (callout_active(&timer->tt_delack))
xtimer->tt_delack = (timer->tt_delack.c_time - now) / SBT_1MS;
if (callout_active(&timer->tt_rexmt))
xtimer->tt_rexmt = (timer->tt_rexmt.c_time - now) / SBT_1MS;
if (callout_active(&timer->tt_persist))
xtimer->tt_persist = (timer->tt_persist.c_time - now) / SBT_1MS;
if (callout_active(&timer->tt_keep))
xtimer->tt_keep = (timer->tt_keep.c_time - now) / SBT_1MS;
if (callout_active(&timer->tt_2msl))
xtimer->tt_2msl = (timer->tt_2msl.c_time - now) / SBT_1MS;
xtimer->t_rcvtime = ticks_to_msecs(ticks - tp->t_rcvtime);
}