tcp: finish SACK loss recovery on sudden lack of SACK blocks

While a receiver should continue sending SACK blocks for the
duration of a SACK loss recovery, the TCP options may for some
reason no longer contain these SACK blocks. If we have already
started maintaining the scoreboard, keep handling incoming
(non-SACK) ACKs as belonging to the SACK recovery.

Reported by:		thj
Reviewed by:		tuexen, #transport
MFC after:		2 weeks
Sponsored by:		NetApp, Inc.
Differential Revision:	https://reviews.freebsd.org/D36046
This commit is contained in:
Richard Scheffenegger 2022-08-31 14:49:25 +02:00
parent 544f047f89
commit c21b7b55be

View File

@ -268,6 +268,20 @@ kmod_tcpstat_add(int statnum, int val)
counter_u64_add(VNET(tcpstat)[statnum], val);
}
/*
 * SACK loss recovery gating predicate: a SACK recovery may only be
 * started on a duplicate ACK carrying a SACK block, but once the
 * scoreboard (snd_holes) is populated, subsequent ACKs without SACK
 * blocks still belong to the ongoing recovery so it can complete
 * even if the peer reneges.
 */
static inline bool
tcp_is_sack_recovery(struct tcpcb *tp, struct tcpopt *to)
{
	/* SACK must have been negotiated on this connection. */
	if (!(tp->t_flags & TF_SACK_PERMIT))
		return (false);
	/* Either a SACK block is present, or a scoreboard already exists. */
	if (to->to_flags & TOF_SACK)
		return (true);
	return (!TAILQ_EMPTY(&tp->snd_holes));
}
#ifdef TCP_HHOOK
/*
* Wrapper for the TCP established input helper hook.
@ -2491,9 +2505,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
TCPSTAT_INC(tcps_rcvacktoomuch);
goto dropafterack;
}
if ((tp->t_flags & TF_SACK_PERMIT) &&
((to.to_flags & TOF_SACK) ||
!TAILQ_EMPTY(&tp->snd_holes))) {
if (tcp_is_sack_recovery(tp, &to)) {
if (((sack_changed = tcp_sack_doack(tp, &to, th->th_ack)) != 0) &&
(tp->t_flags & TF_LRD)) {
tcp_sack_lost_retransmission(tp, th);
@ -2566,8 +2578,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* duplicating packets or a possible DoS attack.
*/
if (th->th_ack != tp->snd_una ||
((tp->t_flags & TF_SACK_PERMIT) &&
(to.to_flags & TOF_SACK) &&
(tcp_is_sack_recovery(tp, &to) &&
!sack_changed))
break;
else if (!tcp_timer_active(tp, TT_REXMT))
@ -2579,8 +2590,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
if (V_tcp_do_prr &&
IN_FASTRECOVERY(tp->t_flags)) {
tcp_do_prr_ack(tp, th, &to);
} else if ((tp->t_flags & TF_SACK_PERMIT) &&
(to.to_flags & TOF_SACK) &&
} else if (tcp_is_sack_recovery(tp, &to) &&
IN_FASTRECOVERY(tp->t_flags)) {
int awnd;
@ -2653,8 +2663,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* snd_ssthresh is already updated by
* cc_cong_signal.
*/
if ((tp->t_flags & TF_SACK_PERMIT) &&
(to.to_flags & TOF_SACK)) {
if (tcp_is_sack_recovery(tp, &to)) {
tp->sackhint.prr_delivered =
tp->sackhint.sacked_bytes;
} else {
@ -2666,8 +2675,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
tp->sackhint.recover_fs = max(1,
tp->snd_nxt - tp->snd_una);
}
if ((tp->t_flags & TF_SACK_PERMIT) &&
(to.to_flags & TOF_SACK)) {
if (tcp_is_sack_recovery(tp, &to)) {
TCPSTAT_INC(
tcps_sack_recovery_episode);
tp->snd_recover = tp->snd_nxt;
@ -2759,8 +2767,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
* from the left side. Such partial ACKs should not be
* counted as dupacks here.
*/
if ((tp->t_flags & TF_SACK_PERMIT) &&
(to.to_flags & TOF_SACK) &&
if (tcp_is_sack_recovery(tp, &to) &&
sack_changed) {
tp->t_dupacks++;
/* limit overhead by setting maxseg last */
@ -3942,8 +3949,7 @@ tcp_do_prr_ack(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
* (del_data) and an estimate of how many bytes are in the
* network.
*/
if (((tp->t_flags & TF_SACK_PERMIT) &&
(to->to_flags & TOF_SACK)) ||
if (tcp_is_sack_recovery(tp, to) ||
(IN_CONGRECOVERY(tp->t_flags) &&
!IN_FASTRECOVERY(tp->t_flags))) {
del_data = tp->sackhint.delivered_data;
@ -3987,8 +3993,7 @@ tcp_do_prr_ack(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
* accordingly.
*/
if (IN_FASTRECOVERY(tp->t_flags)) {
if ((tp->t_flags & TF_SACK_PERMIT) &&
(to->to_flags & TOF_SACK)) {
if (tcp_is_sack_recovery(tp, to)) {
tp->snd_cwnd = tp->snd_nxt - tp->snd_recover +
tp->sackhint.sack_bytes_rexmit +
(snd_cnt * maxseg);