Use the existing TF_SACK_PERMIT flag in the struct tcpcb t_flags field instead of
a dedicated sack_enable int for this boolean. Change all users accordingly.
parent 0ca3f933eb
commit 3529149e9a
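The change is one mechanical substitution applied to every user: the per-connection boolean moves out of a dedicated sack_enable int and into one bit of the existing t_flags bit field, so writes become |= / &= ~ and reads become a mask test. Below is a minimal, self-contained sketch of that idiom; the reduced struct, the main() driver, and the numeric flag value are illustrative assumptions, not the real FreeBSD definitions.

/*
 * Sketch of the t_flags bit idiom this commit switches to.  Only the
 * set/test/clear pattern mirrors the diff; the struct layout and the
 * flag value below are assumptions made for illustration.
 */
#include <stdio.h>

#define TF_SACK_PERMIT	0x0200		/* illustrative bit value */

struct tcpcb_sketch {
	unsigned int t_flags;		/* one bit per boolean connection property */
};

int
main(void)
{
	struct tcpcb_sketch tcb = { 0 };
	struct tcpcb_sketch *tp = &tcb;

	tp->t_flags |= TF_SACK_PERMIT;		/* was: tp->sack_enable = 1; */
	if (tp->t_flags & TF_SACK_PERMIT)	/* was: if (tp->sack_enable)  */
		printf("SACK permitted on this connection\n");
	tp->t_flags &= ~TF_SACK_PERMIT;		/* was: tp->sack_enable = 0; */

	return (0);
}

Keeping such booleans in t_flags avoids a separate int per flag in struct tcpcb and matches how the other connection flags already handled in the diff below (TF_REQ_SCALE, TF_RCVD_TSTMP, and so on) are stored.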
@@ -1090,13 +1090,9 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
 		tp->snd_wnd = th->th_win;
 		if (to.to_flags & TOF_MSS)
 			tcp_mss(tp, to.to_mss);
-		if (tp->sack_enable) {
-			if (!(to.to_flags & TOF_SACKPERM))
-				tp->sack_enable = 0;
-			else
-				tp->t_flags |= TF_SACK_PERMIT;
-		}
-
+		if ((tp->t_flags & TF_SACK_PERMIT) &&
+		    (to.to_flags & TOF_SACKPERM) == 0)
+			tp->t_flags &= ~TF_SACK_PERMIT;
 	}
 
 	/*
@@ -1142,9 +1138,11 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
 			if (SEQ_GT(th->th_ack, tp->snd_una) &&
 			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
 			    tp->snd_cwnd >= tp->snd_wnd &&
-			    ((!tcp_do_newreno && !tp->sack_enable &&
+			    ((!tcp_do_newreno &&
+			      !(tp->t_flags & TF_SACK_PERMIT) &&
 			      tp->t_dupacks < tcprexmtthresh) ||
-			     ((tcp_do_newreno || tp->sack_enable) &&
+			     ((tcp_do_newreno ||
+			       (tp->t_flags & TF_SACK_PERMIT)) &&
 			      !IN_FASTRECOVERY(tp) &&
 			      (to.to_flags & TOF_SACK) == 0 &&
 			      TAILQ_EMPTY(&tp->snd_holes)))) {
@@ -1253,7 +1251,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
 			 * we have enough buffer space to take it.
 			 */
 			/* Clean receiver SACK report if present */
-			if (tp->sack_enable && tp->rcv_numsacks)
+			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
 				tcp_clean_sackreport(tp);
 			++tcpstat.tcps_preddat;
 			tp->rcv_nxt += tlen;
@@ -1860,7 +1858,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
 			tcpstat.tcps_rcvacktoomuch++;
 			goto dropafterack;
 		}
-		if (tp->sack_enable &&
+		if ((tp->t_flags & TF_SACK_PERMIT) &&
 		    ((to.to_flags & TOF_SACK) ||
 		    !TAILQ_EMPTY(&tp->snd_holes)))
 			tcp_sack_doack(tp, &to, th->th_ack);
@@ -1895,9 +1893,11 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
 			    th->th_ack != tp->snd_una)
 				tp->t_dupacks = 0;
 			else if (++tp->t_dupacks > tcprexmtthresh ||
-			    ((tcp_do_newreno || tp->sack_enable) &&
+			    ((tcp_do_newreno ||
+			      (tp->t_flags & TF_SACK_PERMIT)) &&
 			    IN_FASTRECOVERY(tp))) {
-				if (tp->sack_enable && IN_FASTRECOVERY(tp)) {
+				if ((tp->t_flags & TF_SACK_PERMIT) &&
+				    IN_FASTRECOVERY(tp)) {
 					int awnd;
 
 					/*
@@ -1928,7 +1928,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
 					 * check to see if we're in newreno
 					 * recovery.
 					 */
-					if (tp->sack_enable) {
+					if (tp->t_flags & TF_SACK_PERMIT) {
 						if (IN_FASTRECOVERY(tp)) {
 							tp->t_dupacks = 0;
 							break;
@@ -1949,7 +1949,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
 					tp->snd_recover = tp->snd_max;
 					tcp_timer_activate(tp, TT_REXMT, 0);
 					tp->t_rtttime = 0;
-					if (tp->sack_enable) {
+					if (tp->t_flags & TF_SACK_PERMIT) {
 						tcpstat.tcps_sack_recovery_episode++;
 						tp->sack_newdata = tp->snd_nxt;
 						tp->snd_cwnd = tp->t_maxseg;
@@ -2010,10 +2010,10 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
 		 * If the congestion window was inflated to account
 		 * for the other side's cached packets, retract it.
 		 */
-		if (tcp_do_newreno || tp->sack_enable) {
+		if (tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) {
 			if (IN_FASTRECOVERY(tp)) {
 				if (SEQ_LT(th->th_ack, tp->snd_recover)) {
-					if (tp->sack_enable)
+					if (tp->t_flags & TF_SACK_PERMIT)
 						tcp_sack_partialack(tp, th);
 					else
 						tcp_newreno_partial_ack(tp, th);
@@ -2144,7 +2144,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
 		 * Otherwise open linearly: maxseg per window
 		 * (maxseg^2 / cwnd per packet).
 		 */
-		if ((!tcp_do_newreno && !tp->sack_enable) ||
+		if ((!tcp_do_newreno && !(tp->t_flags & TF_SACK_PERMIT)) ||
 		    !IN_FASTRECOVERY(tp)) {
 			u_int cw = tp->snd_cwnd;
 			u_int incr = tp->t_maxseg;
@@ -2164,17 +2164,17 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
 		}
 		sowwakeup_locked(so);
 		/* detect una wraparound */
-		if ((tcp_do_newreno || tp->sack_enable) &&
+		if ((tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) &&
 		    !IN_FASTRECOVERY(tp) &&
 		    SEQ_GT(tp->snd_una, tp->snd_recover) &&
 		    SEQ_LEQ(th->th_ack, tp->snd_recover))
 			tp->snd_recover = th->th_ack - 1;
-		if ((tcp_do_newreno || tp->sack_enable) &&
+		if ((tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) &&
 		    IN_FASTRECOVERY(tp) &&
 		    SEQ_GEQ(th->th_ack, tp->snd_recover))
 			EXIT_FASTRECOVERY(tp);
 		tp->snd_una = th->th_ack;
-		if (tp->sack_enable) {
+		if (tp->t_flags & TF_SACK_PERMIT) {
 			if (SEQ_GT(tp->snd_una, tp->snd_recover))
 				tp->snd_recover = tp->snd_una;
 		}
@@ -2385,7 +2385,7 @@ tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
 			thflags = tcp_reass(tp, th, &tlen, m);
 			tp->t_flags |= TF_ACKNOW;
 		}
-		if (tlen > 0 && tp->sack_enable)
+		if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
 			tcp_update_sack_list(tp, save_start, save_end);
 #if 0
 		/*
@@ -197,7 +197,8 @@ tcp_output(struct tcpcb *tp)
 	 * snd_nxt. There may be SACK information that allows us to avoid
 	 * resending already delivered data. Adjust snd_nxt accordingly.
 	 */
-	if (tp->sack_enable && SEQ_LT(tp->snd_nxt, tp->snd_max))
+	if ((tp->t_flags & TF_SACK_PERMIT) &&
+	    SEQ_LT(tp->snd_nxt, tp->snd_max))
 		tcp_sack_adjust(tp);
 	sendalot = 0;
 	off = tp->snd_nxt - tp->snd_una;
@@ -219,7 +220,7 @@ tcp_output(struct tcpcb *tp)
 	sack_bytes_rxmt = 0;
 	len = 0;
 	p = NULL;
-	if (tp->sack_enable && IN_FASTRECOVERY(tp) &&
+	if ((tp->t_flags & TF_SACK_PERMIT) && IN_FASTRECOVERY(tp) &&
 	    (p = tcp_sack_output(tp, &sack_bytes_rxmt))) {
 		long cwin;
 
@@ -566,7 +567,8 @@ tcp_output(struct tcpcb *tp)
 	 * after the retransmission timer has been turned off. Make sure
 	 * that the retransmission timer is set.
 	 */
-	if (tp->sack_enable && SEQ_GT(tp->snd_max, tp->snd_una) &&
+	if ((tp->t_flags & TF_SACK_PERMIT) &&
+	    SEQ_GT(tp->snd_max, tp->snd_una) &&
 	    !tcp_timer_active(tp, TT_REXMT) &&
 	    !tcp_timer_active(tp, TT_PERSIST)) {
 		tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
@@ -656,7 +658,7 @@ tcp_output(struct tcpcb *tp)
 		tp->rfbuf_ts = ticks;
 	}
 	/* Selective ACK's. */
-	if (tp->sack_enable) {
+	if (tp->t_flags & TF_SACK_PERMIT) {
 		if (flags & TH_SYN)
 			to.to_flags |= TOF_SACKPERM;
 		else if (TCPS_HAVEESTABLISHED(tp->t_state) &&
@@ -658,7 +658,8 @@ tcp_newtcpcb(struct inpcb *inp)
 
 	if (tcp_do_rfc1323)
 		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
-	tp->sack_enable = tcp_do_sack;
+	if (tcp_do_sack)
+		tp->t_flags |= TF_SACK_PERMIT;
 	TAILQ_INIT(&tp->snd_holes);
 	tp->t_inpcb = inp;	/* XXX */
 	/*
@@ -1607,7 +1608,7 @@ tcp_mtudisc(struct inpcb *inp, int errno)
 	tp->snd_nxt = tp->snd_una;
 	tcp_free_sackholes(tp);
 	tp->snd_recover = tp->snd_max;
-	if (tp->sack_enable)
+	if (tp->t_flags & TF_SACK_PERMIT)
 		EXIT_FASTRECOVERY(tp);
 	tcp_output(tp);
 	return (inp);
@@ -713,11 +713,9 @@ syncache_socket(struct syncache *sc, struct socket *lso, struct mbuf *m)
 	if (sc->sc_flags & SCF_SIGNATURE)
 		tp->t_flags |= TF_SIGNATURE;
 #endif
-	if (sc->sc_flags & SCF_SACK) {
-		tp->sack_enable = 1;
+	if (sc->sc_flags & SCF_SACK)
 		tp->t_flags |= TF_SACK_PERMIT;
-	}
 }
 
 /*
  * Set up MSS and get cached values from tcp_hostcache.
@@ -1235,7 +1235,7 @@ tcp_fill_info(struct tcpcb *tp, struct tcp_info *ti)
 	ti->tcpi_state = tp->t_state;
 	if ((tp->t_flags & TF_REQ_TSTMP) && (tp->t_flags & TF_RCVD_TSTMP))
 		ti->tcpi_options |= TCPI_OPT_TIMESTAMPS;
-	if (tp->sack_enable)
+	if (tp->t_flags & TF_SACK_PERMIT)
 		ti->tcpi_options |= TCPI_OPT_SACK;
 	if ((tp->t_flags & TF_REQ_SCALE) && (tp->t_flags & TF_RCVD_SCALE)) {
 		ti->tcpi_options |= TCPI_OPT_WSCALE;
@@ -1863,8 +1863,8 @@ db_print_tcpcb(struct tcpcb *tp, const char *name, int indent)
 	    tp->snd_recover_prev, tp->t_badrxtwin);
 
 	db_print_indent(indent);
-	db_printf("sack_enable: %d snd_numholes: %d snd_holes first: %p\n",
-	    tp->sack_enable, tp->snd_numholes, TAILQ_FIRST(&tp->snd_holes));
+	db_printf("snd_numholes: %d snd_holes first: %p\n",
+	    tp->snd_numholes, TAILQ_FIRST(&tp->snd_holes));
 
 	db_print_indent(indent);
 	db_printf("snd_fack: 0x%08x rcv_numsacks: %d sack_newdata: "
@@ -183,7 +183,6 @@ struct tcpcb {
 	u_long	t_badrxtwin;		/* window for retransmit recovery */
 	u_char	snd_limited;		/* segments limited transmitted */
 /* SACK related state */
-	int	sack_enable;		/* enable SACK for this connection */
 	int	snd_numholes;		/* number of holes seen by sender */
 	TAILQ_HEAD(sackhole_head, sackhole) snd_holes;
 					/* SACK scoreboard (sorted) */