Remove all trailing white space from the BBR/Rack fold. Bits
left around by emacs (thanks emacs).
parent 243996fd94
commit a96435c96a
@@ -501,7 +501,7 @@ rack_init_sysctls(void)
{
	struct sysctl_oid *rack_counters;
	struct sysctl_oid *rack_attack;

	SYSCTL_ADD_S32(&rack_sysctl_ctx,
	    SYSCTL_CHILDREN(rack_sysctl_root),
	    OID_AUTO, "rate_sample_method", CTLFLAG_RW,
@@ -1052,7 +1052,7 @@ rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a)
{
	if (SEQ_GEQ(b->r_start, a->r_start) &&
	    SEQ_LT(b->r_start, a->r_end)) {
		/*
		 * The entry b is within the
		 * block a. i.e.:
		 * a --   |-------------|
@@ -1064,15 +1064,15 @@ rb_map_cmp(struct rack_sendmap *b, struct rack_sendmap *a)
		 */
		return (0);
	} else if (SEQ_GEQ(b->r_start, a->r_end)) {
		/*
		 * b falls as either the next
		 * sequence block after a so a
		 * is said to be smaller than b.
		 * i.e:
		 * a --   |------|
		 * b --          |--------|
		 * or
		 * b --                   |-----|
		 */
		return (1);
	}
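
For readers following the logic above: this comparator is what lets the RACK send map live in an RB tree keyed by a sequence *range* rather than a single point, so a lookup whose start falls anywhere inside an existing block compares as "equal" to that block. A minimal user-space sketch of the same decision follows; the final -1 branch lies outside the quoted hunk and is assumed by symmetry.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's sequence-space macros. */
typedef uint32_t tcp_seq;
#define SEQ_LT(a, b)	((int32_t)((a) - (b)) < 0)
#define SEQ_GEQ(a, b)	((int32_t)((a) - (b)) >= 0)

struct blk { tcp_seq r_start, r_end; };	/* covers [r_start, r_end) */

/* Mirrors rb_map_cmp(): 0 = b starts inside a, 1 = b after a, -1 = before. */
static int
map_cmp(const struct blk *b, const struct blk *a)
{
	if (SEQ_GEQ(b->r_start, a->r_start) && SEQ_LT(b->r_start, a->r_end))
		return (0);
	else if (SEQ_GEQ(b->r_start, a->r_end))
		return (1);
	return (-1);	/* assumed: b->r_start precedes a->r_start */
}

int
main(void)
{
	struct blk a = { 100, 200 };
	struct blk in = { 150, 160 }, after = { 200, 250 }, before = { 50, 90 };

	printf("%d %d %d\n", map_cmp(&in, &a), map_cmp(&after, &a),
	    map_cmp(&before, &a));	/* prints: 0 1 -1 */
	return (0);
}
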
@@ -1212,7 +1212,7 @@ rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, int32_t t,
		log.u_bbr.flex2 = o_srtt;
		log.u_bbr.flex3 = o_var;
		log.u_bbr.flex4 = rack->r_ctl.rack_rs.rs_rtt_lowest;
		log.u_bbr.flex5 = rack->r_ctl.rack_rs.rs_rtt_highest;
		log.u_bbr.flex6 = rack->r_ctl.rack_rs.rs_rtt_cnt;
		log.u_bbr.rttProp = rack->r_ctl.rack_rs.rs_rtt_tot;
		log.u_bbr.flex8 = rack->r_ctl.rc_rate_sample_method;
@@ -1230,7 +1230,7 @@ rack_log_rtt_upd(struct tcpcb *tp, struct tcp_rack *rack, int32_t t,
static void
rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
{
	/*
	 * Log the rtt sample we are
	 * applying to the srtt algorithm in
	 * useconds.
@@ -1238,7 +1238,7 @@ rack_log_rtt_sample(struct tcp_rack *rack, uint32_t rtt)
	if (rack->rc_tp->t_logstate != TCP_LOG_STATE_OFF) {
		union tcp_log_stackspecific log;
		struct timeval tv;

		/* Convert our ms to a microsecond */
		memset(&log, 0, sizeof(log));
		log.u_bbr.flex1 = rtt * 1000;
@@ -1359,7 +1359,7 @@ rack_log_type_hrdwtso(struct tcpcb *tp, struct tcp_rack *rack, int len, int mod,
		    0, &log, false, &tv);
	}
}

static void
rack_log_type_just_return(struct tcp_rack *rack, uint32_t cts, uint32_t tlen, uint32_t slot, uint8_t hpts_calling)
{
@@ -1862,7 +1862,7 @@ rack_cc_after_idle(struct tcpcb *tp)

	if (tp->snd_cwnd == 1)
		i_cwnd = tp->t_maxseg;		/* SYN(-ACK) lost */
	else
		i_cwnd = tcp_compute_initwnd(tcp_maxseg(tp));

	/*
@@ -2014,14 +2014,14 @@ rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
	struct rack_sendmap *prsm;
	uint32_t thresh, len;
	int maxseg;

	if (srtt == 0)
		srtt = 1;
	if (rack->r_ctl.rc_tlp_threshold)
		thresh = srtt + (srtt / rack->r_ctl.rc_tlp_threshold);
	else
		thresh = (srtt * 2);

	/* Get the previous sent packet, if any */
	maxseg = ctf_fixed_maxseg(tp);
	counter_u64_add(rack_enter_tlp_calc, 1);
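
To make the base threshold concrete: with rc_tlp_threshold = 4 and srtt = 40 ms, thresh = 40 + 40/4 = 50 ms; with the knob unset it falls back to 2 * srtt = 80 ms. A self-contained restatement of just that calculation:

#include <stdint.h>
#include <stdio.h>

/* Base TLP threshold as in the hunk above: srtt plus a fraction of it. */
static uint32_t
tlp_base_thresh(uint32_t srtt, uint32_t tlp_threshold)
{
	if (srtt == 0)
		srtt = 1;	/* avoid a zero base */
	if (tlp_threshold)
		return (srtt + (srtt / tlp_threshold));
	return (srtt * 2);
}

int
main(void)
{
	printf("%u\n", tlp_base_thresh(40, 4));	/* 50 */
	printf("%u\n", tlp_base_thresh(40, 0));	/* 80 */
	return (0);
}
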
@@ -2048,7 +2048,7 @@ rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
		 */
		uint32_t inter_gap = 0;
		int idx, nidx;

		counter_u64_add(rack_used_tlpmethod, 1);
		idx = rsm->r_rtr_cnt - 1;
		nidx = prsm->r_rtr_cnt - 1;
@@ -2062,7 +2062,7 @@ rack_calc_thresh_tlp(struct tcpcb *tp, struct tcp_rack *rack,
		 * Possibly compensate for delayed-ack.
		 */
		uint32_t alt_thresh;

		counter_u64_add(rack_used_tlpmethod2, 1);
		alt_thresh = srtt + (srtt / 2) + rack_delayed_ack_time;
		if (alt_thresh > thresh)
@@ -2188,7 +2188,7 @@ rack_timer_start(struct tcpcb *tp, struct tcp_rack *rack, uint32_t cts, int sup_
	int32_t idx;
	int32_t is_tlp_timer = 0;
	struct rack_sendmap *rsm;

	if (rack->t_timers_stopped) {
		/* All timers have been stopped none are to run */
		return (0);
@@ -2208,9 +2208,9 @@ activate_rxt:
	rsm = TAILQ_FIRST(&rack->r_ctl.rc_tmap);
	if (rsm) {
		idx = rsm->r_rtr_cnt - 1;
		if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], rack->r_ctl.rc_tlp_rxt_last_time))
			tstmp_touse = rsm->r_tim_lastsent[idx];
		else
			tstmp_touse = rack->r_ctl.rc_tlp_rxt_last_time;
		if (TSTMP_GT(tstmp_touse, cts))
			time_since_sent = cts - tstmp_touse;
@@ -2259,7 +2259,7 @@ activate_rxt:
	if ((rack->use_rack_cheat == 0) &&
	    (IN_RECOVERY(tp->t_flags)) &&
	    (rack->r_ctl.rc_prr_sndcnt < ctf_fixed_maxseg(tp))) {
		/*
		 * We are not cheating, in recovery and
		 * not enough ack's to yet get our next
		 * retransmission out.
@@ -2304,9 +2304,9 @@ activate_tlp:
		}
		idx = rsm->r_rtr_cnt - 1;
		time_since_sent = 0;
		if (TSTMP_GEQ(rsm->r_tim_lastsent[idx], rack->r_ctl.rc_tlp_rxt_last_time))
			tstmp_touse = rsm->r_tim_lastsent[idx];
		else
			tstmp_touse = rack->r_ctl.rc_tlp_rxt_last_time;
		if (TSTMP_GT(tstmp_touse, cts))
			time_since_sent = cts - tstmp_touse;
@@ -2381,7 +2381,7 @@ rack_exit_persist(struct tcpcb *tp, struct tcp_rack *rack)
}

static void
rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
    int32_t slot, uint32_t tot_len_this_send, int sup_rack)
{
	struct inpcb *inp;
@@ -2407,12 +2407,12 @@ rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
	rack->r_ctl.rc_timer_exp = 0;
	if (rack->rc_inp->inp_in_hpts == 0) {
		rack->r_ctl.rc_hpts_flags = 0;
	}
	if (slot) {
		/* We are hptsi too */
		rack->r_ctl.rc_hpts_flags |= PACE_PKT_OUTPUT;
	} else if (rack->r_ctl.rc_hpts_flags & PACE_PKT_OUTPUT) {
		/*
		 * We are still left on the hpts when the to goes
		 * it will be for output.
		 */
@@ -2428,9 +2428,9 @@ rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
		/*
		 * We have a potential attacker on
		 * the line. We have possibly some
		 * (or now) pacing time set. We want to
		 * slow down the processing of sacks by some
		 * amount (if it is an attacker). Set the default
		 * slot for attackers in place (unless the orginal
		 * interval is longer). Its stored in
		 * micro-seconds, so lets convert to msecs.
@@ -2445,7 +2445,7 @@ rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
	if (delayed_ack && ((hpts_timeout == 0) ||
	    (delayed_ack < hpts_timeout)))
		hpts_timeout = delayed_ack;
	else
		rack->r_ctl.rc_hpts_flags &= ~PACE_TMR_DELACK;
	/*
	 * If no timers are going to run and we will fall off the hptsi
@@ -2495,9 +2495,9 @@ rack_start_hpts_timer(struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
	}
	if (slot) {
		rack->rc_inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
		if (rack->r_ctl.rc_hpts_flags & PACE_TMR_RACK)
			inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
		else
			inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
		rack->r_ctl.rc_last_output_to = cts + slot;
		if ((hpts_timeout == 0) || (hpts_timeout > slot)) {
@@ -2637,7 +2637,7 @@ rack_merge_rsm(struct tcp_rack *rack,
    struct rack_sendmap *l_rsm,
    struct rack_sendmap *r_rsm)
{
	/*
	 * We are merging two ack'd RSM's,
	 * the l_rsm is on the left (lower seq
	 * values) and the r_rsm is on the right
@@ -2648,7 +2648,7 @@ rack_merge_rsm(struct tcp_rack *rack,
	 * the oldest (or last oldest retransmitted).
	 */
	struct rack_sendmap *rm;

	l_rsm->r_end = r_rsm->r_end;
	if (l_rsm->r_dupack < r_rsm->r_dupack)
		l_rsm->r_dupack = r_rsm->r_dupack;
@@ -2797,8 +2797,8 @@ need_retran:
			goto out;
		}
	} else {
		/*
		 * We must find the last segment
		 * that was acceptable by the client.
		 */
		RB_FOREACH_REVERSE(rsm, rack_rb_tree_head, &rack->r_ctl.rc_mtree) {
@@ -3846,7 +3846,7 @@ tcp_rack_xmit_timer_commit(struct tcp_rack *rack, struct tcpcb *tp)
	} else {
#ifdef INVARIANTS
		panic("Unknown rtt variant %d", rack->r_ctl.rc_rate_sample_method);
#endif
		return;
	}
	if (rtt == 0)
@@ -4025,7 +4025,7 @@ rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
				 */
				rack->r_ctl.rc_prr_sndcnt = ctf_fixed_maxseg(tp);
				rack_log_to_prr(rack, 7);
			}
		}
		if (SEQ_LT(rack->r_ctl.rc_rack_tmit_time, rsm->r_tim_lastsent[(rsm->r_rtr_cnt - 1)])) {
			/* New more recent rack_tmit_time */
@@ -4034,8 +4034,8 @@ rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
		}
		return (1);
	}
	/*
	 * We clear the soft/rxtshift since we got an ack.
	 * There is no assurance we will call the commit() function
	 * so we need to clear these to avoid incorrect handling.
	 */
@@ -4071,7 +4071,7 @@ rack_update_rtt(struct tcpcb *tp, struct tcp_rack *rack,
			 * tcp_rack_xmit_timer() are being commented
			 * out for now. They give us no more accuracy
			 * and often lead to a wrong choice. We have
			 * enough samples that have not been
			 * retransmitted. I leave the commented out
			 * code in here in case in the future we
			 * decide to add it back (though I can't forsee
@@ -4150,15 +4150,15 @@ rack_log_sack_passed(struct tcpcb *tp,
			continue;
		}
		if (nrsm->r_flags & RACK_ACKED) {
			/*
			 * Skip ack'd segments, though we
			 * should not see these, since tmap
			 * should not have ack'd segments.
			 */
			continue;
		}
		if (nrsm->r_flags & RACK_SACK_PASSED) {
			/*
			 * We found one that is already marked
			 * passed, we have been here before and
			 * so all others below this are marked.
@@ -4189,7 +4189,7 @@ do_rest_ofb:
	    (SEQ_LT(end, rsm->r_start)) ||
	    (SEQ_GEQ(start, rsm->r_end)) ||
	    (SEQ_LT(start, rsm->r_start))) {
		/*
		 * We are not in the right spot,
		 * find the correct spot in the tree.
		 */
@@ -4217,7 +4217,7 @@ do_rest_ofb:
		 * nrsm         |----------|
		 *
		 * But before we start down that path lets
		 * see if the sack spans over on top of
		 * the next guy and it is already sacked.
		 */
		next = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
@@ -4258,7 +4258,7 @@ do_rest_ofb:
				counter_u64_add(rack_reorder_seen, 1);
				rack->r_ctl.rc_reorder_ts = cts;
			}
			/*
			 * Now we want to go up from rsm (the
			 * one left un-acked) to the next one
			 * in the tmap. We do this so when
@@ -4342,12 +4342,12 @@ do_rest_ofb:
			goto out;
		} else if (SEQ_LT(end, rsm->r_end)) {
			/* A partial sack to a already sacked block */
			moved++;
			rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
			goto out;
		} else {
			/*
			 * The end goes beyond this guy
			 * repostion the start to the
			 * next block.
			 */
@@ -4395,8 +4395,8 @@ do_rest_ofb:
			/* This block only - done, setup for next */
			goto out;
		}
		/*
		 * There is more not coverend by this rsm move on
		 * to the next block in the RB tree.
		 */
		nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
@@ -4433,14 +4433,14 @@ do_rest_ofb:
			memcpy(nrsm, rsm, sizeof(struct rack_sendmap));
			prev->r_end = end;
			rsm->r_start = end;
			/* Now adjust nrsm (stack copy) to be
			 * the one that is the small
			 * piece that was "sacked".
			 */
			nrsm->r_end = end;
			rsm->r_dupack = 0;
			rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
			/*
			 * Now nrsm is our new little piece
			 * that is acked (which was merged
			 * to prev). Update the rtt and changed
@@ -4467,7 +4467,7 @@ do_rest_ofb:
			goto out;
		}
		/**
		 * In this case nrsm becomes
		 * nrsm->r_start = end;
		 * nrsm->r_end = rsm->r_end;
		 * which is un-acked.
@@ -4529,8 +4529,8 @@ do_rest_ofb:
	}
out:
	if (rsm && (rsm->r_flags & RACK_ACKED)) {
		/*
		 * Now can we merge where we worked
		 * with either the previous or
		 * next block?
		 */
@@ -4560,7 +4560,7 @@ out:
		counter_u64_add(rack_sack_proc_short, 1);
	}
	/* Save off the next one for quick reference. */
	if (rsm)
		nrsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
	else
		nrsm = NULL;
@@ -4570,7 +4570,7 @@ out:
	return (changed);
}

static void inline
rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ack)
{
	struct rack_sendmap *tmap;
@@ -4597,8 +4597,8 @@ rack_peer_reneges(struct tcp_rack *rack, struct rack_sendmap *rsm, tcp_seq th_ac
		tmap->r_in_tmap = 1;
		rsm = RB_NEXT(rack_rb_tree_head, &rack->r_ctl.rc_mtree, rsm);
	}
	/*
	 * Now lets possibly clear the sack filter so we start
	 * recognizing sacks that cover this area.
	 */
	if (rack_use_sack_filter)
@@ -4623,14 +4623,14 @@ rack_do_decay(struct tcp_rack *rack)
	} while (0)

	timersub(&rack->r_ctl.rc_last_ack, &rack->r_ctl.rc_last_time_decay, &res);
#undef timersub

	rack->r_ctl.input_pkt++;
	if ((rack->rc_in_persist) ||
	    (res.tv_sec >= 1) ||
	    (rack->rc_tp->snd_max == rack->rc_tp->snd_una)) {
		/*
		 * Check for decay of non-SAD,
		 * we want all SAD detection metrics to
		 * decay 1/4 per second (or more) passed.
		 */
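
The locally defined timersub here is the classic timeval subtraction, and the decay pass only runs once at least a full second has elapsed since the previous one (or the connection is idle or persisting). A user-space sketch of that gating, using the standard sys/time.h timersub:

#include <stdio.h>
#include <sys/time.h>	/* timersub() on BSD/Linux */

int
main(void)
{
	struct timeval last_ack = { 12, 500000 };	/* "now" */
	struct timeval last_decay = { 11, 100000 };	/* previous decay pass */
	struct timeval res;

	timersub(&last_ack, &last_decay, &res);
	if (res.tv_sec >= 1)	/* a full second elapsed: run the decay pass */
		printf("decay: %ld.%06lds elapsed\n",
		    (long)res.tv_sec, (long)res.tv_usec);	/* 1.400000s */
	return (0);
}
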
@@ -4644,8 +4644,8 @@ rack_do_decay(struct tcp_rack *rack)
		if (rack->rc_in_persist ||
		    (rack->rc_tp->snd_max == rack->rc_tp->snd_una) ||
		    (pkt_delta < tcp_sad_low_pps)){
			/*
			 * We don't decay idle connections
			 * or ones that have a low input pps.
			 */
			return;
@@ -4660,7 +4660,7 @@ rack_do_decay(struct tcp_rack *rack)
		rack->r_ctl.sack_noextra_move = ctf_decay_count(rack->r_ctl.sack_noextra_move,
		    tcp_sad_decay_val);
	}
#endif
}

static void
@@ -4674,7 +4674,7 @@ rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
	int32_t i, j, k, num_sack_blks = 0;
	uint32_t cts, acked, ack_point, sack_changed = 0;
	int loop_start = 0, moved_two = 0;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	if (th->th_flags & TH_RST) {
		/* We don't log resets */
@@ -4688,7 +4688,7 @@ rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
	if (rack->sack_attack_disable == 0)
		rack_do_decay(rack);
	if (BYTES_THIS_ACK(tp, th) >= ctf_fixed_maxseg(rack->rc_tp)) {
		/*
		 * You only get credit for
		 * MSS and greater (and you get extra
		 * credit for larger cum-ack moves).
@@ -4700,8 +4700,8 @@ rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
		counter_u64_add(rack_ack_total, ac);
	}
	if (rack->r_ctl.ack_count > 0xfff00000) {
		/*
		 * reduce the number to keep us under
		 * a uint32_t.
		 */
		rack->r_ctl.ack_count /= 2;
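
Halving a counter when it nears the top of a uint32_t keeps later ratio math meaningful while preventing wraparound; 0xfff00000 leaves roughly 16M increments of headroom below UINT32_MAX. A tiny sketch of the idea — halving the paired counters together is the natural extension, since it is their ratio that the SACK-attack detection consumes:

#include <stdint.h>
#include <stdio.h>

/* Keep paired counters from wrapping while preserving their ratio. */
static void
bound_counters(uint32_t *ack_count, uint32_t *sack_count)
{
	if (*ack_count > 0xfff00000 || *sack_count > 0xfff00000) {
		*ack_count /= 2;
		*sack_count /= 2;
	}
}

int
main(void)
{
	uint32_t acks = 0xfff00001, sacks = 0x10000000;

	bound_counters(&acks, &sacks);
	/* The ratio sacks/acks is unchanged to within rounding. */
	printf("%u %u\n", acks, sacks);
	return (0);
}
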
@@ -4818,14 +4818,14 @@ rack_log_ack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th)
			 */
			rack->r_ctl.rc_sacked -= (th_ack - rsm->r_start);
		}
		/*
		 * Clear the dup ack count for
		 * the piece that remains.
		 */
		rsm->r_dupack = 0;
		rack_log_retran_reason(rack, rsm, __LINE__, 0, 2);
		if (rsm->r_rtr_bytes) {
			/*
			 * It was retransmitted adjust the
			 * sack holes for what was acked.
			 */
@@ -4850,7 +4850,7 @@ proc_sack:
		 * that it had previously acked. The only
		 * way that can be true if the peer threw
		 * away data (space issues) that it had
		 * previously sacked (else it would have
		 * given us snd_una up to (rsm->r_end).
		 * We need to undo the acked markings here.
		 *
@@ -4959,8 +4959,8 @@ again:
		}
	}
do_sack_work:
	/*
	 * First lets look to see if
	 * we have retransmitted and
	 * can use the transmit next?
	 */
@@ -4993,8 +4993,8 @@ do_sack_work:
			counter_u64_add(rack_ack_total, (acked / ctf_fixed_maxseg(rack->rc_tp)));
			counter_u64_add(rack_express_sack, 1);
			if (rack->r_ctl.ack_count > 0xfff00000) {
				/*
				 * reduce the number to keep us under
				 * a uint32_t.
				 */
				rack->r_ctl.ack_count /= 2;
@@ -5013,8 +5013,8 @@ do_sack_work:
	/* Its a sack of some sort */
	rack->r_ctl.sack_count++;
	if (rack->r_ctl.sack_count > 0xfff00000) {
		/*
		 * reduce the number to keep us under
		 * a uint32_t.
		 */
		rack->r_ctl.ack_count /= 2;
@@ -5088,8 +5088,8 @@ do_sack_work:
	}
out_with_totals:
	if (num_sack_blks > 1) {
		/*
		 * You get an extra stroke if
		 * you have more than one sack-blk, this
		 * could be where we are skipping forward
		 * and the sack-filter is still working, or
@@ -5105,7 +5105,7 @@ out:
	    tcp_sack_to_ack_thresh &&
	    tcp_sack_to_move_thresh &&
	    ((rack->r_ctl.rc_num_maps_alloced > tcp_map_minimum) || rack->sack_attack_disable)) {
		/*
		 * We have thresholds set to find
		 * possible attackers and disable sack.
		 * Check them.
@@ -5138,7 +5138,7 @@ out:
		if ((rack->sack_attack_disable == 0) &&
		    (moveratio > rack_highest_move_thresh_seen))
			rack_highest_move_thresh_seen = (uint32_t)moveratio;
		if (rack->sack_attack_disable == 0) {
			if ((ackratio > tcp_sack_to_ack_thresh) &&
			    (moveratio > tcp_sack_to_move_thresh)) {
				/* Disable sack processing */
@@ -5148,7 +5148,7 @@ out:
				counter_u64_add(rack_sack_attacks_detected, 1);
			}
			if (tcp_attack_on_turns_on_logging) {
				/*
				 * Turn on logging, used for debugging
				 * false positives.
				 */
@@ -5171,7 +5171,7 @@ out:
			rack->r_ctl.sack_noextra_move = 1;
			rack->r_ctl.ack_count = max(1,
			    (BYTES_THIS_ACK(tp, th)/ctf_fixed_maxseg(rack->rc_tp)));

			if (rack->r_rep_reverse == 0) {
				rack->r_rep_reverse = 1;
				counter_u64_add(rack_sack_attacks_reversed, 1);
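
The detection in the hunks above compares two running ratios against sysctl thresholds. The exact ratio computation sits outside the quoted lines, so the sketch below is hedged: only the comparison structure and the variable names come from the diff, while the *1000 scaling of sack_count against ack_count (and of "extra move" sacks against all sacks) is an assumption, matching the thousandths convention ctf_decay_count() uses later in this fold.

#include <stdint.h>
#include <stdio.h>

/*
 * Hedged sketch of the SACK-attack check. The ratio math is assumed
 * (thousandths scaling); the threshold comparison mirrors the hunk.
 */
static int
sad_should_disable(uint64_t sack_count, uint64_t ack_count,
    uint64_t sack_moved_extra, uint64_t sack_noextra_move,
    uint64_t ack_thresh, uint64_t move_thresh)
{
	uint64_t ackratio, moveratio, moves;

	ackratio = (sack_count * 1000) / (ack_count ? ack_count : 1);
	moves = sack_moved_extra + sack_noextra_move;
	moveratio = (sack_moved_extra * 1000) / (moves ? moves : 1);
	return ((ackratio > ack_thresh) && (moveratio > move_thresh));
}

int
main(void)
{
	/* 9 sacks per ack, 80% extra-move sacks, thresholds 7000/600. */
	printf("%d\n", sad_should_disable(9000, 1000, 800, 200, 7000, 600));
	return (0);	/* prints 1: both ratios exceed their thresholds */
}
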
@@ -5451,7 +5451,7 @@ rack_process_ack(struct mbuf *m, struct tcphdr *th, struct socket *so,
	if ((tp->t_state >= TCPS_FIN_WAIT_1) &&
	    (sbavail(&so->so_snd) == 0) &&
	    (tp->t_flags2 & TF2_DROP_AF_DATA)) {
		/*
		 * The socket was gone and the
		 * peer sent data, time to
		 * reset him.
@@ -5472,7 +5472,7 @@ rack_collapsed_window(struct tcp_rack *rack)
{
	/*
	 * Now we must walk the
	 * send map and divide the
	 * ones left stranded. These
	 * guys can't cause us to abort
	 * the connection and are really
@@ -5483,7 +5483,7 @@ rack_collapsed_window(struct tcp_rack *rack)
	 * the win and acked that data. We would
	 * get into an ack war, the simplier
	 * method then of just pretending we
	 * did not send those segments something
	 * won't work.
	 */
	struct rack_sendmap *rsm, *nrsm, fe, *insret;
@@ -5501,7 +5501,7 @@ rack_collapsed_window(struct tcp_rack *rack)
		rack->rc_has_collapsed = 0;
		return;
	}
	/*
	 * Now do we need to split at
	 * the collapse point?
	 */
@@ -5525,8 +5525,8 @@ rack_collapsed_window(struct tcp_rack *rack)
		TAILQ_INSERT_AFTER(&rack->r_ctl.rc_tmap, rsm, nrsm, r_tnext);
		nrsm->r_in_tmap = 1;
	}
	/*
	 * Set in the new RSM as the
	 * collapsed starting point
	 */
	rsm = nrsm;
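
Splitting a send-map block at the collapse point follows the same pattern as the SACK split earlier in the diff: clone the block, trim the original to end at the split, and start the clone there. A minimal sketch with a simplified block type — the clone/trim arithmetic is the pattern shown in the hunks, while the struct itself is illustrative:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t tcp_seq;
struct blk { tcp_seq r_start, r_end; };	/* covers [r_start, r_end) */

/* Split at seq: *orig keeps [r_start, seq), *clone gets [seq, r_end). */
static void
blk_split(struct blk *orig, struct blk *clone, tcp_seq seq)
{
	*clone = *orig;		/* the memcpy() of the full entry */
	clone->r_start = seq;	/* clone covers the stranded tail */
	orig->r_end = seq;	/* original stops at the collapse point */
}

int
main(void)
{
	struct blk rsm = { 1000, 3000 }, nrsm;

	blk_split(&rsm, &nrsm, 2000);
	printf("[%u,%u) [%u,%u)\n", rsm.r_start, rsm.r_end,
	    nrsm.r_start, nrsm.r_end);	/* [1000,2000) [2000,3000) */
	return (0);
}
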
@@ -6089,7 +6089,7 @@ rack_fastack(struct mbuf *m, struct tcphdr *th, struct socket *so,
		 * We made progress, clear the tlp
		 * out flag so we could start a TLP
		 * again.
		 */
		rack->r_ctl.rc_tlp_rtx_out = 0;
	/* Did the window get updated? */
	if (tiwin != tp->snd_wnd) {
@@ -6263,7 +6263,7 @@ rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
	rack = (struct tcp_rack *)tp->t_fb_ptr;
	if (thflags & TH_ACK) {
		int tfo_partial = 0;

		TCPSTAT_INC(tcps_connects);
		soisconnected(so);
#ifdef MAC
@@ -6304,12 +6304,12 @@ rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
			TCPSTAT_INC(tcps_ecn_shs);
		}
		if (SEQ_GT(th->th_ack, tp->snd_una)) {
			/*
			 * We advance snd_una for the
			 * fast open case. If th_ack is
			 * acknowledging data beyond
			 * snd_una we can't just call
			 * ack-processing since the
			 * data stream in our send-map
			 * will start at snd_una + 1 (one
			 * beyond the SYN). If its just
@@ -6377,7 +6377,7 @@ rack_do_syn_sent(struct mbuf *m, struct tcphdr *th, struct socket *so,
				tp->t_rttlow = t;
			tcp_rack_xmit_timer(rack, t + 1);
			tcp_rack_xmit_timer_commit(rack, tp);
		}
		if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val))
			return (ret_val);
		/* We may have changed to FIN_WAIT_1 above */
@@ -6538,7 +6538,7 @@ rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
			/*
			 * Account for the ACK of our SYN prior to
			 * regular ACK processing below.
			 */
			tp->snd_una++;
		}
		if (tp->t_flags & TF_NEEDFIN) {
@@ -6574,7 +6574,7 @@ rack_do_syn_recv(struct mbuf *m, struct tcphdr *th, struct socket *so,
			tp->t_rttlow = t;
		tcp_rack_xmit_timer(rack, t + 1);
		tcp_rack_xmit_timer_commit(rack, tp);
	}
	if (rack_process_ack(m, th, so, tp, to, tiwin, tlen, &ourfinisacked, thflags, &ret_val)) {
		return (ret_val);
	}
@@ -6833,7 +6833,7 @@ rack_do_close_wait(struct mbuf *m, struct tcphdr *th, struct socket *so,
}

static int
rack_check_data_after_close(struct mbuf *m,
    struct tcpcb *tp, int32_t *tlen, struct tcphdr *th, struct socket *so)
{
	struct tcp_rack *rack;
@@ -7314,7 +7314,7 @@ rack_set_pace_segments(struct tcpcb *tp, struct tcp_rack *rack)
	if (rack->rc_inp->inp_socket->so_snd.sb_flags & SB_TLS_IFNET) {
		tls_seg = ctf_get_opt_tls_size(rack->rc_inp->inp_socket, rack->rc_tp->snd_wnd);
		rack->r_ctl.rc_pace_min_segs = tls_seg;
	} else
#endif
		rack->r_ctl.rc_pace_min_segs = ctf_fixed_maxseg(tp);
	rack->r_ctl.rc_pace_max_segs = ctf_fixed_maxseg(tp) * rack->rc_pace_max_segs;
@@ -7557,7 +7557,7 @@ rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
	 */
	struct rack_sendmap *rsm;
	int tmr_up;

	tmr_up = rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK;
	if (rack->rc_in_persist && (tmr_up == PACE_TMR_PERSIT))
		return;
@@ -7574,7 +7574,7 @@ rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
		/* We are supposed to have delayed ack up and we do */
		return;
	} else if (sbavail(&tp->t_inpcb->inp_socket->so_snd) && (tmr_up == PACE_TMR_RXT)) {
		/*
		 * if we hit enobufs then we would expect the possiblity
		 * of nothing outstanding and the RXT up (and the hptsi timer).
		 */
@@ -7592,7 +7592,7 @@ rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
	    ((tmr_up == PACE_TMR_TLP) ||
	    (tmr_up == PACE_TMR_RACK) ||
	    (tmr_up == PACE_TMR_RXT))) {
		/*
		 * Either a Rack, TLP or RXT is fine if we
		 * have outstanding data.
		 */
@@ -7607,7 +7607,7 @@ rack_timer_audit(struct tcpcb *tp, struct tcp_rack *rack, struct sockbuf *sb)
		 */
		return;
	}
	/*
	 * Ok the timer originally started is not what we want now.
	 * We will force the hpts to be stopped if any, and restart
	 * with the slot set to what was in the saved slot.
@@ -8011,7 +8011,7 @@ rack_get_pacing_delay(struct tcp_rack *rack, struct tcpcb *tp, uint32_t len)
	 * the peer to have a gap in data sending.
	 */
	uint32_t srtt, cwnd, tr_perms = 0;

old_method:
	if (rack->r_ctl.rc_rack_min_rtt)
		srtt = rack->r_ctl.rc_rack_min_rtt;
@@ -8038,7 +8038,7 @@ old_method:
	/* Now do we reduce the time so we don't run dry? */
	if (slot && rack->rc_pace_reduce) {
		int32_t reduce;

		reduce = (slot / rack->rc_pace_reduce);
		if (reduce < slot) {
			slot -= reduce;
@@ -8057,19 +8057,19 @@ old_method:
			bw_est += rack->r_ctl.rc_gp_history[cnt];
		}
		if (bw_est == 0) {
			/*
			 * No way yet to make a b/w estimate
			 * (no goodput est yet).
			 */
			goto old_method;
		}
		/* Covert to bytes per second */
		bw_est *= MSEC_IN_SECOND;
		/*
		 * Now ratchet it up by our percentage. Note
		 * that the minimum you can do is 1 which would
		 * get you 101% of the average last N goodput estimates.
		 * The max you can do is 256 which would yeild you
		 * 356% of the last N goodput estimates.
		 */
		bw_raise = bw_est * (uint64_t)rack->rack_per_of_gp;
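
The comment pins down the scaling: rack_per_of_gp is a percentage added on top of the estimate, so per = 1 yields 101% and per = 256 yields 356%. That implies the raise is divided by 100 before being added back; the divide itself falls just outside the quoted hunk, so treat it as inferred:

#include <stdint.h>
#include <stdio.h>

/* Inferred from the comment: pace at (100 + per)% of the goodput estimate. */
static uint64_t
ratchet_bw(uint64_t bw_est, uint32_t per_of_gp)
{
	uint64_t bw_raise;

	bw_raise = bw_est * (uint64_t)per_of_gp;	/* as in the hunk */
	return (bw_est + bw_raise / 100);		/* inferred divide */
}

int
main(void)
{
	printf("%llu\n", (unsigned long long)ratchet_bw(100000, 1));	/* 101000 */
	printf("%llu\n", (unsigned long long)ratchet_bw(100000, 256));	/* 356000 */
	return (0);
}
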
@@ -8086,7 +8086,7 @@ old_method:
		/* We are enforcing a minimum pace time of 1ms */
		slot = rack->r_enforce_min_pace;
	}
	if (slot)
		counter_u64_add(rack_calc_nonzero, 1);
	else
		counter_u64_add(rack_calc_zero, 1);
@@ -8288,8 +8288,8 @@ again:
		long tlen;

		doing_tlp = 1;
		/*
		 * Check if we can do a TLP with a RACK'd packet
		 * this can happen if we are not doing the rack
		 * cheat and we skipped to a TLP and it
		 * went off.
@@ -8362,7 +8362,7 @@ again:
		    (rack->r_ctl.rc_prr_sndcnt < maxseg)) {
			/*
			 * prr is less than a segment, we
			 * have more acks due in besides
			 * what we need to resend. Lets not send
			 * to avoid sending small pieces of
			 * what we need to retransmit.
@@ -8385,8 +8385,8 @@ again:
				counter_u64_add(rack_rtm_prr_retran, 1);
			}
		}
	/*
	 * Enforce a connection sendmap count limit if set
	 * as long as we are not retransmiting.
	 */
	if ((rsm == NULL) &&
@@ -8660,7 +8660,7 @@ again:
	} else if ((rsm == NULL) &&
	    ((doing_tlp == 0) || (new_data_tlp == 1)) &&
	    (len < rack->r_ctl.rc_pace_max_segs)) {
		/*
		 * We are not sending a full segment for
		 * some reason. Should we not send anything (think
		 * sws or persists)?
@@ -8677,7 +8677,7 @@ again:
			 */
			len = 0;
			if (tp->snd_max == tp->snd_una) {
				/*
				 * Nothing out we can
				 * go into persists.
				 */
@@ -8695,7 +8695,7 @@ again:
			 * not having gone off), We have 2 segments or
			 * more already in flight, its not the tail end
			 * of the socket buffer and the cwnd is blocking
			 * us from sending out a minimum pacing segment size.
			 * Lets not send anything.
			 */
			len = 0;
@@ -8704,10 +8704,10 @@ again:
		    (ctf_flight_size(tp, rack->r_ctl.rc_sacked) > (2 * maxseg)) &&
		    (len < (int)(sbavail(sb) - sb_offset)) &&
		    (TCPS_HAVEESTABLISHED(tp->t_state))) {
			/*
			 * Here we have a send window but we have
			 * filled it up and we can't send another pacing segment.
			 * We also have in flight more than 2 segments
			 * and we are not completing the sb i.e. we allow
			 * the last bytes of the sb to go out even if
			 * its not a full pacing segment.
@@ -8817,7 +8817,7 @@ again:
	 */
	if (!(tp->t_flags & TF_MORETOCOME) &&	/* normal case */
	    (idle || (tp->t_flags & TF_NODELAY)) &&
	    ((uint32_t)len + (uint32_t)sb_offset >= sbavail(&so->so_snd)) &&
	    (tp->t_flags & TF_NOPUSH) == 0) {
		pass = 2;
		goto send;
@@ -8964,7 +8964,7 @@ just_return_nolock:
send:
	if ((flags & TH_FIN) &&
	    sbavail(&tp->t_inpcb->inp_socket->so_snd)) {
		/*
		 * We do not transmit a FIN
		 * with data outstanding. We
		 * need to make it so all data
@@ -9170,7 +9170,7 @@ send:
				len -= moff;
				sendalot = 1;
			}
		}
		/*
		 * In case there are too many small fragments don't
		 * use TSO:
@@ -9294,14 +9294,14 @@ send:
			    tp,
#endif
			    mb, moff, &len,
			    if_hw_tsomaxsegcount, if_hw_tsomaxsegsize, msb,
			    ((rsm == NULL) ? hw_tls : 0)
#ifdef NETFLIX_COPY_ARGS
			    , &filled_all
#endif
			    );
			if (len <= (tp->t_maxseg - optlen)) {
				/*
				 * Must have ran out of mbufs for the copy
				 * shorten it to no longer need tso. Lets
				 * not put on sendalot since we are low on
@@ -10058,13 +10058,13 @@ enobufs:
	rack->r_tlp_running = 0;
	if (flags & TH_RST) {
		/*
		 * We don't send again after sending a RST.
		 */
		slot = 0;
		sendalot = 0;
	}
	if (rsm && (slot == 0)) {
		/*
		 * Dup ack retransmission possibly, so
		 * lets assure we have at least min rack
		 * time, if its a rack resend then the rack
@@ -10282,7 +10282,7 @@ rack_set_sockopt(struct socket *so, struct sockopt *sopt,
		break;
	case TCP_RACK_GP_INCREASE:
		if ((optval >= 0) &&
		    (optval <= 256))
			rack->rack_per_of_gp = optval;
		else
			error = EINVAL;

@@ -173,7 +173,7 @@ again:
 *  - INP_SUPPORTS_MBUFQ
 *  - INP_MBUF_QUEUE_READY
 *  - INP_DONT_SACK_QUEUE
 *
 * These flags help control how LRO will deliver
 * packets to the transport. You first set in inp_flags2
 * the INP_SUPPORTS_MBUFQ to tell the LRO code that you
@@ -191,9 +191,9 @@ again:
 *
 * Now there are some interesting Caveats that the transport
 * designer needs to take into account when using this feature.
 *
 * 1) It is used with HPTS and pacing, when the pacing timer
 *    for output calls it will first call the input.
 * 2) When you set INP_MBUF_QUEUE_READY this tells LRO
 *    queue normal packets, I am busy pacing out data and
 *    will process the queued packets before my tfb_tcp_output
@@ -207,7 +207,7 @@ again:
 *    the loss.
 *
 * Now a critical thing you must be aware of here is that the
 * use of the flags has a far greater scope then just your
 * typical LRO. Why? Well thats because in the normal compressed
 * LRO case at the end of a driver interupt all packets are going
 * to get presented to the transport no matter if there is one
@@ -216,9 +216,9 @@ again:
 *    a) The flags discussed above allow it.
 *  <or>
 *    b) You exceed a ack or data limit (by default the
 *       ack limit is infinity (64k acks) and the data
 *       limit is 64k of new TCP data)
 *  <or>
 *    c) The push bit has been set by the peer
 */

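
Put together, the protocol this comment describes is a pair of bits in inp_flags2, set before the connection goes onto the pacer and cleared when per-packet wakeups should resume. A hedged sketch of that sequence — the flag names and the set/clear pattern come from the rack_start_hpts_timer() hunk earlier in the diff, while the constants and struct here are illustrative stand-ins for the kernel's own:

#include <stdint.h>
#include <stdio.h>

#define INP_SUPPORTS_MBUFQ	0x01	/* transport can take queued mbufs */
#define INP_MBUF_QUEUE_READY	0x02	/* queue packets; pacer will drain */
#define INP_DONT_SACK_QUEUE	0x04	/* not even a SACK should wake us */

struct fake_inp { uint32_t inp_flags2; };

static void
pace_out(struct fake_inp *inp, int rack_timer_up)
{
	/* Pacing: let LRO queue instead of waking us per packet. */
	inp->inp_flags2 |= INP_MBUF_QUEUE_READY;
	if (rack_timer_up)
		inp->inp_flags2 |= INP_DONT_SACK_QUEUE;
	else
		inp->inp_flags2 &= ~INP_DONT_SACK_QUEUE;
}

int
main(void)
{
	struct fake_inp inp = { INP_SUPPORTS_MBUFQ };

	pace_out(&inp, 1);
	printf("flags2 = 0x%x\n", (unsigned)inp.inp_flags2);	/* 0x7 */
	return (0);
}
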
@ -239,7 +239,7 @@ ctf_process_inbound_raw(struct tcpcb *tp, struct socket *so, struct mbuf *m, int
|
|||||||
* after adjusting the time to match the arrival time.
|
* after adjusting the time to match the arrival time.
|
||||||
* Note that the LRO code assures no IP options are present.
|
* Note that the LRO code assures no IP options are present.
|
||||||
*
|
*
|
||||||
* The symantics for calling tfb_tcp_hpts_do_segment are the
|
* The symantics for calling tfb_tcp_hpts_do_segment are the
|
||||||
* following:
|
* following:
|
||||||
* 1) It returns 0 if all went well and you (the caller) need
|
* 1) It returns 0 if all went well and you (the caller) need
|
||||||
* to release the lock.
|
* to release the lock.
|
||||||
@ -274,7 +274,7 @@ ctf_process_inbound_raw(struct tcpcb *tp, struct socket *so, struct mbuf *m, int
|
|||||||
if (ifp) {
|
if (ifp) {
|
||||||
bpf_req = bpf_peers_present(ifp->if_bpf);
|
bpf_req = bpf_peers_present(ifp->if_bpf);
|
||||||
} else {
|
} else {
|
||||||
/*
|
/*
|
||||||
* We probably should not work around
|
* We probably should not work around
|
||||||
* but kassert, since lro alwasy sets rcvif.
|
* but kassert, since lro alwasy sets rcvif.
|
||||||
*/
|
*/
|
||||||
@ -406,7 +406,7 @@ skip_vnet:
|
|||||||
}
|
}
|
||||||
tlen -= off;
|
tlen -= off;
|
||||||
drop_hdrlen += off;
|
drop_hdrlen += off;
|
||||||
/*
|
/*
|
||||||
* Now lets setup the timeval to be when we should
|
* Now lets setup the timeval to be when we should
|
||||||
* have been called (if we can).
|
* have been called (if we can).
|
||||||
*/
|
*/
|
||||||
@@ -470,7 +470,7 @@ ctf_outstanding(struct tcpcb *tp)
return(tp->snd_max - tp->snd_una);
}

uint32_t
ctf_flight_size(struct tcpcb *tp, uint32_t rc_sacked)
{
if (rc_sacked <= ctf_outstanding(tp))
@@ -480,7 +480,7 @@ ctf_flight_size(struct tcpcb *tp, uint32_t rc_sacked)
#ifdef INVARIANTS
    panic("tp:%p rc_sacked:%d > out:%d",
        tp, rc_sacked, ctf_outstanding(tp));
#endif
    return (0);
}
}
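Together the two functions say: flight size is bytes outstanding (snd_max - snd_una) minus bytes already SACKed, clamped at zero. A stand-alone restatement with plain integers instead of a tcpcb (names here are illustrative):

#include <stdint.h>

static uint32_t
outstanding(uint32_t snd_una, uint32_t snd_max)
{
    return (snd_max - snd_una);
}

static uint32_t
flight_size(uint32_t snd_una, uint32_t snd_max, uint32_t sacked)
{
    uint32_t out = outstanding(snd_una, snd_max);

    /* SACKed bytes are no longer in flight; never underflow. */
    return ((sacked <= out) ? (out - sacked) : 0);
}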
@@ -821,7 +821,7 @@ ctf_fixed_maxseg(struct tcpcb *tp)
 * without a proper loop, and having most of paddings hardcoded.
 * We only consider fixed options that we would send every
 * time, i.e. SACK is not considered.
 *
 */
#define PAD(len) ((((len) / 4) + !!((len) % 4)) * 4)
if (TCPS_HAVEESTABLISHED(tp->t_state)) {
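PAD() rounds an option length up to the next multiple of four octets: PAD(10) = (2 + 1) * 4 = 12, while PAD(12) = (3 + 0) * 4 = 12. A quick user-space check of that arithmetic:

#include <assert.h>

#define PAD(len) ((((len) / 4) + !!((len) % 4)) * 4)

int
main(void)
{
    assert(PAD(1) == 4);    /* 0 words + remainder -> one word */
    assert(PAD(10) == 12);  /* 2 words + remainder -> 3 words */
    assert(PAD(12) == 12);  /* already word aligned */
    return (0);
}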
@@ -886,12 +886,12 @@ ctf_log_sack_filter(struct tcpcb *tp, int num_sack_blks, struct sackblk *sack_bl
}
}

uint32_t
ctf_decay_count(uint32_t count, uint32_t decay)
{
/*
 * Given a count, decay it by a set percentage. The
 * percentage is in thousandths, i.e. 100% = 1000,
 * 19.3% = 193.
 */
uint64_t perc_count, decay_per;
@@ -904,8 +904,8 @@ ctf_decay_count(uint32_t count, uint32_t decay)
decay_per = decay;
perc_count *= decay_per;
perc_count /= 1000;
/*
 * So now perc_count holds the
 * count decay value.
 */
decayed_count = count - (uint32_t)perc_count;
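Worked example: count = 1000 and decay = 193 gives perc_count = 1000 * 193 / 1000 = 193, so the decayed count is 807. The same arithmetic as a stand-alone function (the kernel version above differs only in its surrounding declarations):

#include <stdint.h>

/* decay is in thousandths: 1000 == 100%, 193 == 19.3%. */
static uint32_t
decay_count(uint32_t count, uint32_t decay)
{
    uint64_t perc_count = count;

    perc_count *= decay;
    perc_count /= 1000;
    /* perc_count now holds the amount to shed. */
    return (count - (uint32_t)perc_count);
}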
@@ -129,13 +129,13 @@ void
ctf_do_dropwithreset_conn(struct mbuf *m, struct tcpcb *tp, struct tcphdr *th,
    int32_t rstreason, int32_t tlen);

uint32_t
ctf_fixed_maxseg(struct tcpcb *tp);

void
ctf_log_sack_filter(struct tcpcb *tp, int num_sack_blks, struct sackblk *sack_blocks);

uint32_t
ctf_decay_count(uint32_t count, uint32_t decay_percentage);

#endif
@@ -61,7 +61,7 @@ __FBSDID("$FreeBSD$");
 * cum-ack A
 * sack D - E
 * sack B - C
 *
 * The previous sack information (B-C) is repeated
 * in SACK 2. If the receiver gets SACK 1 and then
 * SACK 2 then any work associated with B-C has already
@@ -69,8 +69,8 @@ __FBSDID("$FreeBSD$");
 * (as in bbr or rack) cases where we walk a linked list.
 *
 * Now the utility tries to keep everything in a single
 * cache line. This means that it's not perfect and
 * it could be that such big sacks come that a
 * "remembered" processed sack falls off the list and
 * so gets re-processed. That's ok, it just means we
 * did some extra work. We could of course take more
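Concretely, the intended call pattern looks like the sketch below: the first SACK teaches the filter about B-C, so the repeat in the second SACK is suppressed and only D-E comes back as new work. sack_filter_blks() and struct sackblk are this file's real interface; the sequence numbers are invented:

struct sack_filter sf;      /* assume sf was zeroed/initialized first */
struct sackblk blks[2];
int num;

/* SACK 1: cum-ack 100, sack 200-300 (our B-C). */
blks[0].start = 200; blks[0].end = 300;
num = sack_filter_blks(&sf, blks, 1, 100);  /* num == 1: B-C is new */

/* SACK 2: cum-ack 100, sack 400-500 (D-E) plus the repeated B-C. */
blks[0].start = 400; blks[0].end = 500;
blks[1].start = 200; blks[1].end = 300;
num = sack_filter_blks(&sf, blks, 2, 100);  /* num == 1: only D-E survives */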
@@ -135,7 +135,7 @@ sack_filter_prune(struct sack_filter *sf, tcp_seq th_ack)
sf->sf_ack = th_ack;
}

/*
 * Return true if you find that
 * the sackblock b is on the score
 * board. Update it along the way
@@ -179,7 +179,7 @@ is_sack_on_board(struct sack_filter *sf, struct sackblk *b)
if (SEQ_LT(sf->sf_blks[i].end, b->start)) {
    /**
     * Not near each other:
     *
     * board |---|
     * sack          |---|
     */
@@ -189,21 +189,21 @@ is_sack_on_board(struct sack_filter *sf, struct sackblk *b)
if (SEQ_GT(sf->sf_blks[i].start, b->end)) {
    /**
     * Not near each other:
     *
     * board         |---|
     * sack  |---|
     */
    goto nxt_blk;
}
if (SEQ_LEQ(sf->sf_blks[i].start, b->start)) {
    /**
     * The board block partially meets:
     *
     * board |--------|
     * sack      |----------|
     * <or>
     * board |--------|
     * sack  |--------------|
     *
     * up with this one (we have part of it).
     * 1) Update the board block to the new end
@@ -215,14 +215,14 @@ is_sack_on_board(struct sack_filter *sf, struct sackblk *b)
    goto nxt_blk;
}
if (SEQ_GEQ(sf->sf_blks[i].end, b->end)) {
    /**
     * The board block partially meets:
     *
     * board     |--------|
     * sack  |----------|
     * <or>
     * board       |----|
     * sack  |----------|
     * 1) Update the board block to the new start
     *    and
     * 2) Update the start of this block to my end.
@@ -231,7 +231,7 @@ is_sack_on_board(struct sack_filter *sf, struct sackblk *b)
    sf->sf_blks[i].start = b->start;
    goto nxt_blk;
}
}
nxt_blk:
i++;
i %= SACK_FILTER_BLOCKS;
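The partial-overlap cases above only ever stretch one edge of a remembered block. A compact restatement of that rule, with the SEQ_* wraparound comparisons inlined (a sketch, not the kernel helper):

#include <stdint.h>

typedef uint32_t tcp_seq;
#define SEQ_LT(a, b)    ((int32_t)((a) - (b)) < 0)
#define SEQ_GT(a, b)    ((int32_t)((a) - (b)) > 0)

struct blk {
    tcp_seq start;
    tcp_seq end;
};

/* Grow a board block to absorb an overlapping sack block. */
static void
absorb(struct blk *board, const struct blk *sack)
{
    if (SEQ_GT(sack->end, board->end))
        board->end = sack->end;         /* case 1: new end */
    if (SEQ_LT(sack->start, board->start))
        board->start = sack->start;     /* case 2: new start */
}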
@@ -248,7 +248,7 @@ sack_filter_old(struct sack_filter *sf, struct sackblk *in, int numblks)
{
int32_t num, i;
struct sackblk blkboard[TCP_MAX_SACK];
/*
 * An old sack has arrived. It may contain data
 * we do not have. We might not have it since
 * we could have had a lost ack <or> we might have the
@@ -263,8 +263,8 @@ sack_filter_old(struct sack_filter *sf, struct sackblk *in, int numblks)
#endif
    continue;
}
/* Did not find it (or found only
 * a piece of it). Copy it to
 * our outgoing board.
 */
memcpy(&blkboard[num], &in[i], sizeof(struct sackblk));
@@ -279,8 +279,8 @@ sack_filter_old(struct sack_filter *sf, struct sackblk *in, int numblks)
return (num);
}

/*
 * Given that idx is used but there is space available,
 * move the entry to the next free slot.
 */
static void
@@ -291,7 +291,7 @@ sack_move_to_empty(struct sack_filter *sf, uint32_t idx)
i = (idx + 1) % SACK_FILTER_BLOCKS;
for (cnt = 0; cnt < (SACK_FILTER_BLOCKS - 1); cnt++) {
    if (sack_blk_used(sf, i) == 0) {
        memcpy(&sf->sf_blks[i], &sf->sf_blks[idx], sizeof(struct sackblk));
        sf->sf_bits = sack_blk_clr(sf, idx);
        sf->sf_bits = sack_blk_set(sf, i);
        return;
@@ -306,9 +306,9 @@ sack_filter_new(struct sack_filter *sf, struct sackblk *in, int numblks, tcp_seq
{
struct sackblk blkboard[TCP_MAX_SACK];
int32_t num, i;
/*
 * First let's trim the old and possibly
 * throw away any we have.
 */
for (i = 0, num = 0; i < numblks; i++) {
    if (is_sack_on_board(sf, &in[i]))
@@ -319,7 +319,7 @@ sack_filter_new(struct sack_filter *sf, struct sackblk *in, int numblks, tcp_seq
if (num == 0)
    return (num);

/* Now what we are left with is either
 * completely merged on to the board
 * from the above steps, or is new
 * and needs to be added to the board
@@ -328,7 +328,7 @@ sack_filter_new(struct sack_filter *sf, struct sackblk *in, int numblks, tcp_seq
 * First copy it out, we want to return that
 * to our caller for processing.
 */
memcpy(in, blkboard, (num * sizeof(struct sackblk)));
numblks = num;
/* Now go through and add to our board as needed */
for (i = (num - 1); i >= 0; i--) {
@@ -370,7 +370,7 @@ static int32_t
sack_blocks_overlap_or_meet(struct sack_filter *sf, struct sackblk *sb, uint32_t skip)
{
int32_t i;

for (i = 0; i < SACK_FILTER_BLOCKS; i++) {
    if (sack_blk_used(sf, i) == 0)
        continue;
@@ -379,14 +379,14 @@ sack_blocks_overlap_or_meet(struct sack_filter *sf, struct sackblk *sb, uint32_t
if (SEQ_GEQ(sf->sf_blks[i].end, sb->start) &&
    SEQ_LEQ(sf->sf_blks[i].end, sb->end) &&
    SEQ_LEQ(sf->sf_blks[i].start, sb->start)) {
    /**
     * The two board blocks meet:
     *
     * board1 |--------|
     * board2     |----------|
     * <or>
     * board1 |--------|
     * board2 |--------------|
     * <or>
     * board1 |--------|
     * board2 |--------|
@@ -396,14 +396,14 @@ sack_blocks_overlap_or_meet(struct sack_filter *sf, struct sackblk *sb, uint32_t
if (SEQ_LEQ(sf->sf_blks[i].start, sb->end) &&
    SEQ_GEQ(sf->sf_blks[i].start, sb->start) &&
    SEQ_GEQ(sf->sf_blks[i].end, sb->end)) {
    /**
     * The board block partially meets:
     *
     * board     |--------|
     * sack  |----------|
     * <or>
     * board       |----|
     * sack  |----------|
     * 1) Update the board block to the new start
     *    and
     * 2) Update the start of this block to my end.
@@ -442,7 +442,7 @@ sack_board_collapse(struct sack_filter *sf)
if (sack_blk_used(sf, i) == 0)
    continue;
/*
 * Look at all other blocks but this guy
 * to see if they overlap. If so we collapse
 * the two blocks together.
 */
@@ -451,7 +451,7 @@ sack_board_collapse(struct sack_filter *sf)
    /* No overlap */
    continue;
}
/*
 * OK, j and i overlap with each other; collapse the
 * one furthest away from the current position.
 */
@@ -500,11 +500,11 @@ sack_filter_blks(struct sack_filter *sf, struct sackblk *in, int numblks,
    tcp_seq th_ack)
{
int32_t i, ret;

if (numblks > TCP_MAX_SACK) {
#ifdef _KERNEL
    panic("sf:%p sb:%p Impossible number of sack blocks %d > 4\n",
        sf, in,
        numblks);
#endif
    return(numblks);
@@ -513,13 +513,13 @@ sack_filter_blks(struct sack_filter *sf, struct sackblk *in, int numblks,
if ((sf->sf_used > 1) && (no_collapse == 0))
    sack_board_collapse(sf);

#else
if (sf->sf_used > 1)
    sack_board_collapse(sf);
#endif
if ((sf->sf_used == 0) && numblks) {
    /*
     * We are brand new; add the blocks in
     * reverse order. Note we can see more
     * than one in new, since acks could be lost.
     */
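The empty-board path just copies the incoming blocks back-to-front. A sketch of that loop shape (field names follow the structure used throughout this file; the sf_bits bookkeeping is left out):

/* Sketch: populate an empty board from in[0..numblks-1] in reverse. */
for (i = numblks - 1; i >= 0; i--) {
    memcpy(&sf->sf_blks[sf->sf_used], &in[i], sizeof(struct sackblk));
    sf->sf_used++;
}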
@@ -560,15 +560,15 @@ sack_filter_blks(struct sack_filter *sf, struct sackblk *in, int numblks,
void
sack_filter_reject(struct sack_filter *sf, struct sackblk *in)
{
/*
 * Given a specified block (one that had made
 * it past the sack filter), reject that
 * block, trimming it off any sack-filter block
 * that has it. Usually because the block was
 * too small and did not cover a whole send.
 *
 * This function will only "undo" sack-blocks
 * that are fresh and touch the edges of
 * blocks in our filter.
 */
int i;
@@ -576,9 +576,9 @@ sack_filter_reject(struct sack_filter *sf, struct sackblk *in)
for (i = 0; i < SACK_FILTER_BLOCKS; i++) {
    if (sack_blk_used(sf, i) == 0)
        continue;
    /*
     * Now, given the sack-filter block, does it touch
     * one of the ends?
     */
    if (sf->sf_blks[i].end == in->end) {
        /* The end moves back to start */
@@ -42,7 +42,7 @@
#define BBR_HAS_FIN      0x0040  /* segment is sent with fin */
#define BBR_TLP          0x0080  /* segment sent as tail-loss-probe */
#define BBR_HAS_SYN      0x0100  /* segment has the syn */
#define BBR_MARKED_LOST  0x0200  /*
                                  * This segment is lost and
                                  * totaled into bbr->rc_ctl.rc_lost
                                  */
@@ -55,8 +55,8 @@
#define BBR_INCL_TCP_OH  0x03

/*
 * With the addition of both measurement algorithms
 * I had to move over the size of a
 * cache line (unfortunately). For now there is
 * no way around this. We may be able to cut back
 * at some point I hope.
@@ -221,8 +221,8 @@ struct bbr_rtt_sample {
#define BBR_RT_FLAG_LIMITED    0x20   /* Saw application/cwnd or rwnd limited period */
#define BBR_RT_SEEN_A_ACK      0x40   /* An ack has been saved */
#define BBR_RT_PREV_RTT_SET    0x80   /* There was an RTT set in */
#define BBR_RT_PREV_SEND_TIME  0x100  /*
                                       * There was an RTT send time set that can be used,
                                       * no snd_limits
                                       */
#define BBR_RT_SET_GRADIENT    0x200
@@ -570,7 +570,7 @@ struct bbr_control {
    rc_pace_min_segs:15;          /* The minimum single segment size before we enter persists */

    uint32_t rc_rtt_shrinks;      /* Time of last rtt shrinkage Lock(a) */
    uint32_t r_app_limited_until;
    uint32_t rc_timer_exp;        /* If a timer ticks of expiry */
    uint32_t rc_rcv_epoch_start;  /* Start time of the Epoch Lock(a) */

@@ -598,7 +598,7 @@ struct bbr_control {
    uint32_t rc_reorder_ts;  /* Last time we saw reordering Lock(a) */
    uint32_t rc_init_rwnd;   /* Initial rwnd when we transitioned */
    /*- ---
     * used only initial and close
     */
    uint32_t rc_high_rwnd;   /* Highest rwnd seen */
    uint32_t rc_lowest_rtt;  /* Smallest RTT we have seen */
@@ -251,7 +251,7 @@ struct rack_control {
    uint32_t rc_rcvtime;           /* When we last received data */
    uint32_t rc_num_split_allocs;  /* num split map entries allocated */

    uint32_t rc_last_output_to;
    uint32_t rc_went_idle_time;

    struct rack_sendmap *rc_sacklast;  /* sack remembered place
@@ -266,7 +266,7 @@ struct rack_control {
    /* Cache line split 0x140 */
    /* Flags for various things */
    uint32_t rc_pace_max_segs;
    uint32_t rc_pace_min_segs;
    uint32_t rc_high_rwnd;
    uint32_t ack_count;
    uint32_t sack_count;
@@ -333,7 +333,7 @@ struct tcp_rack {
    uint8_t rc_allow_data_af_clo: 1,
        delayed_ack : 1,
        set_pacing_done_a_iw : 1,
        use_rack_cheat : 1,
        alloc_limit_reported : 1,
        sack_attack_disable : 1,
        do_detection : 1,