Fix PR-SCTP bugs.

A full-sized frame of PR-SCTP FWD-TSNs would not be sent, which
could stall the connection. The rwnd calculation on the receiver
side was also off for PR-SCTP.
MFC after:	1 month
This commit is contained in:
Randall Stewart 2010-07-29 11:37:04 +00:00
parent 762aad8142
commit 44fbe46280
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=210599
4 changed files with 48 additions and 31 deletions

View File

@ -91,8 +91,10 @@ sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
* take out what has NOT been put on the socket queue and that we
* still hold for delivery.
*/
calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_reasm_queue);
calc = sctp_sbspace_sub(calc, (uint32_t) asoc->size_on_all_streams);
calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
asoc->cnt_on_reasm_queue * MSIZE));
calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
asoc->cnt_on_all_streams * MSIZE));
if (calc == 0) {
/* out of space */
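
For context, a rough sketch of what this receiver-side change does: the advertised rwnd now charges each queued chunk for its mbuf bookkeeping overhead (MSIZE per chunk) on top of its payload bytes, so many small queued chunks can no longer make the window look larger than the buffer space they really consume. All names and the MSIZE value below are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

#define MSIZE 256   /* illustrative per-mbuf overhead; the real constant is platform-defined */

/* clamp-to-zero subtraction, in the spirit of sctp_sbspace_sub() */
static uint32_t
sbspace_sub(uint32_t space, uint32_t used)
{
    return (space > used) ? space - used : 0;
}

/* hypothetical helper: space left after charging queued bytes plus per-chunk overhead */
static uint32_t
calc_rwnd(uint32_t sb_space, uint32_t reasm_bytes, uint32_t reasm_chunks,
    uint32_t strm_bytes, uint32_t strm_chunks)
{
    uint32_t calc = sb_space;

    calc = sbspace_sub(calc, reasm_bytes + reasm_chunks * MSIZE);
    calc = sbspace_sub(calc, strm_bytes + strm_chunks * MSIZE);
    return calc;
}

int
main(void)
{
    /* 200 small chunks: the payload alone leaves room, the overhead nearly closes the window */
    printf("rwnd = %u\n", calc_rwnd(64000, 4000, 100, 8000, 100));
    return 0;
}
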
@ -3322,6 +3324,10 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
/* either a RESEND, ACKED, or MARKED */
/* skip */
if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
/* Continue striking FWD-TSN chunks */
tp1->rec.data.fwd_tsn_cnt++;
}
tp1 = TAILQ_NEXT(tp1, sctp_next);
continue;
}
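
A toy model of the new accounting in this strike loop: chunks already abandoned under PR-SCTP (FORWARD_TSN_SKIP) are still skipped as data, but each pass now bumps a per-chunk counter so that a FORWARD-TSN lost on the wire can be fast-retransmitted (see the >= 3 test further down). The states and struct below are simplified stand-ins for the real sctp_tmit_chunk:

#include <stdio.h>

/* illustrative states, loosely mirroring SCTP_DATAGRAM_* / SCTP_FORWARD_TSN_SKIP */
enum chunk_state { UNSENT, SENT, RESEND, ACKED, FWD_TSN_SKIP };

struct chunk {
    enum chunk_state sent;
    int fwd_tsn_cnt;    /* how many SACK passes have reported past this abandoned chunk */
};

int
main(void)
{
    struct chunk q[] = { { ACKED, 0 }, { FWD_TSN_SKIP, 2 }, { SENT, 0 } };
    int i;

    for (i = 0; i < 3; i++) {
        if (q[i].sent >= RESEND) {
            /* already resent, acked, or abandoned: not struck as data... */
            if (q[i].sent == FWD_TSN_SKIP)
                q[i].fwd_tsn_cnt++;    /* ...but keep scoring the missing FORWARD-TSN */
            continue;
        }
        /* normal strike / fast-retransmit accounting would go here */
    }
    printf("strikes on the abandoned chunk: %d\n", q[1].fwd_tsn_cnt);
    return 0;
}
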
@ -3707,7 +3713,6 @@ sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
tp1 = TAILQ_FIRST(&asoc->sent_queue);
while (tp1) {
if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
tp1->sent != SCTP_DATAGRAM_ACKED &&
tp1->sent != SCTP_DATAGRAM_RESEND) {
/* no chance to advance, out of here */
break;
@ -3763,8 +3768,7 @@ sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
* the chunk, advance our peer ack point and we can check
* the next chunk.
*/
if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
(tp1->sent == SCTP_DATAGRAM_ACKED)) {
if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
/* advance PeerAckPoint goes forward */
if (compare_with_wrap(tp1->rec.data.TSN_seq,
asoc->advanced_peer_ack_point,
@ -3905,7 +3909,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
} else if (asoc->last_acked_seq == cumack) {
/* Window update sack */
asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
(uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
(uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
/* SWS sender side engages */
asoc->peers_rwnd = 0;
@ -4189,7 +4193,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
}
/* RWND update */
asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
(uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
(uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
/* SWS sender side engages */
asoc->peers_rwnd = 0;
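
The sender-side rwnd fix, with made-up numbers: peer-buffer overhead is now charged only for chunks actually in flight (total_flight_count) rather than for everything still on the sent queue (sent_queue_cnt), which under PR-SCTP can include many acked or abandoned chunks and made the peer's window look far smaller than it was. PEER_CHUNK_OH and the SWS threshold below are illustrative, not the sysctl defaults:

#include <stdint.h>
#include <stdio.h>

#define PEER_CHUNK_OH 256   /* hypothetical per-chunk overhead charged against the peer */
#define SWS_SENDER   1420   /* hypothetical sender-side silly-window threshold */

static uint32_t
sbspace_sub(uint32_t space, uint32_t used)
{
    return (space > used) ? space - used : 0;
}

int
main(void)
{
    uint32_t a_rwnd = 20000;        /* window advertised in the SACK */
    uint32_t total_flight = 12000;  /* bytes actually outstanding */
    uint32_t flight_count = 10;     /* chunks actually outstanding */
    uint32_t sent_queue_cnt = 40;   /* also counts acked/abandoned chunks still queued */

    /* old: overhead for every queued chunk wipes out the window entirely */
    uint32_t old_rwnd = sbspace_sub(a_rwnd, total_flight + sent_queue_cnt * PEER_CHUNK_OH);
    /* new: only in-flight chunks cost the peer buffer overhead */
    uint32_t new_rwnd = sbspace_sub(a_rwnd, total_flight + flight_count * PEER_CHUNK_OH);

    if (new_rwnd < SWS_SENDER)
        new_rwnd = 0;   /* sender-side silly window avoidance, as in the hunk above */
    printf("old rwnd %u, new rwnd %u\n", old_rwnd, new_rwnd);
    return 0;
}
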
@ -4404,10 +4408,8 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
} else if (lchk) {
/* try to FR fwd-tsn's that get lost too */
lchk->rec.data.fwd_tsn_cnt++;
if (lchk->rec.data.fwd_tsn_cnt > 3) {
if (lchk->rec.data.fwd_tsn_cnt >= 3) {
send_forward_tsn(stcb, asoc);
lchk->rec.data.fwd_tsn_cnt = 0;
}
}
}
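
A toy model of the fast-retransmit rule for a lost FORWARD-TSN: the per-chunk strike counter (now incremented in the gap-ack strike pass above rather than here) triggers a resend once three SACKs have reported past the abandoned data, one report earlier than the old "> 3" test, and is then reset. The stub below merely stands in for the real send_forward_tsn():

#include <stdio.h>

/* hypothetical stand-in for queueing a FORWARD-TSN chunk */
static void
send_forward_tsn_stub(void)
{
    printf("FORWARD-TSN re-sent\n");
}

int
main(void)
{
    int fwd_tsn_cnt = 0;
    int sack;

    for (sack = 1; sack <= 4; sack++) {
        fwd_tsn_cnt++;               /* one strike per qualifying SACK */
        if (fwd_tsn_cnt >= 3) {      /* was "> 3": needed a fourth report */
            send_forward_tsn_stub();
            fwd_tsn_cnt = 0;         /* start over for the next loss */
        }
    }
    return 0;
}
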
@ -5188,10 +5190,10 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
/* Adjust and set the new rwnd value */
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
asoc->peers_rwnd, asoc->total_flight, (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
}
asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
(uint32_t) (asoc->total_flight + (asoc->sent_queue_cnt * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
(uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
/* SWS sender side engages */
asoc->peers_rwnd = 0;
@ -5314,6 +5316,7 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
}
if (compare_with_wrap(asoc->advanced_peer_ack_point, old_adv_peer_ack_point,
MAX_TSN)) {
send_forward_tsn(stcb, asoc);
/*
* ECN Nonce: Disable Nonce Sum check when
@ -5323,10 +5326,8 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
asoc->nonce_resync_tsn = asoc->advanced_peer_ack_point;
} else if (lchk) {
/* try to FR fwd-tsn's that get lost too */
lchk->rec.data.fwd_tsn_cnt++;
if (lchk->rec.data.fwd_tsn_cnt > 3) {
if (lchk->rec.data.fwd_tsn_cnt >= 3) {
send_forward_tsn(stcb, asoc);
lchk->rec.data.fwd_tsn_cnt = 0;
}
}
}

View File

@ -6579,6 +6579,8 @@ sctp_clean_up_ctl(struct sctp_tcb *stcb, struct sctp_association *asoc)
chk->data = NULL;
}
asoc->ctrl_queue_cnt--;
if (chk->rec.chunk_id.id == SCTP_FORWARD_CUM_TSN)
asoc->fwd_tsn_cnt--;
sctp_free_a_chunk(stcb, chk);
} else if (chk->rec.chunk_id.id == SCTP_STREAM_RESET) {
/* special handling, we must look into the param */
@ -7799,7 +7801,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
} else
omtu = 0;
/* Here we do NOT factor the r_mtu */
if ((chk->send_size < (int)(mtu - omtu)) ||
if ((chk->send_size <= (int)(mtu - omtu)) ||
(chk->flags & CHUNK_FLAGS_FRAGMENT_OK)) {
/*
* We probably should glom the mbuf chain
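
This "<=" appears to be the "full sized frame" fix from the commit message: with the strict "<", a chunk whose size exactly equalled the space left in the packet was never bundled and never sent, so a full-sized FORWARD-TSN simply sat in the queue. A toy check with made-up sizes:

#include <stdio.h>

int
main(void)
{
    int mtu = 1280, omtu = 16;   /* illustrative path MTU and per-packet overhead */
    int send_size = 1264;        /* chunk exactly fills what is left */

    if (send_size < (mtu - omtu))
        printf("<  : chunk sent\n");
    else
        printf("<  : chunk stays queued\n");    /* the stall this commit fixes */

    if (send_size <= (mtu - omtu))
        printf("<= : chunk sent\n");
    return 0;
}
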
@ -9704,6 +9706,7 @@ send_forward_tsn(struct sctp_tcb *stcb,
if (chk == NULL) {
return;
}
asoc->fwd_tsn_cnt++;
chk->copy_by_ref = 0;
chk->rec.chunk_id.id = SCTP_FORWARD_CUM_TSN;
chk->rec.chunk_id.can_take_data = 0;
@ -9735,8 +9738,8 @@ send_forward_tsn(struct sctp_tcb *stcb,
unsigned int cnt_of_skipped = 0;
TAILQ_FOREACH(at, &asoc->sent_queue, sctp_next) {
if ((at->sent != SCTP_FORWARD_TSN_SKIP) &&
(at->sent != SCTP_DATAGRAM_ACKED)) {
if ((at->sent != SCTP_FORWARD_TSN_SKIP) /* && (at->sent !=
SCTP_DATAGRAM_ACKED) */ ) {
/* no more to look at */
break;
}
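
The new asoc->fwd_tsn_cnt is simple bookkeeping: it goes up when send_forward_tsn() queues a FORWARD-TSN control chunk and back down when sctp_clean_up_ctl() frees one (the earlier hunk in this file), presumably so the output path can tell whether a FORWARD-TSN is still pending. A minimal sketch of that counter discipline, using a stripped-down association:

#include <stdio.h>

/* toy association: only the two counters this change touches */
struct assoc {
    int ctrl_queue_cnt;
    int fwd_tsn_cnt;
};

static void
queue_forward_tsn(struct assoc *a)
{
    a->ctrl_queue_cnt++;
    a->fwd_tsn_cnt++;    /* mirrors the increment added in send_forward_tsn() */
}

static void
free_forward_tsn(struct assoc *a)
{
    a->ctrl_queue_cnt--;
    a->fwd_tsn_cnt--;    /* mirrors the decrement added in sctp_clean_up_ctl() */
}

int
main(void)
{
    struct assoc a = { 0, 0 };

    queue_forward_tsn(&a);
    free_forward_tsn(&a);
    printf("pending FORWARD-TSNs: %d\n", a.fwd_tsn_cnt);
    return 0;
}
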

View File

@ -869,6 +869,7 @@ struct sctp_association {
unsigned int size_on_reasm_queue;
unsigned int cnt_on_reasm_queue;
unsigned int fwd_tsn_cnt;
/* amount of data (bytes) currently in flight (on all destinations) */
unsigned int total_flight;
/* Total book size in flight */

View File

@ -545,7 +545,7 @@ static void
sctp_backoff_on_timeout(struct sctp_tcb *stcb,
struct sctp_nets *net,
int win_probe,
int num_marked)
int num_marked, int num_abandoned)
{
if (net->RTO == 0) {
net->RTO = stcb->asoc.minrto;
@ -554,7 +554,7 @@ sctp_backoff_on_timeout(struct sctp_tcb *stcb,
if (net->RTO > stcb->asoc.maxrto) {
net->RTO = stcb->asoc.maxrto;
}
if ((win_probe == 0) && num_marked) {
if ((win_probe == 0) && (num_marked || num_abandoned)) {
/* We don't apply penalty to window probe scenarios */
/* JRS - Use the congestion control given in the CC module */
stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net);
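
A toy model of the backoff change: the RTO still backs off and is clamped to its maximum, but the congestion-window penalty is now applied whenever the timeout covered chunks that were either marked for retransmission or abandoned under PR-SCTP; previously a timeout in which everything was abandoned (num_marked == 0) escaped the penalty. The RTO bounds and the doubling below are illustrative:

#include <stdio.h>

#define MINRTO  1000    /* illustrative bounds in ms; the real ones live in the association */
#define MAXRTO 60000

static int
backoff_on_timeout(int rto, int win_probe, int num_marked, int num_abandoned)
{
    if (rto == 0)
        rto = MINRTO;
    rto *= 2;                       /* exponential backoff */
    if (rto > MAXRTO)
        rto = MAXRTO;
    if (win_probe == 0 && (num_marked || num_abandoned)) {
        /* marked or PR-SCTP-abandoned chunks timed out: take the cwnd hit too */
        printf("cwnd reduced\n");
    }
    return rto;
}

int
main(void)
{
    /* all timed-out chunks were abandoned: the penalty now still applies */
    printf("new RTO: %d ms\n", backoff_on_timeout(2000, 0, 0, 3));
    return 0;
}
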
@ -612,7 +612,8 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
struct sctp_nets *net,
struct sctp_nets *alt,
int window_probe,
int *num_marked)
int *num_marked,
int *num_abandoned)
{
/*
@ -621,10 +622,11 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
* We only mark chunks that have been outstanding long enough to
* have received feed-back.
*/
struct sctp_tmit_chunk *chk, *tp2, *could_be_sent = NULL;
struct sctp_tmit_chunk *chk, *tp2;
struct sctp_nets *lnets;
struct timeval now, min_wait, tv;
int cur_rtt;
int cnt_abandoned;
int audit_tf, num_mk, fir;
unsigned int cnt_mk;
uint32_t orig_flight, orig_tf;
@ -680,6 +682,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
net->fast_retran_ip = 0;
/* Now on to each chunk */
cnt_abandoned = 0;
num_mk = cnt_mk = 0;
tsnfirst = tsnlast = 0;
#ifndef INVARIANTS
@ -768,6 +771,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
chk,
(SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
SCTP_SO_NOT_LOCKED);
cnt_abandoned++;
}
continue;
}
@ -780,6 +784,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
chk,
(SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
SCTP_SO_NOT_LOCKED);
cnt_abandoned++;
}
continue;
}
@ -841,9 +846,11 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1) {
chk->no_fr_allowed = 1;
}
#ifdef THIS_SHOULD_NOT_BE_DONE
} else if (chk->sent == SCTP_DATAGRAM_ACKED) {
/* remember highest acked one */
could_be_sent = chk;
#endif
}
if (chk->sent == SCTP_DATAGRAM_RESEND) {
cnt_mk++;
@ -870,6 +877,7 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
}
#endif
*num_marked = num_mk;
*num_abandoned = cnt_abandoned;
/*
* Now check for an ECN Echo that may be stranded, and include the
* cnt_mk'd chunks so all resends are in the control queue.
@ -890,12 +898,14 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
atomic_add_int(&alt->ref_count, 1);
}
}
#ifdef THIS_SHOULD_NOT_BE_DONE
if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
/* fix it so we retransmit the highest acked anyway */
sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
cnt_mk++;
could_be_sent->sent = SCTP_DATAGRAM_RESEND;
}
#endif
if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
#ifdef INVARIANTS
SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n",
@ -996,7 +1006,7 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
struct sctp_nets *net)
{
struct sctp_nets *alt;
int win_probe, num_mk;
int win_probe, num_mk, num_abandoned;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
@ -1055,8 +1065,10 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
} else { /* CMT is OFF */
alt = sctp_find_alternate_net(stcb, net, 0);
}
(void)sctp_mark_all_for_resend(stcb, net, alt, win_probe, &num_mk);
num_mk = 0;
num_abandoned = 0;
(void)sctp_mark_all_for_resend(stcb, net, alt, win_probe,
&num_mk, &num_abandoned);
/* FR Loss recovery just ended with the T3. */
stcb->asoc.fast_retran_loss_recovery = 0;
@ -1070,7 +1082,7 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;
/* Backoff the timer and cwnd */
sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
sctp_backoff_on_timeout(stcb, net, win_probe, num_mk, num_abandoned);
if (win_probe == 0) {
/* We don't do normal threshold management on window probes */
if (sctp_threshold_management(inp, stcb, net,
@ -1221,7 +1233,7 @@ sctp_t1init_timer(struct sctp_inpcb *inp,
return (1);
}
stcb->asoc.dropped_special_cnt = 0;
sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0, 0);
if (stcb->asoc.initial_init_rto_max < net->RTO) {
net->RTO = stcb->asoc.initial_init_rto_max;
}
@ -1302,7 +1314,7 @@ sctp_cookie_timer(struct sctp_inpcb *inp,
* an alternate
*/
stcb->asoc.dropped_special_cnt = 0;
sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0, 0);
alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
if (alt != cookie->whoTo) {
sctp_free_remote_addr(cookie->whoTo);
@ -1347,7 +1359,7 @@ sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
* cleared threshold management, so now back off the address and
* select an alternate
*/
sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0, 0);
alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0);
sctp_free_remote_addr(strrst->whoTo);
strrst->whoTo = alt;
@ -1426,7 +1438,7 @@ sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
* cleared threshold management, so now backoff the net and
* select an alternate
*/
sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0, 0);
alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
if (asconf->whoTo != alt) {
sctp_free_remote_addr(asconf->whoTo);
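
Design note on the remaining callers: the INIT, COOKIE, stream-reset and ASCONF timers above, and the heartbeat timer below, all invoke sctp_backoff_on_timeout(stcb, ..., 1, 0, 0), i.e. with win_probe set and no marked or abandoned chunks, since those paths carry no user data; with win_probe non-zero the cwnd penalty is skipped there regardless, and only the T3 data timer supplies real counts.
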
@ -1643,7 +1655,7 @@ sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
net->ro._s_addr = NULL;
net->src_addr_selected = 0;
}
sctp_backoff_on_timeout(stcb, net, 1, 0);
sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
}
/* Zero PBA, if it needs it */
if (net->partial_bytes_acked) {