Many bug fixes (from the IETF hack-fest):

- PR-SCTP had major issues when skipping through a multi-part message
  (see the first sketch after this list):
  o Did not look at the socket buffer.
  o Did not properly handle the reassembly queue.
  o The MARKED segments could interfere and un-skip a chunk, causing
    a problem with building the proper FWD-TSN.
  o No fast retransmit (FR) of FWD-TSNs was being done.
- NR-SACK code was effectively disabled. It needed fixes, such as the
  wrap-safe TSN comparisons sketched second below, that never got into
  the real code.
- CMT code had issues when the two paths did NOT have the same
  bandwidth. We found a few small bugs, but the critical one here was
  not dividing the rwnd amongst the paths (third sketch below).
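
The first sketch is a minimal, self-contained illustration (not the
committed kernel code) of the un-skip bug in the first item: once
PR-SCTP has marked a chunk SCTP_FORWARD_TSN_SKIP, a later SACK must not
demote it back to a plain marked state, or the FWD-TSN built from the
send queue stops short. The enum values and the small driver are
assumptions for illustration; only the guarded assignment mirrors the
fix in the diff below.

	#include <stdio.h>

	/* Illustrative states; the kernel defines more, with other values. */
	enum chunk_state {
		SCTP_DATAGRAM_UNSENT,
		SCTP_DATAGRAM_MARKED,
		SCTP_FORWARD_TSN_SKIP
	};

	/*
	 * On a SACK gap-ack, mark the chunk, but leave PR-SCTP chunks
	 * that are scheduled to be skipped alone (the actual fix).
	 */
	static void
	mark_acked(enum chunk_state *sent)
	{
		if (*sent != SCTP_FORWARD_TSN_SKIP)
			*sent = SCTP_DATAGRAM_MARKED;
	}

	int
	main(void)
	{
		enum chunk_state a = SCTP_DATAGRAM_UNSENT;
		enum chunk_state b = SCTP_FORWARD_TSN_SKIP;

		mark_acked(&a);	/* a becomes MARKED */
		mark_acked(&b);	/* b stays FORWARD_TSN_SKIP */
		printf("a=%d b=%d\n", a, b);
		return (0);
	}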
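
The second sketch shows the wrap-safe TSN comparison that many of the
NR-SACK fixes below switch to: a plain ">" misorders sequence numbers
once the 32-bit TSN space wraps, so comparisons go through
compare_with_wrap()-style serial-number arithmetic instead. This is an
illustrative stand-in, not the kernel's exact implementation.

	#include <stdio.h>
	#include <stdint.h>

	#define MAX_TSN 0xffffffffU

	/*
	 * Nonzero when tsn1 is "newer" than tsn2, i.e. within half the
	 * sequence space ahead of it, even across a wrap of MAX_TSN.
	 */
	static int
	compare_with_wrap(uint32_t tsn1, uint32_t tsn2, uint32_t wrap)
	{
		return ((tsn1 > tsn2 && (tsn1 - tsn2) < (wrap / 2)) ||
		    (tsn1 < tsn2 && (tsn2 - tsn1) > (wrap / 2)));
	}

	int
	main(void)
	{
		/* plain ">" gets the wrap case backwards */
		printf("plain: %d wrap-safe: %d\n",
		    (uint32_t)5 > (uint32_t)0xfffffffbU,
		    compare_with_wrap(5, 0xfffffffbU, MAX_TSN));
		return (0);
	}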
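
The third sketch illustrates only the idea behind the critical CMT bug
in the last item; the committed fix itself is in the file whose diff is
suppressed below. The struct, helper name, and numbers here are all
hypothetical: with Concurrent Multipath Transfer the peer's receive
window is one shared resource, so each path may only spend its share of
the rwnd, not the whole window.

	#include <stdio.h>
	#include <stdint.h>

	struct path {
		uint32_t flight_size;	/* bytes in flight on this path */
	};

	/*
	 * Hypothetical helper (not from the commit): give each of
	 * num_paths (num_paths >= 1) destinations an equal share of
	 * the peer's one shared rwnd.
	 */
	static uint32_t
	cmt_path_send_allowance(uint32_t peers_rwnd, const struct path *p,
	    uint32_t num_paths)
	{
		uint32_t share = peers_rwnd / num_paths;	/* divide rwnd amongst paths */

		return ((p->flight_size >= share) ? 0 : share - p->flight_size);
	}

	int
	main(void)
	{
		struct path fast = {40000}, slow = {1000};

		/* a 64 kB rwnd split across two unequal-bandwidth paths */
		printf("fast may send %u, slow may send %u\n",
		    cmt_path_send_allowance(65536, &fast, 2),
		    cmt_path_send_allowance(65536, &slow, 2));
		return (0);
	}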

Obtained from:	Michael Tuexen and myself at the IETF hack-fest ;-)
Randall Stewart 2009-04-04 11:43:32 +00:00
parent dc60165b73
commit 8933fa13b6
10 changed files with 370 additions and 2124 deletions


@ -423,12 +423,13 @@ sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
nr_tsn = chk->rec.data.TSN_seq;
if (nr_tsn >= asoc->nr_mapping_array_base_tsn) {
if ((compare_with_wrap(nr_tsn, asoc->nr_mapping_array_base_tsn, MAX_TSN)) ||
(nr_tsn == asoc->nr_mapping_array_base_tsn)) {
nr_gap = nr_tsn - asoc->nr_mapping_array_base_tsn;
} else {
nr_gap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) + nr_tsn + 1;
}
- if ((nr_gap >= (SCTP_NR_MAPPING_ARRAY << 3)) ||
+ if ((nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3)) ||
(nr_gap >= (uint32_t) (asoc->nr_mapping_array_size << 3))) {
/*
* EY The 1st should never happen, as in
@ -440,10 +441,11 @@ sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
* nr_mapping_array is always expanded when
* mapping_array is expanded
*/
printf("Impossible nr_gap ack range failed\n");
} else {
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
- if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
asoc->highest_tsn_inside_nr_map = nr_tsn;
}
}
@ -550,7 +552,9 @@ sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
} else {
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
- if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ if (compare_with_wrap(nr_tsn,
+ asoc->highest_tsn_inside_nr_map,
+ MAX_TSN))
asoc->highest_tsn_inside_nr_map = nr_tsn;
}
}
@ -699,7 +703,7 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
} else {
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
- if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN))
asoc->highest_tsn_inside_nr_map = nr_tsn;
}
}
@ -760,7 +764,8 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
} else {
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
- if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
+ MAX_TSN))
asoc->highest_tsn_inside_nr_map = nr_tsn;
}
}
@ -2390,6 +2395,15 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
+ asoc->peer_supports_nr_sack &&
+ (SCTP_BASE_SYSCTL(sctp_do_drain) == 0)) {
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
+ if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
+ asoc->highest_tsn_inside_nr_map = tsn;
+ }
+ }
/* check the special flag for stream resets */
if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
((compare_with_wrap(asoc->cumulative_tsn, liste->tsn, MAX_TSN)) ||
@ -2498,9 +2512,9 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
int slide_from, slide_end, lgap, distance;
/* EY nr_mapping array variables */
- int nr_at;
- int nr_last_all_ones = 0;
- int nr_slide_from, nr_slide_end, nr_lgap, nr_distance;
+ /* int nr_at; */
+ /* int nr_last_all_ones = 0; */
+ /* int nr_slide_from, nr_slide_end, nr_lgap, nr_distance; */
uint32_t old_cumack, old_base, old_highest;
unsigned char aux_array[64];
@ -2683,102 +2697,19 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
SCTP_MAP_SLIDE_RESULT);
}
}
}
- /*
- * EY if doing nr_sacks then slide the nr_mapping_array accordingly
- * please
- */
- if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
- nr_at = 0;
- for (nr_slide_from = 0; nr_slide_from < stcb->asoc.nr_mapping_array_size; nr_slide_from++) {
- if (asoc->nr_mapping_array[nr_slide_from] == 0xff) {
- nr_at += 8;
- nr_last_all_ones = 1;
- } else {
- /* there is a 0 bit */
- nr_at += sctp_map_lookup_tab[asoc->nr_mapping_array[nr_slide_from]];
- nr_last_all_ones = 0;
- break;
- }
- }
- nr_at++;
- if (compare_with_wrap(asoc->cumulative_tsn,
- asoc->highest_tsn_inside_nr_map, MAX_TSN) && (at >= 8)) {
- /* The complete array was completed by a single FR */
- /* higest becomes the cum-ack */
- int clr;
- clr = (nr_at >> 3) + 1;
- if (clr > asoc->nr_mapping_array_size)
- clr = asoc->nr_mapping_array_size;
- memset(asoc->nr_mapping_array, 0, clr);
- /* base becomes one ahead of the cum-ack */
- asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
- asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
- } else if (nr_at >= 8) {
- /* we can slide the mapping array down */
- /* Calculate the new byte postion we can move down */
- /*
- * now calculate the ceiling of the move using our
- * highest TSN value
+ * EY if doing nr_sacks then slide the
+ * nr_mapping_array accordingly please
- */
- if (asoc->highest_tsn_inside_nr_map >= asoc->nr_mapping_array_base_tsn) {
- nr_lgap = asoc->highest_tsn_inside_nr_map -
- asoc->nr_mapping_array_base_tsn;
- } else {
- nr_lgap = (MAX_TSN - asoc->nr_mapping_array_base_tsn) +
- asoc->highest_tsn_inside_nr_map + 1;
- }
- nr_slide_end = nr_lgap >> 3;
- if (nr_slide_end < nr_slide_from) {
- #ifdef INVARIANTS
- panic("impossible slide");
- #else
- printf("impossible slide?\n");
- return;
- #endif
- }
- if (nr_slide_end > asoc->nr_mapping_array_size) {
- #ifdef INVARIANTS
- panic("would overrun buffer");
- #else
- printf("Gak, would have overrun map end:%d nr_slide_end:%d\n",
- asoc->nr_mapping_array_size, nr_slide_end);
- nr_slide_end = asoc->nr_mapping_array_size;
- #endif
- }
- nr_distance = (nr_slide_end - nr_slide_from) + 1;
- if (nr_distance + nr_slide_from > asoc->nr_mapping_array_size ||
- nr_distance < 0) {
- /*
- * Here we do NOT slide forward the array so
- * that hopefully when more data comes in to
- * fill it up we will be able to slide it
- * forward. Really I don't think this should
- * happen :-0
- */
- ;
- } else {
- int ii;
- for (ii = 0; ii < nr_distance; ii++) {
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack) {
+ for (ii = 0; ii < distance; ii++) {
asoc->nr_mapping_array[ii] =
- asoc->nr_mapping_array[nr_slide_from + ii];
+ asoc->nr_mapping_array[slide_from + ii];
}
- for (ii = nr_distance; ii <= nr_slide_end; ii++) {
+ for (ii = distance; ii <= slide_end; ii++) {
asoc->nr_mapping_array[ii] = 0;
}
- asoc->nr_mapping_array_base_tsn += (nr_slide_from << 3);
+ asoc->nr_mapping_array_base_tsn += (slide_from << 3);
}
}
}
@ -2802,7 +2733,7 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
* EY if nr_sacks used then send an nr-sack , a sack
* otherwise
*/
- if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
sctp_send_nr_sack(stcb);
else
sctp_send_sack(stcb);
@ -3496,9 +3427,13 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
/*
* All chunks NOT UNSENT
* fall through here and are
- * marked
+ * marked (leave PR-SCTP
+ * ones that are to skip
+ * alone though)
*/
- tp1->sent = SCTP_DATAGRAM_MARKED;
+ if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
+ tp1->sent = SCTP_DATAGRAM_MARKED;
if (tp1->rec.data.chunk_was_revoked) {
/* deflate the cwnd */
tp1->whoTo->cwnd -= tp1->book_size;
@ -5798,7 +5733,9 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
} else {
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
- if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ if (compare_with_wrap(nr_tsn,
+ asoc->highest_tsn_inside_nr_map,
+ MAX_TSN))
asoc->highest_tsn_inside_nr_map = nr_tsn;
}
@ -5901,7 +5838,8 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
} else {
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, nr_gap);
- if (nr_tsn > asoc->highest_tsn_inside_nr_map)
+ if (compare_with_wrap(nr_tsn, asoc->highest_tsn_inside_nr_map,
+ MAX_TSN))
asoc->highest_tsn_inside_nr_map = nr_tsn;
}
@ -5963,6 +5901,91 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
}
}
+ static void
+ sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
+ struct sctp_association *asoc,
+ uint16_t stream, uint16_t seq)
+ {
+ struct sctp_tmit_chunk *chk, *at;
+ if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
+ /* For each one on here see if we need to toss it */
+ /*
+ * For now large messages held on the reasmqueue that are
+ * complete will be tossed too. We could in theory do more
+ * work to spin through and stop after dumping one msg aka
+ * seeing the start of a new msg at the head, and call the
+ * delivery function... to see if it can be delivered... But
+ * for now we just dump everything on the queue.
+ */
+ chk = TAILQ_FIRST(&asoc->reasmqueue);
+ while (chk) {
+ at = TAILQ_NEXT(chk, sctp_next);
+ if (chk->rec.data.stream_number != stream) {
+ chk = at;
+ continue;
+ }
+ if (chk->rec.data.stream_seq == seq) {
+ /* It needs to be tossed */
+ TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
+ if (compare_with_wrap(chk->rec.data.TSN_seq,
+ asoc->tsn_last_delivered, MAX_TSN)) {
+ asoc->tsn_last_delivered =
+ chk->rec.data.TSN_seq;
+ asoc->str_of_pdapi =
+ chk->rec.data.stream_number;
+ asoc->ssn_of_pdapi =
+ chk->rec.data.stream_seq;
+ asoc->fragment_flags =
+ chk->rec.data.rcv_flags;
+ }
+ asoc->size_on_reasm_queue -= chk->send_size;
+ sctp_ucount_decr(asoc->cnt_on_reasm_queue);
+ /* Clear up any stream problem */
+ if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
+ SCTP_DATA_UNORDERED &&
+ (compare_with_wrap(chk->rec.data.stream_seq,
+ asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered,
+ MAX_SEQ))) {
+ /*
+ * We must dump forward this streams
+ * sequence number if the chunk is
+ * not unordered that is being
+ * skipped. There is a chance that
+ * if the peer does not include the
+ * last fragment in its FWD-TSN we
+ * WILL have a problem here since
+ * you would have a partial chunk in
+ * queue that may not be
+ * deliverable. Also if a Partial
+ * delivery API as started the user
+ * may get a partial chunk. The next
+ * read returning a new chunk...
+ * really ugly but I see no way
+ * around it! Maybe a notify??
+ */
+ asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered =
+ chk->rec.data.stream_seq;
+ }
+ if (chk->data) {
+ sctp_m_freem(chk->data);
+ chk->data = NULL;
+ }
+ sctp_free_a_chunk(stcb, chk);
+ } else if (compare_with_wrap(chk->rec.data.stream_seq, seq, MAX_SEQ)) {
+ /*
+ * If the stream_seq is > than the purging
+ * one, we are done
+ */
+ break;
+ }
+ chk = at;
+ }
+ }
+ }
void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
struct sctp_forward_tsn_chunk *fwd, int *abort_flag, struct mbuf *m, int offset)
@ -5992,13 +6015,14 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
*/
struct sctp_association *asoc;
uint32_t new_cum_tsn, gap;
- unsigned int i, cnt_gone, fwd_sz, cumack_set_flag, m_size;
+ unsigned int i, fwd_sz, cumack_set_flag, m_size;
+ uint32_t str_seq;
struct sctp_stream_in *strm;
struct sctp_tmit_chunk *chk, *at;
struct sctp_queued_to_read *ctl, *sv;
cumack_set_flag = 0;
asoc = &stcb->asoc;
- cnt_gone = 0;
if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
SCTPDBG(SCTP_DEBUG_INDATA1,
"Bad size too small/big fwd-tsn\n");
@ -6102,6 +6126,14 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
SCTP_TCB_LOCK_ASSERT(stcb);
for (i = 0; i <= gap; i++) {
SCTP_SET_TSN_PRESENT(asoc->mapping_array, i);
+ /*
+ * EY if drain is off then every gap-ack is an
+ * nr-gap-ack
+ */
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack
+ && SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
+ SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
+ }
}
/*
* Now after marking all, slide thing forward but no sack
@ -6152,7 +6184,6 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
}
asoc->size_on_reasm_queue -= chk->send_size;
sctp_ucount_decr(asoc->cnt_on_reasm_queue);
- cnt_gone++;
/* Clear up any stream problem */
if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
@ -6188,45 +6219,17 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
} else {
/*
* Ok we have gone beyond the end of the
- * fwd-tsn's mark. Some checks...
+ * fwd-tsn's mark.
*/
- if ((asoc->fragmented_delivery_inprogress) &&
- (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG)) {
- uint32_t str_seq;
- /*
- * Special case PD-API is up and
- * what we fwd-tsn' over includes
- * one that had the LAST_FRAG. We no
- * longer need to do the PD-API.
- */
- asoc->fragmented_delivery_inprogress = 0;
- str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
- sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
- stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
- }
break;
}
chk = at;
}
}
- if (asoc->fragmented_delivery_inprogress) {
- /*
- * Ok we removed cnt_gone chunks in the PD-API queue that
- * were being delivered. So now we must turn off the flag.
- */
- uint32_t str_seq;
- str_seq = (asoc->str_of_pdapi << 16) | asoc->ssn_of_pdapi;
- sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
- stcb, SCTP_PARTIAL_DELIVERY_ABORTED, (void *)&str_seq, SCTP_SO_NOT_LOCKED);
- asoc->fragmented_delivery_inprogress = 0;
- }
- /*************************************************************/
- /* 3. Update the PR-stream re-ordering queues */
- /*************************************************************/
+ /*******************************************************/
+ /* 3. Update the PR-stream re-ordering queues and fix */
+ /* delivery issues as needed. */
+ /*******************************************************/
fwd_sz -= sizeof(*fwd);
if (m && fwd_sz) {
/* New method. */
@ -6235,6 +6238,7 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
offset += sizeof(*fwd);
+ SCTP_INP_READ_LOCK(stcb->sctp_ep);
num_str = fwd_sz / sizeof(struct sctp_strseq);
for (i = 0; i < num_str; i++) {
uint16_t st;
@ -6251,11 +6255,49 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
stseq->stream = st;
st = ntohs(stseq->sequence);
stseq->sequence = st;
- /* now process */
+ /*
+ * Ok we now look for the stream/seq on the read
+ * queue where its not all delivered. If we find it
+ * we transmute the read entry into a PDI_ABORTED.
+ */
+ if (stseq->stream >= asoc->streamincnt) {
+ /* screwed up streams, stop! */
+ break;
+ }
+ if ((asoc->str_of_pdapi == stseq->stream) &&
+ (asoc->ssn_of_pdapi == stseq->sequence)) {
+ /*
+ * If this is the one we were partially
+ * delivering now then we no longer are.
+ * Note this will change with the reassembly
+ * re-write.
+ */
+ asoc->fragmented_delivery_inprogress = 0;
+ }
+ sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
+ TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
+ if ((ctl->sinfo_stream == stseq->stream) &&
+ (ctl->sinfo_ssn == stseq->sequence)) {
+ str_seq = (stseq->stream << 16) | stseq->sequence;
+ ctl->end_added = 1;
+ ctl->pdapi_aborted = 1;
+ sv = stcb->asoc.control_pdapi;
+ stcb->asoc.control_pdapi = ctl;
+ sctp_notify_partial_delivery_indication(stcb,
+ SCTP_PARTIAL_DELIVERY_ABORTED,
+ SCTP_HOLDS_LOCK,
+ str_seq);
+ stcb->asoc.control_pdapi = sv;
+ break;
+ } else if ((ctl->sinfo_stream == stseq->stream) &&
+ (compare_with_wrap(ctl->sinfo_ssn, stseq->sequence, MAX_SEQ))) {
+ /* We are past our victim SSN */
+ break;
+ }
+ }
strm = &asoc->strmin[stseq->stream];
if (compare_with_wrap(stseq->sequence,
strm->last_sequence_delivered, MAX_SEQ)) {
@ -6267,6 +6309,7 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
/* sa_ignore NO_NULL_CHK */
sctp_kick_prsctp_reorder_queue(stcb, strm);
}
+ SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
}
if (TAILQ_FIRST(&asoc->reasmqueue)) {
/* now lets kick out and check for more fragmented delivery */
@ -7067,7 +7110,8 @@ sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb,
* fall through here and are
* marked
*/
- tp1->sent = SCTP_DATAGRAM_MARKED;
+ if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
+ tp1->sent = SCTP_DATAGRAM_NR_MARKED;
if (tp1->rec.data.chunk_was_revoked) {
/* deflate the cwnd */
tp1->whoTo->cwnd -= tp1->book_size;
@ -7079,7 +7123,8 @@ sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb,
* nr_marked
*/
if (all_bit) {
- tp1->sent = SCTP_DATAGRAM_NR_MARKED;
+ if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
+ tp1->sent = SCTP_DATAGRAM_NR_MARKED;
/*
* TAILQ_REMOVE(&asoc
* ->sent_queue,
@ -7198,7 +7243,8 @@ sctp_handle_nr_sack_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb,
while (tp1) {
if (tp1->rec.data.TSN_seq == j) {
if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
- tp1->sent = SCTP_DATAGRAM_NR_MARKED;
+ if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
+ tp1->sent = SCTP_DATAGRAM_NR_MARKED;
/*
* TAILQ_REMOVE(&asoc
* ->sent_queue,


@ -3150,8 +3150,10 @@ process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
(uintptr_t) stcb,
tp1->rec.data.TSN_seq);
}
- sctp_flight_size_decrease(tp1);
- sctp_total_flight_decrease(stcb, tp1);
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+ }
} {
/* audit code */
unsigned int audit;
@ -5606,11 +5608,14 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
/* there was a gap before this data was processed */
was_a_gap = 1;
}
+ stcb->asoc.send_sack = 1;
sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
if (abort_flag) {
/* Again, we aborted so NO UNLOCK needed */
goto out_now;
}
+ } else if (fwd_tsn_seen) {
+ stcb->asoc.send_sack = 1;
+ }
/* trigger send of any chunks in queue... */
trigger_send:

File diff suppressed because it is too large.


@ -3157,8 +3157,9 @@ sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
SCTP_TCB_UNLOCK(asoc);
continue;
}
if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_WAIT) ||
(SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
if (((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_WAIT) ||
(SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_ECHOED)) &&
(asoc->asoc.total_output_queue_size == 0)) {
/*
* If we have data in queue, we don't want
* to just free since the app may have done,
@ -6029,6 +6030,7 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
stcb->asoc.peer_supports_prsctp = 0;
stcb->asoc.peer_supports_pktdrop = 0;
stcb->asoc.peer_supports_strreset = 0;
+ stcb->asoc.peer_supports_nr_sack = 0;
stcb->asoc.peer_supports_auth = 0;
pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
num_ent = plen - sizeof(struct sctp_paramhdr);
@ -6044,6 +6046,12 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
case SCTP_PACKET_DROPPED:
stcb->asoc.peer_supports_pktdrop = 1;
break;
+ case SCTP_NR_SELECTIVE_ACK:
+ if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off))
+ stcb->asoc.peer_supports_nr_sack = 1;
+ else
+ stcb->asoc.peer_supports_nr_sack = 0;
+ break;
case SCTP_STREAM_RESET:
stcb->asoc.peer_supports_strreset = 1;
break;


@ -196,6 +196,7 @@ struct sctp_nets {
/* smoothed average things for RTT and RTO itself */
int lastsa;
int lastsv;
+ int rtt; /* last measured rtt value in ms */
unsigned int RTO;
/* This is used for SHUTDOWN/SHUTDOWN-ACK/SEND or INIT timers */
@ -677,7 +678,7 @@ struct sctp_association {
/* primary destination to use */
struct sctp_nets *primary_destination;
/* For CMT */
- struct sctp_nets *last_net_data_came_from;
+ struct sctp_nets *last_net_cmt_send_started;
/* last place I got a data chunk from */
struct sctp_nets *last_data_chunk_from;
/* last place I got a control from */


@ -463,6 +463,7 @@ sctp_assoclist(SYSCTL_HANDLER_ARGS)
xraddr.cwnd = net->cwnd;
xraddr.flight_size = net->flight_size;
xraddr.mtu = net->mtu;
+ /* xraddr.rtt = net->rtt; Not yet */
xraddr.start_time.tv_sec = (uint32_t) net->start_time.tv_sec;
xraddr.start_time.tv_usec = (uint32_t) net->start_time.tv_usec;
SCTP_INP_RUNLOCK(inp);


@ -74,7 +74,7 @@ struct sctp_sysctl {
uint32_t sctp_nr_outgoing_streams_default;
uint32_t sctp_cmt_on_off;
uint32_t sctp_cmt_use_dac;
- /* EY 5/5/08 - nr_sack flag variable */
+ /* EY 5/5/08 - nr_sack flag variable */
uint32_t sctp_nr_sack_on_off;
uint32_t sctp_cmt_pf;
uint32_t sctp_use_cwnd_based_maxburst;


@ -565,9 +565,9 @@ struct sctp_sack_info {
struct sctp_cwnd_args {
struct sctp_nets *net; /* network to *//* FIXME: LP64 issue */
uint32_t cwnd_new_value;/* cwnd in k */
- uint32_t inflight; /* flightsize in k */
uint32_t pseudo_cumack;
- uint32_t cwnd_augment; /* increment to it */
+ uint16_t inflight; /* flightsize in k */
+ uint16_t cwnd_augment; /* increment to it */
uint8_t meets_pseudo_cumack;
uint8_t need_new_pseudo_cumack;
uint8_t cnt_in_send;
@ -1042,6 +1042,7 @@ struct xsctp_raddr {
uint8_t heartbeat_enabled; /* sctpAssocLocalRemEntry 4 */
struct sctp_timeval start_time; /* sctpAssocLocalRemEntry 8 */
+ uint32_t extra_padding[8]; /* future */
};
#define SCTP_MAX_LOGGING_SIZE 30000


@ -126,6 +126,10 @@ sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
* since we sent to big of chunk
*/
chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
+ if (chk->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_decrease(chk);
+ sctp_total_flight_decrease(stcb, chk);
+ }
if (chk->sent != SCTP_DATAGRAM_RESEND) {
sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
}
@ -140,8 +144,6 @@ sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
}
/* Clear any time so NO RTT is being done */
chk->do_rtt = 0;
- sctp_flight_size_decrease(chk);
- sctp_total_flight_decrease(stcb, chk);
}
}
}


@ -970,7 +970,7 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
asoc->sent_queue_retran_cnt = 0;
/* for CMT */
- asoc->last_net_data_came_from = NULL;
+ asoc->last_net_cmt_send_started = NULL;
/* This will need to be adjusted */
asoc->last_cwr_tsn = asoc->init_seq_number - 1;
@ -1222,33 +1222,25 @@ sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
asoc->mapping_array = new_array;
asoc->mapping_array_size = new_size;
- return (0);
- }
- /* EY - nr_sack version of the above method */
- int
- sctp_expand_nr_mapping_array(struct sctp_association *asoc, uint32_t needed)
- {
- /* nr mapping array needs to grow */
- uint8_t *new_array;
- uint32_t new_size;
- new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
- SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
- if (new_array == NULL) {
- /* can't get more, forget it */
- SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
- new_size);
- return (-1);
+ if (asoc->peer_supports_nr_sack) {
+ new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
+ SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
+ if (new_array == NULL) {
+ /* can't get more, forget it */
+ SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
+ new_size);
+ return (-1);
+ }
+ memset(new_array, 0, new_size);
+ memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
+ SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
+ asoc->nr_mapping_array = new_array;
+ asoc->nr_mapping_array_size = new_size;
+ }
- memset(new_array, 0, new_size);
- memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
- SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
- asoc->nr_mapping_array = new_array;
- asoc->nr_mapping_array_size = new_size;
return (0);
}
#if defined(SCTP_USE_THREAD_BASED_ITERATOR)
static void
sctp_iterator_work(struct sctp_iterator *it)
@ -2589,7 +2581,7 @@ sctp_calculate_rto(struct sctp_tcb *stcb,
/***************************/
/* 2. update RTTVAR & SRTT */
/***************************/
- o_calctime = calc_time;
+ net->rtt = o_calctime = calc_time;
/* this is Van Jacobson's integer version */
if (net->RTO_measured) {
calc_time -= (net->lastsa >> SCTP_RTT_SHIFT); /* take away 1/8th when
@ -4650,18 +4642,12 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
seq = tp1->rec.data.stream_seq;
do {
ret_sz += tp1->book_size;
- tp1->sent = SCTP_FORWARD_TSN_SKIP;
if (tp1->data != NULL) {
- #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
- struct socket *so;
- #endif
- printf("Release PR-SCTP chunk tsn:%u flags:%x\n",
- tp1->rec.data.TSN_seq,
- (unsigned int)tp1->rec.data.rcv_flags);
+ if (tp1->sent < SCTP_DATAGRAM_RESEND) {
+ sctp_flight_size_decrease(tp1);
+ sctp_total_flight_decrease(stcb, tp1);
+ }
sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
- sctp_flight_size_decrease(tp1);
- sctp_total_flight_decrease(stcb, tp1);
stcb->asoc.peers_rwnd += tp1->send_size;
stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
@ -4672,6 +4658,7 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
stcb->asoc.sent_queue_cnt_removeable--;
}
}
+ tp1->sent = SCTP_FORWARD_TSN_SKIP;
if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) ==
SCTP_DATA_NOT_FRAG) {
/* not frag'ed we ae done */
@ -4715,6 +4702,8 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, reason, tp1, so_locked);
sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
sctp_m_freem(tp1->data);
+ /* No flight involved here book the size to 0 */
+ tp1->book_size = 0;
tp1->data = NULL;
if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
foundeom = 1;
@ -4780,6 +4769,7 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
chk->pr_sctp_on = 1;
TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next);
stcb->asoc.sent_queue_cnt++;
+ stcb->asoc.pr_sctp_cnt++;
} else {
chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG;
}
@ -4810,6 +4800,8 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
}
if (do_wakeup_routine) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
+ struct socket *so;
so = SCTP_INP_SO(stcb->sctp_ep);
if (!so_locked) {
atomic_add_int(&stcb->asoc.refcnt, 1);