* Fix some race conditions in SACK/NR-SACK processing.

* Fix handling of mapping arrays when draining mbufs or processing
  FORWARD-TSN chunks.
* Clean up the code (no more duplicated code for SACKs and NR-SACKs).
Part of this code was developed together with rrs.
MFC after: 2 weeks.
Author: Michael Tuexen
Date:   2010-04-03 15:40:14 +00:00
Commit: b5c164935e (parent f0564d3739)
Notes:  svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=206137
15 changed files with 500 additions and 869 deletions


@ -556,9 +556,9 @@ sctp_process_asconf_set_primary(struct mbuf *m,
* PRIMARY with DELETE IP ADDRESS of the previous primary
* destination, unacknowledged DATA are retransmitted
* immediately to the new primary destination for seamless
* handover. If the destination is UNCONFIRMED and marked to
* REQ_PRIM, the retransmission occurs upon reception of the
* HEARTBEAT-ACK. (See sctp_handle_heartbeat_ack in
* sctp_input.c) Also, when changing the primary
* destination, it is better that all subsequent new DATA
* containing already queued DATA are transmitted to the new
@ -1166,7 +1166,7 @@ sctp_path_check_and_react(struct sctp_tcb *stcb, struct sctp_ifa *newifa)
/*
* If the number of local valid addresses is 1, the valid address is
* probably a newly added address. If there are several valid
* addresses in this association, a source address may not be changed.
* Additionally, they can be configured on the same interface as
* "alias" addresses.
* (by micchie)
@ -1210,7 +1210,7 @@ sctp_path_check_and_react(struct sctp_tcb *stcb, struct sctp_ifa *newifa)
/*
* Check whether the nexthop corresponds to the new address.
* If the new address corresponds to the current nexthop, the
* path will be changed. If the new address does NOT
* correspond to the current nexthop, the path will not be
* changed.
*/


@ -544,13 +544,7 @@ __FBSDID("$FreeBSD$");
#define SCTP_INITIAL_MAPPING_ARRAY 16
/* how much we grow the mapping array each call */
#define SCTP_MAPPING_ARRAY_INCR 32
/* EY 05/13/08 - nr_sack version of the previous 3 constants */
/* Maximum the nr mapping array will grow to (TSN mapping array) */
#define SCTP_NR_MAPPING_ARRAY 512
/* size of the inital malloc on the nr mapping array */
#define SCTP_INITIAL_NR_MAPPING_ARRAY 16
/* how much we grow the nr mapping array each call */
#define SCTP_NR_MAPPING_ARRAY_INCR 32
/*
* Here we define the timer types used by the implementation as arguments in
* the set/get timer type calls.
@ -933,6 +927,13 @@ __FBSDID("$FreeBSD$");
#define SCTP_IS_TSN_PRESENT(arry, gap) ((arry[(gap >> 3)] >> (gap & 0x07)) & 0x01)
#define SCTP_SET_TSN_PRESENT(arry, gap) (arry[(gap >> 3)] |= (0x01 << ((gap & 0x07))))
#define SCTP_UNSET_TSN_PRESENT(arry, gap) (arry[(gap >> 3)] &= ((~(0x01 << ((gap & 0x07)))) & 0xff))
#define SCTP_CALC_TSN_TO_GAP(gap, tsn, mapping_tsn) do { \
if (tsn >= mapping_tsn) { \
gap = tsn - mapping_tsn; \
} else { \
gap = (MAX_TSN - mapping_tsn) + tsn + 1; \
} \
} while(0)
#define SCTP_RETRAN_DONE -1
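
For context, this macro is the heart of the change: it converts a TSN into a bit offset (gap) within the mapping array, handling 32-bit serial-number wraparound. A minimal stand-alone sketch, assuming MAX_TSN is 0xffffffff and using made-up TSN values:

#include <inttypes.h>
#include <stdio.h>

#define MAX_TSN 0xffffffffU	/* assumed: TSNs are 32-bit serial numbers */

#define SCTP_CALC_TSN_TO_GAP(gap, tsn, mapping_tsn) do { \
	if (tsn >= mapping_tsn) { \
		gap = tsn - mapping_tsn; \
	} else { \
		gap = (MAX_TSN - mapping_tsn) + tsn + 1; \
	} \
} while (0)

int
main(void)
{
	uint32_t gap;

	/* Plain case: TSN is ahead of the mapping array base. */
	SCTP_CALC_TSN_TO_GAP(gap, 1010U, 1000U);
	printf("gap = %" PRIu32 "\n", gap);	/* 10 */

	/* Wrapped case: base near MAX_TSN, TSN already wrapped past 0. */
	SCTP_CALC_TSN_TO_GAP(gap, 5U, 0xfffffffeU);
	printf("gap = %" PRIu32 "\n", gap);	/* 7 */
	return (0);
}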


@ -45,13 +45,6 @@ __FBSDID("$FreeBSD$");
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#define SCTP_CALC_TSN_TO_GAP(gap, tsn, mapping_tsn) do { \
if (tsn >= mapping_tsn) { \
gap = tsn - mapping_tsn; \
} else { \
gap = (MAX_TSN - mapping_tsn) + tsn + 1; \
} \
} while(0)
/*
* NOTES: On the outbound side of things I need to check the sack timer to
@ -303,13 +296,13 @@ sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
return;
}
SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
#ifdef INVARIANTS
if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
printf("gap:%x tsn:%x\n", gap, tsn);
sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
panic("Things are really messed up now!!");
}
#endif
}
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
@ -317,7 +310,8 @@ sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
}
if (tsn == asoc->highest_tsn_inside_map) {
/* We must back down to see what the new highest is */
for (i = tsn - 1; compare_with_wrap(i, asoc->mapping_array_base_tsn, MAX_TSN); i--) {
for (i = tsn - 1; (compare_with_wrap(i, asoc->mapping_array_base_tsn, MAX_TSN) ||
(i == asoc->mapping_array_base_tsn)); i--) {
SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
asoc->highest_tsn_inside_map = i;
@ -411,6 +405,7 @@ sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
end = 1;
else
end = 0;
sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
sctp_add_to_readq(stcb->sctp_ep,
stcb, control, &stcb->sctp_socket->so_rcv, end,
SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
@ -420,6 +415,7 @@ sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
end = 1;
else
end = 0;
sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
if (sctp_append_to_readq(stcb->sctp_ep, stcb,
stcb->asoc.control_pdapi,
chk->data, end, chk->rec.data.TSN_seq,
@ -454,7 +450,6 @@ sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
}
/* pull it we did it */
TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
asoc->fragmented_delivery_inprogress = 0;
if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
@ -501,11 +496,11 @@ sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
asoc->size_on_all_streams -= ctl->length;
sctp_ucount_decr(asoc->cnt_on_all_streams);
strm->last_sequence_delivered++;
sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
sctp_add_to_readq(stcb->sctp_ep, stcb,
ctl,
&stcb->sctp_socket->so_rcv, 1,
SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
ctl = ctlat;
} else {
break;
@ -616,11 +611,11 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
sctp_ucount_decr(asoc->cnt_on_all_streams);
strm->last_sequence_delivered++;
sctp_mark_non_revokable(asoc, control->sinfo_tsn);
sctp_add_to_readq(stcb->sctp_ep, stcb,
control,
&stcb->sctp_socket->so_rcv, 1,
SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
sctp_mark_non_revokable(asoc, control->sinfo_tsn);
control = TAILQ_FIRST(&strm->inqueue);
while (control != NULL) {
/* all delivered */
@ -641,13 +636,12 @@ sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
sctp_log_strm_del(control, NULL,
SCTP_STR_LOG_FROM_IMMED_DEL);
}
/* EY will be used to calculate nr-gap */
sctp_mark_non_revokable(asoc, control->sinfo_tsn);
sctp_add_to_readq(stcb->sctp_ep, stcb,
control,
&stcb->sctp_socket->so_rcv, 1,
SCTP_READ_LOCK_NOT_HELD,
SCTP_SO_NOT_LOCKED);
sctp_mark_non_revokable(asoc, control->sinfo_tsn);
control = at;
continue;
}
@ -965,8 +959,7 @@ sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
*abort_flag = 1;
} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
SCTP_DATA_UNORDERED &&
chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
/* Got to be the right STR Seq */
SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
chk->rec.data.stream_seq,
@ -1623,7 +1616,6 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
SCTP_STAT_INCR(sctps_badsid);
SCTP_TCB_LOCK_ASSERT(stcb);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
asoc->highest_tsn_inside_nr_map = tsn;
@ -1787,6 +1779,7 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
SCTP_STR_LOG_FROM_EXPRS_DEL);
}
control = NULL;
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
asoc->highest_tsn_inside_nr_map = tsn;
@ -1853,10 +1846,6 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
need_reasm_check = 1;
}
}
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
if (compare_with_wrap(tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
asoc->highest_tsn_inside_nr_map = tsn;
}
control = NULL;
goto finish_express_del;
}
@ -2059,10 +2048,10 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
/* ok, if we reach here we have passed the sanity checks */
if (chunk_flags & SCTP_DATA_UNORDERED) {
/* queue directly into socket buffer */
sctp_mark_non_revokable(asoc, control->sinfo_tsn);
sctp_add_to_readq(stcb->sctp_ep, stcb,
control,
&stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
} else {
/*
* Special check for when streams are resetting. We
@ -2134,10 +2123,6 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
}
finish_express_del:
if (tsn == (asoc->cumulative_tsn + 1)) {
/* Update cum-ack */
asoc->cumulative_tsn = tsn;
}
if (last_chunk) {
*m = NULL;
}
@ -2215,43 +2200,43 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
}
int8_t sctp_map_lookup_tab[256] = {
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 3,
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 4,
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 3,
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 5,
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 3,
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 4,
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 3,
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 6,
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 3,
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 4,
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 3,
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 5,
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 3,
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 4,
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 3,
-1, 0, -1, 1, -1, 0, -1, 2,
-1, 0, -1, 1, -1, 0, -1, 7,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 4,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 5,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 4,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 6,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 4,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 5,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 4,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 7,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 4,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 5,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 4,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 6,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 4,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 5,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 4,
0, 1, 0, 2, 0, 1, 0, 3,
0, 1, 0, 2, 0, 1, 0, 8
};
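
If I read the rewritten table correctly, sctp_map_lookup_tab[v] is now the count of consecutive 1-bits starting at the least significant bit of v, i.e. how many in-sequence TSNs a mapping-array byte contributes before its first hole (the old table held that count minus one, hence its -1 entries and a separate increment afterwards). A sketch that regenerates the 256-entry table under that assumption:

#include <stdio.h>

/*
 * Count consecutive 1-bits from the LSB: the number of in-sequence
 * TSNs a mapping-array byte contributes before its first hole.
 */
static int
trailing_ones(unsigned char b)
{
	int n = 0;

	while (b & 1) {
		n++;
		b >>= 1;
	}
	return (n);
}

int
main(void)
{
	int v;

	/* Regenerate the table; every value is in 0..8. */
	for (v = 0; v < 256; v++) {
		printf("%d%s", trailing_ones((unsigned char)v),
		    (v % 8 == 7) ? ",\n" : ", ");
	}
	return (0);
}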
void
sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort_flag)
sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
{
/*
* Now we also need to check the mapping array in a couple of ways.
@ -2259,7 +2244,6 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
*/
struct sctp_association *asoc;
int at;
int last_all_ones = 0;
int slide_from, slide_end, lgap, distance;
/* EY nr_mapping array variables */
@ -2279,19 +2263,16 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
* offset of the current cum-ack as the starting point.
*/
at = 0;
for (slide_from = 0; slide_from < stcb->asoc.nr_mapping_array_size; slide_from++) {
for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
if (asoc->nr_mapping_array[slide_from] == 0xff) {
at += 8;
last_all_ones = 1;
} else {
/* there is a 0 bit */
at += sctp_map_lookup_tab[asoc->nr_mapping_array[slide_from]];
last_all_ones = 0;
break;
}
}
asoc->cumulative_tsn = asoc->nr_mapping_array_base_tsn + (at - last_all_ones);
at++;
asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
if (compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_map, MAX_TSN) &&
compare_with_wrap(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)
@ -2320,18 +2301,22 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
/* The complete array was completed by a single FR */
/* highest becomes the cum-ack */
int clr;
int clr, i;
/* clear the array */
clr = (at >> 3) + 1;
clr = ((at + 7) >> 3);
if (clr > asoc->mapping_array_size) {
clr = asoc->mapping_array_size;
}
memset(asoc->mapping_array, 0, clr);
memset(asoc->nr_mapping_array, 0, clr);
for (i = 0; i < asoc->mapping_array_size; i++) {
if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
printf("Error Mapping array's not clean at clear\n");
sctp_print_mapping_array(asoc);
}
}
asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
asoc->nr_mapping_array_base_tsn = asoc->cumulative_tsn + 1;
asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
} else if (at >= 8) {
/* we can slide the mapping array down */
@ -2393,12 +2378,11 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
asoc->nr_mapping_array[slide_from + ii];
}
for (ii = distance; ii <= slide_end; ii++) {
for (ii = distance; ii <= asoc->mapping_array_size; ii++) {
asoc->mapping_array[ii] = 0;
asoc->nr_mapping_array[ii] = 0;
}
asoc->mapping_array_base_tsn += (slide_from << 3);
asoc->nr_mapping_array_base_tsn += (slide_from << 3);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(asoc->mapping_array_base_tsn,
asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
@ -2406,95 +2390,95 @@ sctp_sack_check(struct sctp_tcb *stcb, int ok_to_sack, int was_a_gap, int *abort
}
}
}
}
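
Putting the pieces together, the cum-ack scan in sctp_slide_mapping_arrays() adds 8 for every all-ones byte of the nr-mapping array, adds the lookup-table value for the first byte containing a hole, and takes the array base plus that count minus one as the new cumulative TSN. A stand-alone restatement of that scan, with an invented helper standing in for the lookup table and made-up inputs:

#include <stdint.h>
#include <stdio.h>

static int
trailing_ones(unsigned char b)	/* stands in for sctp_map_lookup_tab[b] */
{
	int n = 0;

	while (b & 1) {
		n++;
		b >>= 1;
	}
	return (n);
}

static uint32_t
cum_tsn_from_map(const uint8_t *map, size_t size, uint32_t base_tsn)
{
	uint32_t at = 0;
	size_t i;

	for (i = 0; i < size; i++) {
		if (map[i] == 0xff) {
			at += 8;	/* whole byte in sequence */
		} else {
			at += trailing_ones(map[i]);
			break;		/* first hole found */
		}
	}
	return (base_tsn + at - 1);	/* highest in-sequence TSN */
}

int
main(void)
{
	/* TSNs base..base+10 present, base+11 missing: 0xff, 0x07. */
	uint8_t map[4] = { 0xff, 0x07, 0x00, 0x00 };

	printf("cum-ack = %u\n",
	    (unsigned)cum_tsn_from_map(map, sizeof(map), 1000));
	/* prints 1010: base 1000 plus 11 in-sequence bits, minus one */
	return (0);
}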
void
sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap, int *abort_flag)
{
struct sctp_association *asoc;
uint32_t highest_tsn;
asoc = &stcb->asoc;
if (compare_with_wrap(asoc->highest_tsn_inside_nr_map,
asoc->highest_tsn_inside_map,
MAX_TSN)) {
highest_tsn = asoc->highest_tsn_inside_nr_map;
} else {
highest_tsn = asoc->highest_tsn_inside_map;
}
/*
* Now we need to see if we need to queue a sack or just start the
* timer (if allowed).
*/
if (ok_to_sack) {
if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
/*
* Ok, special case: in the SHUTDOWN-SENT state, here we
* make sure the SACK timer is off and instead send a
* SHUTDOWN and a SACK
*/
if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
}
sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
/*
* EY if nr_sacks used then send an nr-sack , a sack
* otherwise
*/
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
sctp_send_nr_sack(stcb);
else
sctp_send_sack(stcb);
} else {
int is_a_gap;
if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
/*
* Ok, special case: in the SHUTDOWN-SENT state, here we make
* sure the SACK timer is off and instead send a SHUTDOWN and
* a SACK
*/
if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
}
sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
sctp_send_sack(stcb);
} else {
int is_a_gap;
/* is there a gap now ? */
is_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
/* is there a gap now ? */
is_a_gap = compare_with_wrap(highest_tsn, stcb->asoc.cumulative_tsn, MAX_TSN);
/*
* CMT DAC algorithm: increase number of packets
* received since last ack
*/
stcb->asoc.cmt_dac_pkts_rcvd++;
/*
* CMT DAC algorithm: increase number of packets received
* since last ack
*/
stcb->asoc.cmt_dac_pkts_rcvd++;
if ((stcb->asoc.send_sack == 1) || /* We need to send a
* SACK */
((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
* longer is one */
(stcb->asoc.numduptsns) || /* we have dup's */
(is_a_gap) || /* is still a gap */
(stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
(stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
) {
if ((stcb->asoc.send_sack == 1) || /* We need to send a
* SACK */
((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
* longer is one */
(stcb->asoc.numduptsns) || /* we have dup's */
(is_a_gap) || /* is still a gap */
(stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
(stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
) {
if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
(SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
(stcb->asoc.send_sack == 0) &&
(stcb->asoc.numduptsns == 0) &&
(stcb->asoc.delayed_ack) &&
(!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
if ((SCTP_BASE_SYSCTL(sctp_cmt_on_off)) &&
(SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
(stcb->asoc.send_sack == 0) &&
(stcb->asoc.numduptsns == 0) &&
(stcb->asoc.delayed_ack) &&
(!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
/*
* CMT DAC algorithm: With CMT, delay
* acks even in the face of reordering.
* Therefore, acks that do not have to
* be sent because of the above reasons
* will be delayed. That is, acks that
* would have been sent due to gap
* reports will be delayed with DAC.
* Start the delayed ack timer.
*/
sctp_timer_start(SCTP_TIMER_TYPE_RECV,
stcb->sctp_ep, stcb, NULL);
} else {
/*
* Ok we must build a SACK since the
* timer is pending, we got our
* first packet OR there are gaps or
* duplicates.
*/
(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
/*
* EY if nr_sacks used then send an
* nr-sack , a sack otherwise
*/
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
sctp_send_nr_sack(stcb);
else
sctp_send_sack(stcb);
}
/*
* CMT DAC algorithm: With CMT, delay acks even in the
* face of reordering. Therefore, acks that do not have
* to be sent because of the above reasons will be
* delayed. That is, acks that would have been sent due
* to gap reports will be delayed with DAC. Start the
* delayed ack timer.
*/
sctp_timer_start(SCTP_TIMER_TYPE_RECV,
stcb->sctp_ep, stcb, NULL);
} else {
if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
sctp_timer_start(SCTP_TIMER_TYPE_RECV,
stcb->sctp_ep, stcb, NULL);
}
/*
* Ok we must build a SACK since the timer
* is pending, we got our first packet OR
* there are gaps or duplicates.
*/
(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
sctp_send_sack(stcb);
}
} else {
if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
sctp_timer_start(SCTP_TIMER_TYPE_RECV,
stcb->sctp_ep, stcb, NULL);
}
}
}
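
The branch structure above reduces to a single predicate deciding between an immediate SACK and (re)starting the delayed-ack timer. A compressed sketch of that predicate, with a made-up struct standing in for the relevant sctp_association fields:

#include <stdbool.h>

struct sack_state {		/* illustrative subset of the assoc state */
	bool send_sack;		/* a SACK was explicitly requested */
	bool was_a_gap, is_a_gap;
	int numduptsns;		/* duplicate TSNs seen since last ack */
	int delayed_ack;	/* 0 = delayed SACKs disabled */
	int data_pkts_seen, sack_freq;
};

/* true: build and send a SACK now; false: (re)start the dack timer. */
static bool
must_sack_now(const struct sack_state *s)
{
	return (s->send_sack ||
	    (s->was_a_gap && !s->is_a_gap) ||	/* gap just closed */
	    s->numduptsns > 0 ||		/* report duplicates */
	    s->is_a_gap ||			/* gap still open */
	    s->delayed_ack == 0 ||		/* delayed ack off */
	    s->data_pkts_seen >= s->sack_freq);	/* packet limit hit */
}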
@ -2834,14 +2818,7 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
}
/*
* EY if nr_sacks used then send an nr-sack , a sack
* otherwise
*/
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
sctp_send_nr_sack(stcb);
else
sctp_send_sack(stcb);
sctp_send_sack(stcb);
} else {
if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
sctp_timer_start(SCTP_TIMER_TYPE_RECV,
@ -2849,7 +2826,7 @@ sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
}
}
} else {
sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
sctp_sack_check(stcb, was_a_gap, &abort_flag);
}
if (abort_flag)
return (2);
@ -2867,7 +2844,7 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
{
struct sctp_tmit_chunk *tp1;
unsigned int theTSN;
int j, wake_him = 0;
int j, wake_him = 0, circled = 0;
/* Recover the tp1 we last saw */
tp1 = *p_tp1;
@ -3045,12 +3022,6 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
}
/* NR Sack code here */
if (nr_sacking) {
if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
tp1->sent = SCTP_DATAGRAM_NR_MARKED;
/*
* TAILQ_REMOVE(&asoc->sent_q
* ueue, tp1, sctp_next);
*/
if (tp1->data) {
/*
* sa_ignore
@ -3058,13 +3029,8 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
*/
sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
sctp_m_freem(tp1->data);
tp1->data = NULL;
}
tp1->data = NULL;
/* asoc->sent_queue_cnt--; */
/*
* sctp_free_a_chunk(stcb,
* tp1);
*/
wake_him++;
}
}
@ -3075,11 +3041,16 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
break;
tp1 = TAILQ_NEXT(tp1, sctp_next);
if ((tp1 == NULL) && (circled == 0)) {
circled++;
tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
}
} /* end while (tp1) */
if (tp1 == NULL) {
circled = 0;
tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
}
/* In case the fragments were not in order we must reset */
} /* end for (j = fragStart */
*p_tp1 = tp1;
return (wake_him); /* Return value only used for nr-sack */
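
The new circled flag lets the fragment walk wrap around the sent queue exactly once, so a gap block whose TSNs sit before the current cursor is still found; the cursor is then reset for the next fragment. A simplified sketch of such a wrap-once search over a singly linked list (types and names invented for illustration):

#include <stddef.h>

struct chunk {
	unsigned int tsn;
	struct chunk *next;
};

/*
 * Find the chunk with the given TSN, starting at *pos and wrapping
 * around the list head at most once, like the circled logic above.
 */
static struct chunk *
find_wrap_once(struct chunk *head, struct chunk **pos, unsigned int tsn)
{
	struct chunk *c = *pos ? *pos : head;
	int circled = 0;

	while (c != NULL) {
		if (c->tsn == tsn) {
			*pos = c;	/* resume here next time */
			return (c);
		}
		c = c->next;
		if (c == NULL && circled == 0) {
			circled = 1;	/* wrap exactly once */
			c = head;
		}
	}
	*pos = head;			/* not found: reset the cursor */
	return (NULL);
}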
@ -3158,6 +3129,9 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
} else {
non_revocable = 1;
}
if (i == num_seg) {
tp1 = NULL;
}
if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
non_revocable, &num_frs, biggest_newly_acked_tsn,
this_sack_lowest_newack, ecn_seg_sums)) {
@ -3961,6 +3935,7 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
#ifdef INVARIANTS
panic("Impossible sack 1");
#else
*abort_now = 1;
/* XXX */
oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
@ -4439,50 +4414,6 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
}
}
/* EY- nr_sack */
/* Identifies the non-renegable tsns that are revoked*/
static void
sctp_check_for_nr_revoked(struct sctp_tcb *stcb,
struct sctp_association *asoc, uint32_t cumack,
uint32_t biggest_tsn_acked)
{
struct sctp_tmit_chunk *tp1;
for (tp1 = TAILQ_FIRST(&asoc->sent_queue); tp1; tp1 = TAILQ_NEXT(tp1, sctp_next)) {
if (compare_with_wrap(tp1->rec.data.TSN_seq, cumack,
MAX_TSN)) {
/*
* ok this guy is either ACK or MARKED. If it is
* ACKED it has been previously acked but not this
* time i.e. revoked. If it is MARKED it was ACK'ed
* again.
*/
if (compare_with_wrap(tp1->rec.data.TSN_seq, biggest_tsn_acked,
MAX_TSN))
break;
if (tp1->sent == SCTP_DATAGRAM_NR_ACKED) {
/*
* EY! a non-renegable TSN is revoked, need
* to abort the association
*/
/*
* EY TODO: put in the code to abort the
* assoc.
*/
return;
} else if (tp1->sent == SCTP_DATAGRAM_NR_MARKED) {
/* it has been re-acked in this SACK */
tp1->sent = SCTP_DATAGRAM_NR_ACKED;
}
}
if (tp1->sent == SCTP_DATAGRAM_UNSENT)
break;
}
return;
}
void
sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
struct sctp_tcb *stcb, struct sctp_nets *net_from,
@ -4588,22 +4519,23 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
sctpchunk_listhead);
send_s = tp1->rec.data.TSN_seq + 1;
} else {
tp1 = NULL;
send_s = asoc->sending_seq;
}
if (cum_ack == send_s ||
compare_with_wrap(cum_ack, send_s, MAX_TSN)) {
#ifndef INVARIANTS
struct mbuf *oper;
#endif
#ifdef INVARIANTS
hopeless_peer:
panic("Impossible sack 1");
#else
/*
* no way, we have not even sent this TSN out yet.
* Peer is hopelessly messed up with us.
*/
printf("NEW cum_ack:%x send_s:%x is smaller or equal\n",
cum_ack, send_s);
if (tp1) {
printf("Got send_s from tsn:%x + 1 of tp1:%p\n",
tp1->rec.data.TSN_seq, tp1);
}
hopeless_peer:
*abort_now = 1;
/* XXX */
@ -4624,7 +4556,6 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_PEER_FAULTY, oper, SCTP_SO_NOT_LOCKED);
return;
#endif
}
}
/**********************/
@ -4844,6 +4775,10 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
* peer is either confused or we are under
* attack. We must abort.
*/
printf("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
biggest_tsn_acked,
send_s);
goto hopeless_peer;
}
}
@ -4991,15 +4926,9 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
*/
if ((tp1->sent == SCTP_DATAGRAM_NR_ACKED) ||
(tp1->sent == SCTP_DATAGRAM_NR_MARKED)) {
/*
* EY! - TODO: Something previously
* nr_gapped is reneged, abort the
* association
*/
return;
continue;
}
if ((tp1->sent > SCTP_DATAGRAM_RESEND) &&
(tp1->sent < SCTP_FORWARD_TSN_SKIP)) {
if (tp1->sent == SCTP_DATAGRAM_ACKED) {
tp1->sent = SCTP_DATAGRAM_SENT;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
@ -5028,15 +4957,11 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
}
asoc->saw_sack_with_frags = 0;
}
if (num_seg)
if (num_seg || num_nr_seg)
asoc->saw_sack_with_frags = 1;
else
asoc->saw_sack_with_frags = 0;
/* EY! - not sure about if there should be an IF */
if (num_nr_seg > 0)
sctp_check_for_nr_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
/* JRS - Use the congestion control given in the CC module */
asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
@ -5457,11 +5382,10 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
sctp_ucount_decr(asoc->cnt_on_all_streams);
/* deliver it to at least the delivery-q */
if (stcb->sctp_socket) {
/* EY need the tsn info for calculating nr */
sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
sctp_add_to_readq(stcb->sctp_ep, stcb,
ctl,
&stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
}
} else {
/* no more delivery now. */
@ -5486,10 +5410,10 @@ sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
/* deliver it to at least the delivery-q */
strmin->last_sequence_delivered = ctl->sinfo_ssn;
if (stcb->sctp_socket) {
sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
sctp_add_to_readq(stcb->sctp_ep, stcb,
ctl,
&stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
}
tt = strmin->last_sequence_delivered + 1;
@ -5593,7 +5517,8 @@ sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
struct sctp_forward_tsn_chunk *fwd,
int *abort_flag, struct mbuf *m, int offset)
{
/*
* ISSUES that MUST be fixed for ECN! When we are the sender of the
@ -5619,8 +5544,8 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
* report where we are.
*/
struct sctp_association *asoc;
uint32_t new_cum_tsn, gap;
unsigned int i, fwd_sz, cumack_set_flag, m_size;
uint32_t new_cum_tsn, tsn, gap;
unsigned int i, fwd_sz, cumack_set_flag, m_size, fnd = 0;
uint32_t str_seq;
struct sctp_stream_in *strm;
struct sctp_tmit_chunk *chk, *at;
@ -5657,7 +5582,7 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
* now we know the new TSN is more advanced, let's find the actual
* gap
*/
SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->nr_mapping_array_base_tsn);
SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
asoc->cumulative_tsn = new_cum_tsn;
if (gap >= m_size) {
if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
@ -5697,8 +5622,7 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
asoc->mapping_array_base_tsn = new_cum_tsn + 1;
asoc->highest_tsn_inside_map = new_cum_tsn;
memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
asoc->nr_mapping_array_base_tsn = new_cum_tsn + 1;
memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
asoc->highest_tsn_inside_nr_map = new_cum_tsn;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
@ -5710,14 +5634,32 @@ sctp_handle_forward_tsn(struct sctp_tcb *stcb,
for (i = 0; i <= gap; i++) {
SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, i);
SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
/* FIX ME add something to set up highest TSN in map */
}
if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
asoc->highest_tsn_inside_nr_map = new_cum_tsn;
}
if (compare_with_wrap(new_cum_tsn, asoc->highest_tsn_inside_map, MAX_TSN) ||
new_cum_tsn == asoc->highest_tsn_inside_map) {
/* We must back down to see what the new highest is */
for (tsn = new_cum_tsn; (compare_with_wrap(tsn, asoc->mapping_array_base_tsn, MAX_TSN) ||
(tsn == asoc->mapping_array_base_tsn)); tsn--) {
SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
asoc->highest_tsn_inside_map = tsn;
fnd = 1;
break;
}
}
if (!fnd) {
asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
}
}
/*
* Now after marking all, slide thing forward but no sack
* please.
*/
sctp_sack_check(stcb, 0, 0, abort_flag);
if (*abort_flag)
return;
sctp_slide_mapping_arrays(stcb);
}
/*************************************************************/
/* 2. Clear up re-assembly queue */


@ -121,7 +121,9 @@ sctp_process_data(struct mbuf **, int, int *, int, struct sctphdr *,
struct sctp_inpcb *, struct sctp_tcb *,
struct sctp_nets *, uint32_t *);
void sctp_sack_check(struct sctp_tcb *, int, int, int *);
void sctp_slide_mapping_arrays(struct sctp_tcb *stcb);
void sctp_sack_check(struct sctp_tcb *, int, int *);
#endif
#endif


@ -343,11 +343,6 @@ sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;
asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
/*
* EY 05/13/08 - nr_sack: initialize nr_mapping array's base tsn
* like above
*/
asoc->nr_mapping_array_base_tsn = ntohl(init->initial_tsn);
asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;
asoc->last_echo_tsn = asoc->asconf_seq_in;
asoc->advanced_peer_ack_point = asoc->last_acked_seq;
@ -1862,7 +1857,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
}
if (asoc->nr_mapping_array) {
memset(asoc->nr_mapping_array, 0,
asoc->nr_mapping_array_size);
asoc->mapping_array_size);
}
SCTP_TCB_UNLOCK(stcb);
SCTP_INP_INFO_WLOCK();
@ -2027,7 +2022,7 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
* socket is unbound and we must do an implicit bind. Since we are
* getting a cookie, we cannot be unbound.
*/
stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
stcb = sctp_aloc_assoc(inp, init_src, &error,
ntohl(initack_cp->init.initiate_tag), vrf_id,
(struct thread *)NULL
);
@ -3236,13 +3231,10 @@ process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
}
break;
case SCTP_SELECTIVE_ACK:
case SCTP_NR_SELECTIVE_ACK:
/* resend the sack */
sctp_send_sack(stcb);
break;
/* EY for nr_sacks */
case SCTP_NR_SELECTIVE_ACK:
sctp_send_nr_sack(stcb); /* EY resend the nr-sack */
break;
case SCTP_HEARTBEAT_REQUEST:
/* resend a demand HB */
if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
@ -3514,8 +3506,7 @@ sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
stcb->asoc.nr_mapping_array_base_tsn = stcb->asoc.mapping_array_base_tsn;
memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;
@ -3624,8 +3615,7 @@ sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
stcb->asoc.nr_mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.nr_mapping_array_size);
memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
atomic_add_int(&stcb->asoc.sending_seq, 1);
/* save off historical data for retrans */
stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
@ -5636,7 +5626,7 @@ sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
was_a_gap = 1;
}
stcb->asoc.send_sack = 1;
sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
sctp_sack_check(stcb, was_a_gap, &abort_flag);
if (abort_flag) {
/* Again, we aborted so NO UNLOCK needed */
goto out_now;


@ -9003,6 +9003,11 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
/* No, not sent to this net or not ready for rtx */
continue;
}
if (chk->data == NULL) {
printf("TSN:%x chk->snd_count:%d chk->sent:%d can't retran - no data\n",
chk->rec.data.TSN_seq, chk->snd_count, chk->sent);
continue;
}
if ((SCTP_BASE_SYSCTL(sctp_max_retran_chunk)) &&
(chk->snd_count >= SCTP_BASE_SYSCTL(sctp_max_retran_chunk))) {
/* Gak, we have exceeded max unlucky retran, abort! */
@ -9426,14 +9431,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
* running, if so piggy-back the sack.
*/
if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
/*
* EY if nr_sacks used then send an nr-sack , a sack
* otherwise
*/
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && asoc->peer_supports_nr_sack)
sctp_send_nr_sack(stcb);
else
sctp_send_sack(stcb);
sctp_send_sack(stcb);
(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
}
while (asoc->sent_queue_retran_cnt) {
@ -9856,13 +9854,15 @@ void
sctp_send_sack(struct sctp_tcb *stcb)
{
/*-
* Queue up a SACK or NR-SACK in the control queue. We must
* first check to see if a SACK or NR-SACK is somehow on the
* control queue. If so, we will take it and remove the old one.
*/
struct sctp_association *asoc;
struct sctp_tmit_chunk *chk, *a_chk;
struct sctp_sack_chunk *sack;
struct sctp_nr_sack_chunk *nr_sack;
struct sctp_gap_ack_block *gap_descriptor;
struct sack_track *selector;
int mergeable = 0;
@ -9870,12 +9870,20 @@ sctp_send_sack(struct sctp_tcb *stcb)
caddr_t limit;
uint32_t *dup;
int limit_reached = 0;
unsigned int i, jstart, siz, j;
unsigned int num_gap_blocks = 0, space;
unsigned int i, sel_start, siz, j, starting_index;
unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
int num_dups = 0;
int space_req;
uint32_t highest_tsn;
uint8_t flags;
uint8_t type;
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) &&
stcb->asoc.peer_supports_nr_sack) {
type = SCTP_NR_SELECTIVE_ACK;
} else {
type = SCTP_SELECTIVE_ACK;
}
a_chk = NULL;
asoc = &stcb->asoc;
SCTP_TCB_LOCK_ASSERT(stcb);
@ -9883,9 +9891,10 @@ sctp_send_sack(struct sctp_tcb *stcb)
/* Hmm we never received anything */
return;
}
sctp_slide_mapping_arrays(stcb);
sctp_set_rwnd(stcb, asoc);
TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
if (chk->rec.chunk_id.id == SCTP_SELECTIVE_ACK) {
if (chk->rec.chunk_id.id == type) {
/* Hmm, found a sack already on queue, remove it */
TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
asoc->ctrl_queue_cnt++;
@ -9914,8 +9923,7 @@ sctp_send_sack(struct sctp_tcb *stcb)
return;
}
a_chk->copy_by_ref = 0;
/* a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK; */
a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK;
a_chk->rec.chunk_id.id = type;
a_chk->rec.chunk_id.can_take_data = 1;
}
/* Clear our pkt counts */
@ -9967,7 +9975,11 @@ sctp_send_sack(struct sctp_tcb *stcb)
}
if (highest_tsn == asoc->cumulative_tsn) {
/* no gaps */
space_req = sizeof(struct sctp_sack_chunk);
if (type == SCTP_SELECTIVE_ACK) {
space_req = sizeof(struct sctp_sack_chunk);
} else {
space_req = sizeof(struct sctp_nr_sack_chunk);
}
} else {
/* gaps get a cluster */
space_req = MCLBYTES;
@ -10003,15 +10015,13 @@ sctp_send_sack(struct sctp_tcb *stcb)
limit = mtod(a_chk->data, caddr_t);
limit += space;
sack = mtod(a_chk->data, struct sctp_sack_chunk *);
sack->ch.chunk_type = SCTP_SELECTIVE_ACK;
/* 0x01 is used by nonce for ecn */
if ((SCTP_BASE_SYSCTL(sctp_ecn_enable)) &&
(SCTP_BASE_SYSCTL(sctp_ecn_nonce)) &&
(asoc->peer_supports_ecn_nonce))
sack->ch.chunk_flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
else
sack->ch.chunk_flags = 0;
flags = 0;
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
/*-
@ -10019,7 +10029,7 @@ sctp_send_sack(struct sctp_tcb *stcb)
* received, then set high bit to 1, else 0. Reset
* pkts_rcvd.
*/
sack->ch.chunk_flags |= (asoc->cmt_dac_pkts_rcvd << 6);
flags |= (asoc->cmt_dac_pkts_rcvd << 6);
asoc->cmt_dac_pkts_rcvd = 0;
}
#ifdef SCTP_ASOCLOG_OF_TSNS
@ -10029,39 +10039,81 @@ sctp_send_sack(struct sctp_tcb *stcb)
stcb->asoc.cumack_log_atsnt = 0;
}
#endif
sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
sack->sack.a_rwnd = htonl(asoc->my_rwnd);
asoc->my_last_reported_rwnd = asoc->my_rwnd;
/* reset the readers interpretation */
stcb->freed_by_sorcv_sincelast = 0;
gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
if (highest_tsn > asoc->mapping_array_base_tsn)
siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
else
siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
if (type == SCTP_SELECTIVE_ACK) {
sack = mtod(a_chk->data, struct sctp_sack_chunk *);
nr_sack = NULL;
gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)sack + sizeof(struct sctp_sack_chunk));
if (highest_tsn > asoc->mapping_array_base_tsn) {
siz = (((highest_tsn - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
} else {
siz = (((MAX_TSN - highest_tsn) + 1) + highest_tsn + 7) / 8;
}
} else {
sack = NULL;
nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn) {
siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
} else {
siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
}
}
if (compare_with_wrap(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
offset = 1;
/*-
* The base TSN is initialized to be the first TSN the peer
* will send us. If the cum-ack is behind this then when they
* send us the next in sequence it will mark the base_tsn bit.
* Thus we need to use the very first selector and the offset
* is 1. Our table is built for this case.
*/
jstart = 0;
starting_index = 0;
sel_start = 0;
} else {
offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
/*-
* we skip the first one when the cum-ack is at or above the
* mapping array base. Note this only works if
* we skip the first selector when the cum-ack is at or above the
* mapping array base. This is because the bits at the base or above
* are turned on and our first selector in the table assumes they are
* off. We thus will use the second selector (first is 0). We use
* the reverse of our macro to fix the offset, in bits, that our
* table is at. Note that this method assumes that the cum-tsn is
* within the first bit, i.e. its value is 0-7 which means the
* result to our offset will be either a 0 - -7. If the cumack
* is NOT in the first byte (0) (which it should be since we did
* a mapping array slide above) then we need to calculate the starting
* index i.e. which byte of the mapping array we should start at. We
* do this by dividing by 8 and pushing the remainder (mod) into offset.
* then we multiply the offset to be negative, since we need a negative
* offset into the selector table.
*/
jstart = 1;
SCTP_CALC_TSN_TO_GAP(offset, asoc->cumulative_tsn, asoc->mapping_array_base_tsn);
if (offset > 7) {
starting_index = offset / 8;
offset = offset % 8;
printf("Strange starting index is %d offset:%d (not 0/x)\n",
starting_index, offset);
} else {
starting_index = 0;
}
/* We need a negative offset in our table */
offset *= -1;
sel_start = 1;
}
if (compare_with_wrap(highest_tsn, asoc->cumulative_tsn, MAX_TSN)) {
if (((type == SCTP_SELECTIVE_ACK) &&
compare_with_wrap(highest_tsn, asoc->cumulative_tsn, MAX_TSN)) ||
((type == SCTP_NR_SELECTIVE_ACK) &&
compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN))) {
/* we have a gap .. maybe */
for (i = 0; i < siz; i++) {
selector = &sack_array[(asoc->mapping_array[i] | asoc->nr_mapping_array[i])];
for (i = starting_index; i < siz; i++) {
if (type == SCTP_SELECTIVE_ACK) {
selector = &sack_array[asoc->mapping_array[i] | asoc->nr_mapping_array[i]];
} else {
selector = &sack_array[asoc->mapping_array[i]];
}
if (mergeable && selector->right_edge) {
/*
* Backup, left and right edges were ok to
@ -10073,7 +10125,7 @@ sctp_send_sack(struct sctp_tcb *stcb)
if (selector->num_entries == 0)
mergeable = 0;
else {
for (j = jstart; j < selector->num_entries; j++) {
for (j = sel_start; j < selector->num_entries; j++) {
if (mergeable && selector->right_edge) {
/*
* do a merge by NOT setting
@ -10105,17 +10157,86 @@ sctp_send_sack(struct sctp_tcb *stcb)
/* Reached the limit stop */
break;
}
jstart = 0;
sel_start = 0;
offset += 8;
}
if (num_gap_blocks == 0) {
/*
* slide not yet happened, and somehow we got called
* to send a sack. Cumack needs to move up.
*/
int abort_flag = 0;
}
if ((type == SCTP_NR_SELECTIVE_ACK) &&
(limit_reached == 0)) {
sctp_sack_check(stcb, 0, 0, &abort_flag);
mergeable = 0;
if (asoc->highest_tsn_inside_nr_map > asoc->mapping_array_base_tsn)
siz = (((asoc->highest_tsn_inside_nr_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
else
siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
if (compare_with_wrap(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
offset = 1;
/*-
* cum-ack behind the mapping array, so we start and use all
* entries.
*/
sel_start = 0;
} else {
offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
/*-
* we skip the first one when the cum-ack is at or above the
* mapping array base. Note this only works if
*/
sel_start = 1;
}
if (compare_with_wrap(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn, MAX_TSN)) {
/* we have a gap .. maybe */
for (i = 0; i < siz; i++) {
selector = &sack_array[asoc->nr_mapping_array[i]];
if (mergeable && selector->right_edge) {
/*
* Backup, left and right edges were
* ok to merge.
*/
num_nr_gap_blocks--;
gap_descriptor--;
}
if (selector->num_entries == 0)
mergeable = 0;
else {
for (j = sel_start; j < selector->num_entries; j++) {
if (mergeable && selector->right_edge) {
/*
* do a merge by NOT
* setting the left
* side
*/
mergeable = 0;
} else {
/*
* no merge, set the
* left side
*/
mergeable = 0;
gap_descriptor->start = htons((selector->gaps[j].start + offset));
}
gap_descriptor->end = htons((selector->gaps[j].end + offset));
num_nr_gap_blocks++;
gap_descriptor++;
if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
/* no more room */
limit_reached = 1;
break;
}
}
if (selector->left_edge) {
mergeable = 1;
}
}
if (limit_reached) {
/* Reached the limit stop */
break;
}
sel_start = 0;
offset += 8;
}
}
}
/* now we must add any dups we are going to report. */
@ -10136,393 +10257,35 @@ sctp_send_sack(struct sctp_tcb *stcb)
* now that the chunk is prepared queue it to the control chunk
* queue.
*/
a_chk->send_size = (sizeof(struct sctp_sack_chunk) +
(num_gap_blocks * sizeof(struct sctp_gap_ack_block)) +
(num_dups * sizeof(int32_t)));
SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
sack->sack.num_dup_tsns = htons(num_dups);
sack->ch.chunk_length = htons(a_chk->send_size);
if (type == SCTP_SELECTIVE_ACK) {
a_chk->send_size = sizeof(struct sctp_sack_chunk) +
(num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
num_dups * sizeof(int32_t);
SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
sack->sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
sack->sack.a_rwnd = htonl(asoc->my_rwnd);
sack->sack.num_gap_ack_blks = htons(num_gap_blocks);
sack->sack.num_dup_tsns = htons(num_dups);
sack->ch.chunk_type = type;
sack->ch.chunk_flags = flags;
sack->ch.chunk_length = htons(a_chk->send_size);
} else {
a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
(num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
num_dups * sizeof(int32_t);
SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
nr_sack->nr_sack.reserved = 0;
nr_sack->ch.chunk_type = type;
nr_sack->ch.chunk_flags = flags;
nr_sack->ch.chunk_length = htons(a_chk->send_size);
}
TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
asoc->ctrl_queue_cnt++;
asoc->send_sack = 0;
SCTP_STAT_INCR(sctps_sendsacks);
return;
}
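
The trickiest part of the unified builder is the offset arithmetic documented above: the distance from the mapping-array base to the cum-ack is split into a byte index (starting_index) and a 0-7 bit remainder, and the remainder is negated so that selector gap values, which are bit positions within a byte, come out relative to the cum-ack. A small sketch of that arithmetic with assumed TSN values:

#include <inttypes.h>
#include <stdio.h>

#define MAX_TSN 0xffffffffU

int
main(void)
{
	uint32_t base_tsn = 1000;	/* mapping_array_base_tsn (assumed) */
	uint32_t cum_tsn = 1003;	/* cumulative_tsn (assumed) */
	uint32_t gap;
	int starting_index, offset;

	/* Distance of the cum-ack from the array base, in bits. */
	if (cum_tsn >= base_tsn)
		gap = cum_tsn - base_tsn;
	else
		gap = (MAX_TSN - base_tsn) + cum_tsn + 1;

	starting_index = (int)(gap / 8);	/* first map byte to scan */
	offset = -(int)(gap % 8);	/* make bit positions cum-ack relative */

	/*
	 * A TSN present at bit 5 of that byte is base+5 = cum+2, and is
	 * reported as gap-block boundary 5 + offset = 2.
	 */
	printf("starting_index=%d offset=%d boundary=%d\n",
	    starting_index, offset, 5 + offset);
	return (0);
}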
/* EY - This method will replace sctp_send_sack method if nr_sacks negotiated*/
void
sctp_send_nr_sack(struct sctp_tcb *stcb)
{
/*-
* Queue up an NR-SACK in the control queue. We must first check to see
* if an NR-SACK is somehow on the control queue. If so, we will take
* and and remove the old one.
*/
struct sctp_association *asoc;
struct sctp_tmit_chunk *chk, *a_chk;
struct sctp_nr_sack_chunk *nr_sack;
struct sctp_gap_ack_block *gap_descriptor;
struct sack_track *selector;
struct sack_track *nr_selector;
/* EY do we need nr_mergeable, NO */
int mergeable = 0;
int offset;
caddr_t limit;
uint32_t *dup, highest_tsn;
int limit_reached = 0;
int seen_non_zero = 0;
unsigned int i, jstart, siz, j;
unsigned int num_gap_blocks = 0, num_nr_gap_blocks = 0, space;
int num_dups = 0;
int space_req;
a_chk = NULL;
asoc = &stcb->asoc;
SCTP_TCB_LOCK_ASSERT(stcb);
if (asoc->last_data_chunk_from == NULL) {
/* Hmm we never received anything */
return;
}
sctp_set_rwnd(stcb, asoc);
TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
if (chk->rec.chunk_id.id == SCTP_NR_SELECTIVE_ACK) {
/* Hmm, found a sack already on queue, remove it */
TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
asoc->ctrl_queue_cnt++;
a_chk = chk;
if (a_chk->data) {
sctp_m_freem(a_chk->data);
a_chk->data = NULL;
}
sctp_free_remote_addr(a_chk->whoTo);
a_chk->whoTo = NULL;
break;
}
}
if (a_chk == NULL) {
sctp_alloc_a_chunk(stcb, a_chk);
if (a_chk == NULL) {
/* No memory so we drop the idea, and set a timer */
if (stcb->asoc.delayed_ack) {
sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_5);
sctp_timer_start(SCTP_TIMER_TYPE_RECV,
stcb->sctp_ep, stcb, NULL);
} else {
stcb->asoc.send_sack = 1;
}
return;
}
a_chk->copy_by_ref = 0;
/* a_chk->rec.chunk_id.id = SCTP_SELECTIVE_ACK; */
a_chk->rec.chunk_id.id = SCTP_NR_SELECTIVE_ACK;
a_chk->rec.chunk_id.can_take_data = 1;
}
/* Clear our pkt counts */
asoc->data_pkts_seen = 0;
a_chk->asoc = asoc;
a_chk->snd_count = 0;
a_chk->send_size = 0; /* fill in later */
a_chk->sent = SCTP_DATAGRAM_UNSENT;
a_chk->whoTo = NULL;
if ((asoc->numduptsns) ||
(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)
) {
/*-
* Ok, we have some duplicates or the destination for the
* sack is unreachable, lets see if we can select an
* alternate than asoc->last_data_chunk_from
*/
if ((!(asoc->last_data_chunk_from->dest_state &
SCTP_ADDR_NOT_REACHABLE)) &&
(asoc->used_alt_onsack > asoc->numnets)) {
/* We used an alt last time, don't this time */
a_chk->whoTo = NULL;
} else {
asoc->used_alt_onsack++;
a_chk->whoTo = sctp_find_alternate_net(stcb, asoc->last_data_chunk_from, 0);
}
if (a_chk->whoTo == NULL) {
/* Nope, no alternate */
a_chk->whoTo = asoc->last_data_chunk_from;
asoc->used_alt_onsack = 0;
}
} else {
/*
* No duplicates so we use the last place we received data
* from.
*/
asoc->used_alt_onsack = 0;
a_chk->whoTo = asoc->last_data_chunk_from;
}
if (a_chk->whoTo) {
atomic_add_int(&a_chk->whoTo->ref_count, 1);
}
if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->highest_tsn_inside_nr_map, MAX_TSN)) {
highest_tsn = asoc->highest_tsn_inside_map;
} else {
highest_tsn = asoc->highest_tsn_inside_nr_map;
}
if (highest_tsn == asoc->cumulative_tsn) {
/* no gaps */
space_req = sizeof(struct sctp_nr_sack_chunk);
} else {
/* EY - what is this about? */
/* gaps get a cluster */
space_req = MCLBYTES;
}
/* Ok now lets formulate a MBUF with our sack */
a_chk->data = sctp_get_mbuf_for_msg(space_req, 0, M_DONTWAIT, 1, MT_DATA);
if ((a_chk->data == NULL) ||
(a_chk->whoTo == NULL)) {
/* rats, no mbuf memory */
if (a_chk->data) {
/* was a problem with the destination */
sctp_m_freem(a_chk->data);
a_chk->data = NULL;
}
sctp_free_a_chunk(stcb, a_chk);
/* sa_ignore NO_NULL_CHK */
if (stcb->asoc.delayed_ack) {
sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_OUTPUT + SCTP_LOC_6);
sctp_timer_start(SCTP_TIMER_TYPE_RECV,
stcb->sctp_ep, stcb, NULL);
} else {
stcb->asoc.send_sack = 1;
}
return;
}
/* ok, lets go through and fill it in */
SCTP_BUF_RESV_UF(a_chk->data, SCTP_MIN_OVERHEAD);
space = M_TRAILINGSPACE(a_chk->data);
if (space > (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD)) {
space = (a_chk->whoTo->mtu - SCTP_MIN_OVERHEAD);
}
limit = mtod(a_chk->data, caddr_t);
limit += space;
nr_sack = mtod(a_chk->data, struct sctp_nr_sack_chunk *);
nr_sack->ch.chunk_type = SCTP_NR_SELECTIVE_ACK;
/* EYJ */
/* 0x01 is used by nonce for ecn */
if ((SCTP_BASE_SYSCTL(sctp_ecn_enable)) &&
(SCTP_BASE_SYSCTL(sctp_ecn_nonce)) &&
(asoc->peer_supports_ecn_nonce))
nr_sack->ch.chunk_flags = (asoc->receiver_nonce_sum & SCTP_SACK_NONCE_SUM);
else
nr_sack->ch.chunk_flags = 0;
if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
/*-
* CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
* received, then set high bit to 1, else 0. Reset
* pkts_rcvd.
*/
/* EY - TODO: which chunk flag is used in here? -The LSB */
nr_sack->ch.chunk_flags |= (asoc->cmt_dac_pkts_rcvd << 6);
asoc->cmt_dac_pkts_rcvd = 0;
}
#ifdef SCTP_ASOCLOG_OF_TSNS
stcb->asoc.cumack_logsnt[stcb->asoc.cumack_log_atsnt] = asoc->cumulative_tsn;
stcb->asoc.cumack_log_atsnt++;
if (stcb->asoc.cumack_log_atsnt >= SCTP_TSN_LOG_SIZE) {
stcb->asoc.cumack_log_atsnt = 0;
}
#endif
nr_sack->nr_sack.cum_tsn_ack = htonl(asoc->cumulative_tsn);
nr_sack->nr_sack.a_rwnd = htonl(asoc->my_rwnd);
asoc->my_last_reported_rwnd = asoc->my_rwnd;
/* reset the readers interpretation */
stcb->freed_by_sorcv_sincelast = 0;
gap_descriptor = (struct sctp_gap_ack_block *)((caddr_t)nr_sack + sizeof(struct sctp_nr_sack_chunk));
if (asoc->highest_tsn_inside_map > asoc->mapping_array_base_tsn)
siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
else
siz = (((MAX_TSN - asoc->mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_map + 7) / 8;
if (compare_with_wrap(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
offset = 1;
/*-
* cum-ack behind the mapping array, so we start and use all
* entries.
*/
jstart = 0;
} else {
offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
/*-
* we skip the first one when the cum-ack is at or above the
* mapping array base. Note this only works if
*/
jstart = 1;
}
if (compare_with_wrap(asoc->highest_tsn_inside_map, asoc->cumulative_tsn, MAX_TSN)) {
/* we have a gap .. maybe */
for (i = 0; i < siz; i++) {
seen_non_zero = 1;
selector = &sack_array[asoc->mapping_array[i]];
if (mergeable && selector->right_edge) {
/*
* Backup, left and right edges were ok to
* merge.
*/
num_gap_blocks--;
gap_descriptor--;
}
if (selector->num_entries == 0)
mergeable = 0;
else {
for (j = jstart; j < selector->num_entries; j++) {
if (mergeable && selector->right_edge) {
/*
* do a merge by NOT setting
* the left side
*/
mergeable = 0;
} else {
/*
* no merge, set the left
* side
*/
mergeable = 0;
gap_descriptor->start = htons((selector->gaps[j].start + offset));
}
gap_descriptor->end = htons((selector->gaps[j].end + offset));
num_gap_blocks++;
gap_descriptor++;
if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
/* no more room */
limit_reached = 1;
break;
}
}
if (selector->left_edge) {
mergeable = 1;
}
}
if (limit_reached) {
/* Reached the limit stop */
break;
}
jstart = 0;
offset += 8;
}
}
if (limit_reached == 0) {
mergeable = 0;
if (asoc->highest_tsn_inside_nr_map > asoc->nr_mapping_array_base_tsn)
siz = (((asoc->highest_tsn_inside_nr_map - asoc->nr_mapping_array_base_tsn) + 1) + 7) / 8;
else
siz = (((MAX_TSN - asoc->nr_mapping_array_base_tsn) + 1) + asoc->highest_tsn_inside_nr_map + 7) / 8;
if (compare_with_wrap(asoc->nr_mapping_array_base_tsn, asoc->cumulative_tsn, MAX_TSN)) {
offset = 1;
/*-
* cum-ack behind the mapping array, so we start and use all
* entries.
*/
jstart = 0;
} else {
offset = asoc->nr_mapping_array_base_tsn - asoc->cumulative_tsn;
/*-
* we skip the first one when the cum-ack is at or above the
* mapping array base. Note this only works if
*/
jstart = 1;
}
if (compare_with_wrap(asoc->highest_tsn_inside_nr_map, asoc->cumulative_tsn, MAX_TSN)) {
/* we have a gap .. maybe */
for (i = 0; i < siz; i++) {
nr_selector = &sack_array[asoc->nr_mapping_array[i]];
if (mergeable && nr_selector->right_edge) {
/*
* Backup, left and right edges were
* ok to merge.
*/
num_nr_gap_blocks--;
gap_descriptor--;
}
if (nr_selector->num_entries == 0)
mergeable = 0;
else {
for (j = jstart; j < nr_selector->num_entries; j++) {
if (mergeable && nr_selector->right_edge) {
/*
* do a merge by NOT
* setting the left
* side
*/
mergeable = 0;
} else {
/*
* no merge, set the
* left side
*/
mergeable = 0;
gap_descriptor->start = htons((nr_selector->gaps[j].start + offset));
}
gap_descriptor->end = htons((nr_selector->gaps[j].end + offset));
num_nr_gap_blocks++;
gap_descriptor++;
if (((caddr_t)gap_descriptor + sizeof(struct sctp_gap_ack_block)) > limit) {
/* no more room */
limit_reached = 1;
break;
}
}
if (nr_selector->left_edge) {
mergeable = 1;
}
}
if (limit_reached) {
/* Reached the limit stop */
break;
}
jstart = 0;
offset += 8;
}
}
}
if ((limit_reached == 0) && (asoc->numduptsns)) {
dup = (uint32_t *) gap_descriptor;
for (i = 0; i < asoc->numduptsns; i++) {
*dup = htonl(asoc->dup_tsns[i]);
dup++;
num_dups++;
if (((caddr_t)dup + sizeof(uint32_t)) > limit) {
/* no more room */
break;
}
}
asoc->numduptsns = 0;
}
/*
* now that the chunk is prepared queue it to the control chunk
* queue.
*/
a_chk->send_size = sizeof(struct sctp_nr_sack_chunk) +
(num_gap_blocks + num_nr_gap_blocks) * sizeof(struct sctp_gap_ack_block) +
num_dups * sizeof(int32_t);
SCTP_BUF_LEN(a_chk->data) = a_chk->send_size;
nr_sack->nr_sack.num_gap_ack_blks = htons(num_gap_blocks);
nr_sack->nr_sack.num_nr_gap_ack_blks = htons(num_nr_gap_blocks);
nr_sack->nr_sack.num_dup_tsns = htons(num_dups);
nr_sack->nr_sack.reserved = 0;
nr_sack->ch.chunk_length = htons(a_chk->send_size);
TAILQ_INSERT_TAIL(&asoc->control_send_queue, a_chk, sctp_next);
asoc->ctrl_queue_cnt++;
asoc->send_sack = 0;
SCTP_STAT_INCR(sctps_sendsacks);
@ -12576,7 +12339,7 @@ sctp_lower_sosend(struct socket *so,
panic("Error, should hold create lock and I don't?");
}
#endif
stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id,
stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id,
p
);
if (stcb == NULL) {


@ -155,9 +155,6 @@ void send_forward_tsn(struct sctp_tcb *, struct sctp_association *);
void sctp_send_sack(struct sctp_tcb *);
/* EY 05/07/08 if nr_sacks used, the following function will be called instead of sctp_send_sack */
void sctp_send_nr_sack(struct sctp_tcb *);
int sctp_send_hb(struct sctp_tcb *, int, struct sctp_nets *);
void sctp_send_ecn_echo(struct sctp_tcb *, struct sctp_nets *, uint32_t);


@ -3960,7 +3960,7 @@ sctp_aloc_a_assoc_id(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
*/
struct sctp_tcb *
sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
int for_a_init, int *error, uint32_t override_tag, uint32_t vrf_id,
int *error, uint32_t override_tag, uint32_t vrf_id,
struct thread *p
)
{
@ -4080,7 +4080,7 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
/* setup back pointer's */
stcb->sctp_ep = inp;
stcb->sctp_socket = inp->sctp_socket;
if ((err = sctp_init_asoc(inp, stcb, for_a_init, override_tag, vrf_id))) {
if ((err = sctp_init_asoc(inp, stcb, override_tag, vrf_id))) {
/* failed */
SCTP_TCB_LOCK_DESTROY(stcb);
SCTP_TCB_SEND_LOCK_DESTROY(stcb);
@ -4681,7 +4681,7 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
inp->sctp_lport, stcb->rport);
/*
* Now restop the timers to be sure; this is paranoia at its finest!
*/
(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
(void)SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
@ -6422,9 +6422,11 @@ sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
*/
struct sctp_association *asoc;
struct sctp_tmit_chunk *chk, *nchk;
uint32_t cumulative_tsn_p1, tsn;
uint32_t cumulative_tsn_p1;
struct sctp_queued_to_read *ctl, *nctl;
int cnt, strmat, gap;
int cnt, strmat;
uint32_t gap, i;
int fnd = 0;
/* We look for anything larger than the cum-ack + 1 */
@ -6445,13 +6447,7 @@ sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
cumulative_tsn_p1, MAX_TSN)) {
/* Yep it is above cum-ack */
cnt++;
tsn = chk->rec.data.TSN_seq;
if (tsn >= asoc->mapping_array_base_tsn) {
gap = tsn - asoc->mapping_array_base_tsn;
} else {
gap = (MAX_TSN - asoc->mapping_array_base_tsn) +
tsn + 1;
}
SCTP_CALC_TSN_TO_GAP(gap, chk->rec.data.TSN_seq, asoc->mapping_array_base_tsn);
asoc->size_on_reasm_queue = sctp_sbspace_sub(asoc->size_on_reasm_queue, chk->send_size);
sctp_ucount_decr(asoc->cnt_on_reasm_queue);
SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
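The repeated open-coded gap computations above collapse into SCTP_CALC_TSN_TO_GAP, which also handles 32-bit TSN wraparound. A stand-alone demonstration of both cases, assuming MAX_TSN is 0xffffffff as in sctp_constants.h:

#include <stdint.h>
#include <stdio.h>

#define MAX_TSN 0xffffffffU
#define CALC_TSN_TO_GAP(gap, tsn, mapping_tsn) do { \
	if ((tsn) >= (mapping_tsn)) { \
		(gap) = (tsn) - (mapping_tsn); \
	} else { \
		(gap) = (MAX_TSN - (mapping_tsn)) + (tsn) + 1; \
	} \
} while (0)

int
main(void)
{
	uint32_t gap;

	CALC_TSN_TO_GAP(gap, 1005U, 1000U);	/* plain case */
	printf("gap = %u\n", gap);		/* 5 */

	CALC_TSN_TO_GAP(gap, 2U, 0xfffffffdU);	/* TSN wrapped past 2^32 */
	printf("gap = %u\n", gap);		/* also 5 */
	return (0);
}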
@ -6473,22 +6469,11 @@ sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
cumulative_tsn_p1, MAX_TSN)) {
/* Yep it is above cum-ack */
cnt++;
tsn = ctl->sinfo_tsn;
if (tsn >= asoc->mapping_array_base_tsn) {
gap = tsn -
asoc->mapping_array_base_tsn;
} else {
gap = (MAX_TSN -
asoc->mapping_array_base_tsn) +
tsn + 1;
}
SCTP_CALC_TSN_TO_GAP(gap, ctl->sinfo_tsn, asoc->mapping_array_base_tsn);
asoc->size_on_all_streams = sctp_sbspace_sub(asoc->size_on_all_streams, ctl->length);
sctp_ucount_decr(asoc->cnt_on_all_streams);
SCTP_UNSET_TSN_PRESENT(asoc->mapping_array,
gap);
TAILQ_REMOVE(&asoc->strmin[strmat].inqueue,
ctl, next);
SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
TAILQ_REMOVE(&asoc->strmin[strmat].inqueue, ctl, next);
if (ctl->data) {
sctp_m_freem(ctl->data);
ctl->data = NULL;
@ -6500,69 +6485,44 @@ sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
ctl = nctl;
}
}
/*
* Question, should we go through the delivery queue? The only
* reason things are on here is the app not reading OR a p-d-api up.
* An attacker COULD send enough in to initiate the PD-API and then
* send a bunch of stuff to other streams... these would wind up on
* the delivery queue.. and then we would not get to them. But in
* order to do this I then have to back-track and un-deliver
* sequence numbers in streams.. el-yucko. I think for now we will
* NOT look at the delivery queue and leave it to be something to
* consider later. An alternative would be to abort the P-D-API with
* a notification and then deliver the data.... Or another method
* might be to keep track of how many times the situation occurs and
* if we see a possible attack underway just abort the association.
*/
if (cnt) {
/* We must back down to see what the new highest is */
for (i = asoc->highest_tsn_inside_map;
(compare_with_wrap(i, asoc->mapping_array_base_tsn, MAX_TSN) || (i == asoc->mapping_array_base_tsn));
i--) {
SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
asoc->highest_tsn_inside_map = i;
fnd = 1;
break;
}
}
if (!fnd) {
asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
}
/*
* Question, should we go through the delivery queue? The
* only reason things are on here is the app not reading OR
* a p-d-api up. An attacker COULD send enough in to
* initiate the PD-API and then send a bunch of stuff to
* other streams... these would wind up on the delivery
* queue.. and then we would not get to them. But in order
* to do this I then have to back-track and un-deliver
* sequence numbers in streams.. el-yucko. I think for now
* we will NOT look at the delivery queue and leave it to be
* something to consider later. An alternative would be to
* abort the P-D-API with a notification and then deliver
* the data.... Or another method might be to keep track of
* how many times the situation occurs and if we see a
* possible attack underway just abort the association.
*/
#ifdef SCTP_DEBUG
if (cnt) {
SCTPDBG(SCTP_DEBUG_PCB1, "Freed %d chunks from reneg harvest\n", cnt);
}
#endif
if (cnt) {
/*
* Now do we need to find a new
* asoc->highest_tsn_inside_map?
*/
if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
gap = asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn;
} else {
gap = (MAX_TSN - asoc->mapping_array_base_tsn) +
asoc->highest_tsn_inside_map + 1;
}
if (gap >= (asoc->mapping_array_size << 3)) {
/*
* Something bad happened or cum-ack and high were
* behind the base, but if so earlier checks should
* have found NO data... weird... we will start at
* end of mapping array.
*/
SCTP_PRINTF("Gap was larger than array?? %d set to max:%d maparraymax:%x\n",
(int)gap,
(int)(asoc->mapping_array_size << 3),
(int)asoc->highest_tsn_inside_map);
gap = asoc->mapping_array_size << 3;
}
while (gap > 0) {
if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
/* found the new highest */
asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn + gap;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 8, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
break;
}
gap--;
}
if (gap == 0) {
/* Nothing left in map */
memset(asoc->mapping_array, 0, asoc->mapping_array_size);
asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
sctp_log_map(0, 9, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
}
}
asoc->last_revoke_count = cnt;
(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
/* sa_ignore NO_NULL_CHK */

View File

@ -564,7 +564,7 @@ void sctp_inpcb_free(struct sctp_inpcb *, int, int);
struct sctp_tcb *
sctp_aloc_assoc(struct sctp_inpcb *, struct sockaddr *,
int, int *, uint32_t, uint32_t, struct thread *);
int *, uint32_t, uint32_t, struct thread *);
int sctp_free_assoc(struct sctp_inpcb *, struct sctp_tcb *, int, int);

View File

@ -477,7 +477,6 @@ struct sctp_asconf_addr {
struct sctp_ifa *ifa; /* save the ifa for add/del ip */
uint8_t sent; /* has this been sent yet? */
uint8_t special_del; /* not to be used in lookup */
};
struct sctp_scoping {
@ -771,9 +770,7 @@ struct sctp_association {
/* EY - new NR variables used for nr_sack based on mapping_array */
uint8_t *nr_mapping_array;
uint32_t nr_mapping_array_base_tsn;
uint32_t highest_tsn_inside_nr_map;
uint16_t nr_mapping_array_size;
uint32_t last_echo_tsn;
uint32_t last_cwr_tsn;

View File

@ -1492,7 +1492,7 @@ sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
/* We are GOOD to go */
stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0, vrf_id,
stcb = sctp_aloc_assoc(inp, sa, &error, 0, vrf_id,
(struct thread *)p
);
if (stcb == NULL) {
@ -4459,7 +4459,7 @@ sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
}
vrf_id = inp->def_vrf_id;
/* We are GOOD to go */
stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id, p);
stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, p);
if (stcb == NULL) {
/* Gak! no memory */
goto out_now;

View File

@ -108,7 +108,7 @@ extern struct pr_usrreqs sctp_usrreqs;
sctp_auth_key_release((_stcb), (_chk)->auth_keyid); \
(_chk)->holds_key_ref = 0; \
} \
if(_stcb) { \
if (_stcb) { \
SCTP_TCB_LOCK_ASSERT((_stcb)); \
if ((_chk)->whoTo) { \
sctp_free_remote_addr((_chk)->whoTo); \
@ -231,7 +231,7 @@ extern struct pr_usrreqs sctp_usrreqs;
#ifdef SCTP_FS_SPEC_LOG
#define sctp_total_flight_decrease(stcb, tp1) do { \
if(stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \
if (stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \
stcb->asoc.fs_index = 0;\
stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \
stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.TSN_seq; \
@ -252,7 +252,7 @@ extern struct pr_usrreqs sctp_usrreqs;
} while (0)
#define sctp_total_flight_increase(stcb, tp1) do { \
if(stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \
if (stcb->asoc.fs_index > SCTP_FS_SPEC_LOG_SIZE) \
stcb->asoc.fs_index = 0;\
stcb->asoc.fslog[stcb->asoc.fs_index].total_flight = stcb->asoc.total_flight; \
stcb->asoc.fslog[stcb->asoc.fs_index].tsn = tp1->rec.data.TSN_seq; \

View File

@ -52,9 +52,15 @@ __FBSDID("$FreeBSD$");
#define NUMBER_OF_MTU_SIZES 18
#if defined(__Windows__) && !defined(SCTP_LOCAL_TRACE_BUF)
#include "eventrace_netinet.h"
#include "sctputil.tmh" /* this is the file that will be auto
* generated */
#else
#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif
#endif
void
sctp_sblog(struct sockbuf *sb,
@ -869,7 +875,7 @@ sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int sa
int
sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
int for_a_init, uint32_t override_tag, uint32_t vrf_id)
uint32_t override_tag, uint32_t vrf_id)
{
struct sctp_association *asoc;
@ -1132,9 +1138,7 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
return (ENOMEM);
}
memset(asoc->mapping_array, 0, asoc->mapping_array_size);
/* EY - initialize the nr_mapping_array just like mapping array */
asoc->nr_mapping_array_size = SCTP_INITIAL_NR_MAPPING_ARRAY;
SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->nr_mapping_array_size,
SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
SCTP_M_MAP);
if (asoc->nr_mapping_array == NULL) {
SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
@ -1142,7 +1146,7 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
return (ENOMEM);
}
memset(asoc->nr_mapping_array, 0, asoc->nr_mapping_array_size);
memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
/* Now the init of the other outqueues */
TAILQ_INIT(&asoc->free_chunks);
@ -1207,12 +1211,12 @@ sctp_print_mapping_array(struct sctp_association *asoc)
}
printf("\n");
printf("NR Mapping size:%d baseTSN:%8.8x highestTSN:%8.8x\n",
asoc->nr_mapping_array_size,
asoc->nr_mapping_array_base_tsn,
asoc->mapping_array_size,
asoc->mapping_array_base_tsn,
asoc->highest_tsn_inside_nr_map
);
limit = asoc->nr_mapping_array_size;
for (i = asoc->nr_mapping_array_size; i >= 0; i--) {
limit = asoc->mapping_array_size;
for (i = asoc->mapping_array_size - 1; i >= 0; i--) {
if (asoc->nr_mapping_array[i]) {
limit = i;
break;
@ -1233,37 +1237,34 @@ int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
/* mapping array needs to grow */
uint8_t *new_array;
uint8_t *new_array1, *new_array2;
uint32_t new_size;
new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
if (new_array == NULL) {
SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
if ((new_array1 == NULL) || (new_array2 == NULL)) {
/* can't get more, forget it */
SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
new_size);
SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
if (new_array1) {
SCTP_FREE(new_array1, SCTP_M_MAP);
}
if (new_array2) {
SCTP_FREE(new_array2, SCTP_M_MAP);
}
return (-1);
}
memset(new_array, 0, new_size);
memcpy(new_array, asoc->mapping_array, asoc->mapping_array_size);
memset(new_array1, 0, new_size);
memset(new_array2, 0, new_size);
memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
asoc->mapping_array = new_array;
asoc->mapping_array_size = new_size;
new_size = asoc->nr_mapping_array_size + ((needed + 7) / 8 + SCTP_NR_MAPPING_ARRAY_INCR);
SCTP_MALLOC(new_array, uint8_t *, new_size, SCTP_M_MAP);
if (new_array == NULL) {
/* can't get more, forget it */
SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n",
new_size);
return (-1);
}
memset(new_array, 0, new_size);
memcpy(new_array, asoc->nr_mapping_array, asoc->nr_mapping_array_size);
SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
asoc->nr_mapping_array = new_array;
asoc->nr_mapping_array_size = new_size;
asoc->mapping_array = new_array1;
asoc->nr_mapping_array = new_array2;
asoc->mapping_array_size = new_size;
return (0);
}
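The rewritten expansion grows both maps to one shared size and only commits once both allocations have succeeded. A user-space analogue of that all-or-nothing pattern (calloc/free stand in for SCTP_MALLOC/SCTP_FREE, and the constant 32 mirrors SCTP_MAPPING_ARRAY_INCR):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct maps {
	uint8_t *map;
	uint8_t *nr_map;
	uint32_t size;
};

static int
expand_maps(struct maps *m, uint32_t needed)
{
	uint32_t new_size = m->size + (needed + 7) / 8 + 32;
	uint8_t *a = calloc(1, new_size);
	uint8_t *b = calloc(1, new_size);

	if (a == NULL || b == NULL) {
		free(a);		/* roll back; free(NULL) is a no-op */
		free(b);
		return (-1);
	}
	memcpy(a, m->map, m->size);	/* carry the old contents over */
	memcpy(b, m->nr_map, m->size);
	free(m->map);
	free(m->nr_map);
	m->map = a;
	m->nr_map = b;
	m->size = new_size;
	return (0);
}

int
main(void)
{
	struct maps m = { calloc(1, 16), calloc(1, 16), 16 };

	return (expand_maps(&m, 100) == 0 ? 0 : 1);
}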
@ -1684,21 +1685,9 @@ sctp_timeout_handler(void *t)
if ((stcb == NULL) || (inp == NULL)) {
break;
} {
int abort_flag;
SCTP_STAT_INCR(sctps_timosack);
stcb->asoc.timosack++;
if (stcb->asoc.cumulative_tsn != stcb->asoc.highest_tsn_inside_map)
sctp_sack_check(stcb, 0, 0, &abort_flag);
/*
* EY if nr_sacks used then send an nr-sack , a sack
* otherwise
*/
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
sctp_send_nr_sack(stcb);
else
sctp_send_sack(stcb);
sctp_send_sack(stcb);
}
#ifdef SCTP_AUDITING_ENABLED
sctp_auditing(4, inp, stcb, net);
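The same simplification repeats at every former call site in this file: the branch on sctp_nr_sack_on_off and peer_supports_nr_sack is gone, and the choice between the two chunk types now lives inside sctp_send_sack(). A hypothetical sketch of that consolidation, with illustrative names only (build_chunk and nr_sack_enabled are not the kernel API):

#include <stdio.h>

enum chunk_type { CHUNK_SACK, CHUNK_NR_SACK };

struct assoc {
	int peer_supports_nr_sack;
};

static int nr_sack_enabled = 1;		/* sysctl stand-in */

static void
build_chunk(struct assoc *a, enum chunk_type type)
{
	(void)a;
	printf("building %s\n", type == CHUNK_NR_SACK ? "NR-SACK" : "SACK");
}

/*
 * The decision is made once, here, instead of at every call site.
 */
static void
send_ack(struct assoc *a)
{
	if (nr_sack_enabled && a->peer_supports_nr_sack)
		build_chunk(a, CHUNK_NR_SACK);
	else
		build_chunk(a, CHUNK_SACK);
}

int
main(void)
{
	struct assoc a = { 1 };

	send_ack(&a);
	return (0);
}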
@ -4758,8 +4747,7 @@ sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
*/
if ((tp1) &&
(tp1->rec.data.stream_number == stream) &&
(tp1->rec.data.stream_seq == seq)
) {
(tp1->rec.data.stream_seq == seq)) {
/*
* save to chk in case we have some on stream out
* queue. If so and we have an un-transmitted one we
@ -5102,14 +5090,7 @@ sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
goto out;
}
SCTP_STAT_INCR(sctps_wu_sacks_sent);
/*
* EY if nr_sacks used then send an nr-sack , a sack
* otherwise
*/
if (SCTP_BASE_SYSCTL(sctp_nr_sack_on_off) && stcb->asoc.peer_supports_nr_sack)
sctp_send_nr_sack(stcb);
else
sctp_send_sack(stcb);
sctp_send_sack(stcb);
sctp_chunk_output(stcb->sctp_ep, stcb,
SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
@ -5479,8 +5460,7 @@ sctp_sorecvmsg(struct socket *so,
((ctl->some_taken) ||
((ctl->do_not_ref_stcb == 0) &&
((ctl->spec_flags & M_NOTIFICATION) == 0) &&
(ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))
) {
(ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) {
/*-
* If we have the same tcb, and there is data present, and we
* have the strm interleave feature present. Then if we have
@ -5939,8 +5919,7 @@ sctp_sorecvmsg(struct socket *so,
hold_sblock = 1;
}
if ((copied_so_far) && (control->length == 0) &&
(sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))
) {
(sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) {
goto release;
}
if (so->so_rcv.sb_cc <= control->held_length) {

View File

@ -81,7 +81,7 @@ uint32_t sctp_select_initial_TSN(struct sctp_pcb *);
uint32_t sctp_select_a_tag(struct sctp_inpcb *, uint16_t lport, uint16_t rport, int);
int sctp_init_asoc(struct sctp_inpcb *, struct sctp_tcb *, int, uint32_t, uint32_t);
int sctp_init_asoc(struct sctp_inpcb *, struct sctp_tcb *, uint32_t, uint32_t);
void sctp_fill_random_store(struct sctp_pcb *);

View File

@ -1033,7 +1033,7 @@ sctp6_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
return (EALREADY);
}
/* We are GOOD to go */
stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id, p);
stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, p);
SCTP_ASOC_CREATE_UNLOCK(inp);
if (stcb == NULL) {
/* Gak! no memory */