- Found a bug in min split point bundling which caused incorrect,
  non-bundleable fragmentation.
- Added a min residual to better control split points: it governs
  both how big a message must be before we split it and how much
  must be left over (sketched after this list).
- With our new algorithm in place, we need to implicitly set
  "end of msg" on the sp-> structure, otherwise we end up with
  "hung" associations (sketched after this list).
- Room is now reserved up front for the IP header by pushing the
  IP header to the back of the mbuf (sketched after this list).
- Fix so FRs properly peg the count of retransmissions needed.
- Fix so an unlucky chunk that never gets across kills the assoc
  via the kill timer and sends an abort too (sketched after this
  list).
- Fix a bug in sctp_input which can result in a crash.
- Do not strip off IP options anymore.
- Clean up sctp_calculate_rto().
- Get rid of unused sysctl.
- Fixed so we discard all multicast packets.
- Fixed so the port check is done AFTER the checksum is validated.
- Fixed a bug in the fragmentation code that prevented us from
  fragmenting a small complete message when we needed to.
- Window probes were not marked back to unsent, and flight was not
  adjusted, when a SACK came in with no window change and without
  accepting the probe data. We now fix this by putting a mark on
  both the net and the chunk so we can clear it when the SACK
  arrives, forcing a retransmit just as if the data were "new".
  This improves the handling of window probes that were dropped by
  the receiver (sketched after this list).
- Tighten AUTH protocol error checks during INIT/INIT-ACK exchange
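
The min-residual rule above, condensed into a stand-alone sketch: it
mirrors the check this commit adds to sctp_can_we_split_this() against
the new sctp_min_residual sysctl (default 1452). The helper name and
userspace scaffolding are illustrative, not kernel code.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the sctp_min_residual sysctl (default 1452). */
    #define MIN_RESIDUAL 1452

    /*
     * Only split when the message exceeds the goal MTU AND the piece
     * left behind is at least MIN_RESIDUAL bytes; otherwise sending
     * it whole avoids the sub-optimal fragmentation this commit fixes.
     */
    static int
    can_split(uint32_t msg_len, uint32_t goal_mtu)
    {
        if (msg_len <= goal_mtu || (msg_len - goal_mtu) < MIN_RESIDUAL)
            return (0);    /* sub-optimal residual, don't split */
        return (1);
    }

    int
    main(void)
    {
        printf("%d\n", can_split(1400, 1452));    /* 0: fits whole */
        printf("%d\n", can_split(2000, 1452));    /* 0: leftover too small */
        printf("%d\n", can_split(4000, 1452));    /* 1: worth splitting */
        return (0);
    }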
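
A toy model of the implicit "end of msg" fix. The real code sets
sp->msg_is_complete in sctp_move_to_outqueue() when the association's
socket has been closed; the struct below is a made-up stand-in.

    #include <stdio.h>

    /* Invented stand-in for the stream-queue pending entry (sp->). */
    struct pending {
        int sock_closed;        /* the association's socket was closed */
        int msg_is_complete;    /* no more user data can arrive */
    };

    /*
     * Once the socket is closed no further bytes can be appended to a
     * partially queued message, so force it complete; otherwise the
     * association "hangs" waiting for data that will never come.
     */
    static void
    settle_pending_msg(struct pending *sp)
    {
        if (sp->sock_closed)
            sp->msg_is_complete = 1;
    }

    int
    main(void)
    {
        struct pending sp = { 1, 0 };

        settle_pending_msg(&sp);
        printf("complete: %d\n", sp.msg_is_complete);
        return (0);
    }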
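
The IP-header bullet boils down to the new SCTP_ALIGN_TO_END macro
(MH_ALIGN/M_ALIGN in the diff below). A toy buffer model of what
aligning to the end buys, with invented names throughout:

    #include <stdio.h>

    #define BUFSZ 256    /* invented; stands in for the mbuf data area */

    /* Toy mbuf: data lives at buf + off, length len. */
    struct toy_mbuf {
        char buf[BUFSZ];
        int off;
        int len;
    };

    /*
     * Place len bytes at the END of the buffer, the way MH_ALIGN and
     * M_ALIGN do, so the free space sits up front and lower layers
     * can prepend their headers without allocating another mbuf.
     */
    static void
    align_to_end(struct toy_mbuf *m, int len)
    {
        m->off = BUFSZ - len;
        m->len = len;
    }

    int
    main(void)
    {
        struct toy_mbuf m;

        align_to_end(&m, 20);    /* 20 == sizeof(struct ip) */
        printf("headroom for prepends: %d\n", m.off);
        return (0);
    }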
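
The "unlucky chunk" rule is a bounded per-chunk send count; a condensed
model of just the decision (the real code in
sctp_chunk_retransmission() also sends an ABORT and arms the
SCTP_TIMER_TYPE_ASOCKILL timer):

    #include <stdio.h>

    /* Stand-in for the new sctp_max_retran_chunk sysctl (default 30). */
    #define MAX_RETRAN_CHUNK 30

    /*
     * A chunk whose send count reaches the limit will never get
     * across; rather than retransmitting forever, abort the assoc and
     * let the kill timer guarantee teardown. A limit of 0 disables
     * the check.
     */
    static int
    must_abort_assoc(int snd_count)
    {
        return (MAX_RETRAN_CHUNK && snd_count >= MAX_RETRAN_CHUNK);
    }

    int
    main(void)
    {
        printf("%d\n", must_abort_assoc(5));     /* 0: keep retrying */
        printf("%d\n", must_abort_assoc(30));    /* 1: abort + kill timer */
        return (0);
    }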
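
Finally, a stand-alone TAILQ sketch of the window-probe recovery the
SACK handlers now perform (struct names invented; the real code
additionally deflates flight_size and the queue counters):

    #include <sys/queue.h>
    #include <stdio.h>

    /* Invented chunk carrying the new window_probe mark. */
    struct chunk {
        int tsn;
        int sent;              /* 0 == "unsent" in this toy model */
        int window_probe;
        TAILQ_ENTRY(chunk) next;
    };
    TAILQ_HEAD(chunkq, chunk);

    /*
     * When a SACK reopens the peer's rwnd without accepting the probe
     * data, clear the mark and move the probed chunk from the sent
     * queue back to the head of the send queue, so it is
     * retransmitted just as if it were new data.
     */
    static void
    recover_window_probe(struct chunkq *sentq, struct chunkq *sendq)
    {
        struct chunk *c;

        TAILQ_FOREACH(c, sentq, next) {
            if (c->window_probe) {
                c->window_probe = 0;
                c->sent = 0;    /* back to "unsent" */
                TAILQ_REMOVE(sentq, c, next);
                TAILQ_INSERT_HEAD(sendq, c, next);
                break;          /* only one probe outstanding */
            }
        }
    }

    int
    main(void)
    {
        struct chunkq sentq = TAILQ_HEAD_INITIALIZER(sentq);
        struct chunkq sendq = TAILQ_HEAD_INITIALIZER(sendq);
        struct chunk probe = { .tsn = 42, .sent = 1, .window_probe = 1 };

        TAILQ_INSERT_HEAD(&sentq, &probe, next);
        recover_window_probe(&sentq, &sendq);
        printf("requeued: %d\n", !TAILQ_EMPTY(&sendq));
        return (0);
    }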
Randall Stewart 2007-03-31 11:47:30 +00:00
parent d5750df2ce
commit 5e54f665f0
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=168124
15 changed files with 534 additions and 372 deletions

View File

@ -1413,7 +1413,7 @@ sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
struct sctp_paramhdr *phdr, tmp_param;
uint16_t plen, ptype;
uint8_t random_store[SCTP_PARAM_BUFFER_SIZE];
struct sctp_auth_random *random = NULL;
struct sctp_auth_random *p_random = NULL;
uint16_t random_len = 0;
uint8_t hmacs_store[SCTP_PARAM_BUFFER_SIZE];
struct sctp_auth_hmac_algo *hmacs = NULL;
@ -1444,8 +1444,8 @@ sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
if (phdr == NULL)
return;
/* save the random and length for the key */
random = (struct sctp_auth_random *)phdr;
random_len = plen - sizeof(*random);
p_random = (struct sctp_auth_random *)phdr;
random_len = plen - sizeof(*p_random);
} else if (ptype == SCTP_HMAC_LIST) {
int num_hmacs;
int i;
@ -1503,18 +1503,18 @@ sctp_auth_get_cookie_params(struct sctp_tcb *stcb, struct mbuf *m,
new_key = sctp_alloc_key(keylen);
if (new_key != NULL) {
/* copy in the RANDOM */
if (random != NULL)
bcopy(random->random_data, new_key->key, random_len);
if (p_random != NULL)
bcopy(p_random->random_data, new_key->key, random_len);
}
#else
keylen = sizeof(*random) + random_len + sizeof(*chunks) + num_chunks +
keylen = sizeof(*p_random) + random_len + sizeof(*chunks) + num_chunks +
sizeof(*hmacs) + hmacs_len;
new_key = sctp_alloc_key(keylen);
if (new_key != NULL) {
/* copy in the RANDOM */
if (random != NULL) {
keylen = sizeof(*random) + random_len;
bcopy(random, new_key->key, keylen);
if (p_random != NULL) {
keylen = sizeof(*p_random) + random_len;
bcopy(p_random, new_key->key, keylen);
}
/* append in the AUTH chunks */
if (chunks != NULL) {
@ -1829,7 +1829,7 @@ sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit)
uint16_t ptype, plen;
int peer_supports_asconf = 0;
int peer_supports_auth = 0;
int got_random = 0, got_hmacs = 0;
int got_random = 0, got_hmacs = 0, got_chklist = 0;
/* go through each of the params. */
phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
@ -1904,6 +1904,10 @@ sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit)
return (-1);
}
got_hmacs = 1;
} else if (ptype == SCTP_CHUNK_LIST) {
/* did the peer send a non-empty chunk list? */
if (plen > 0)
got_chklist = 1;
}
offset += SCTP_SIZE32(plen);
if (offset >= limit) {
@ -1918,6 +1922,13 @@ sctp_validate_init_auth_params(struct mbuf *m, int offset, int limit)
} else {
peer_supports_auth = 0;
}
if (!peer_supports_auth && got_chklist) {
#ifdef SCTP_DEBUG
if (sctp_debug_on & SCTP_DEBUG_AUTH1)
printf("SCTP: peer sent chunk list w/o AUTH\n");
#endif
return (-1);
}
if (!sctp_asconf_auth_nochk && peer_supports_asconf &&
!peer_supports_auth) {
#ifdef SCTP_DEBUG

View File

@ -440,9 +440,6 @@ __FBSDID("$FreeBSD$");
#define SCTP_STICKY_OPTIONS_MASK 0x0c
/* Chunk flags */
#define SCTP_WINDOW_PROBE 0x01
/*
* SCTP states for internal state machine XXX (should match "user" values)
*/
@ -787,7 +784,7 @@ __FBSDID("$FreeBSD$");
* want to take will fill up a full MTU (assuming
* a 1500 byte MTU).
*/
#define SCTP_DEFAULT_SPLIT_POINT_MIN 1452
#define SCTP_DEFAULT_SPLIT_POINT_MIN 2904
/* ABORT CODES and other tell-tale location
* codes are generated by adding the below
@ -911,6 +908,9 @@ __FBSDID("$FreeBSD$");
#define SCTP_UNSET_TSN_PRESENT(arry, gap) (arry[(gap >> 3)] &= ((~(0x01 << ((gap & 0x07)))) & 0xff))
#define SCTP_RETRAN_DONE -1
#define SCTP_RETRAN_EXIT -2
/*
* This value defines the number of vtag block time wait entries per list
* element. Each entry will take 2 4 byte ints (and of course the overhead

View File

@ -3201,7 +3201,9 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
if (stcb->asoc.peer_supports_prsctp) {
if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
/* Is it expired? */
if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
if (
(timevalcmp(&now, &tp1->rec.data.timetodrop, >))
) {
/* Yes so drop it */
if (tp1->data != NULL) {
sctp_release_pr_sctp_chunk(stcb, tp1,
@ -3285,7 +3287,9 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
tp1->sent,
SCTP_FR_LOG_STRIKE_CHUNK);
#endif
tp1->sent++;
if (tp1->sent < SCTP_DATAGRAM_RESEND) {
tp1->sent++;
}
if (sctp_cmt_on_off && sctp_cmt_use_dac) {
/*
* CMT DAC algorithm: If SACK flag is set to
@ -3296,7 +3300,11 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
* not between two sacked TSNs, then mark by
* one more.
*/
if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
/*
* Jana FIX, does this mean you strike it
* twice (see code above?)
*/
if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
#ifdef SCTP_FR_LOGGING
sctp_log_fr(16 + num_dests_sacked,
@ -3314,6 +3322,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
* biggest_newly_acked must be higher than the
* sending_seq at the time we did the FR.
*/
if (
#ifdef SCTP_FR_TO_ALTERNATE
/*
* If FR's go to new networks, then we must only do
@ -3321,11 +3330,12 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
* go to the same network (Armando's work) then its
* ok to FR multiple times.
*/
if (asoc->numnets < 2)
(asoc->numnets < 2)
#else
if (1)
(1)
#endif
{
) {
if ((compare_with_wrap(biggest_tsn_newly_acked,
tp1->rec.data.fast_retran_tsn, MAX_TSN)) ||
(biggest_tsn_newly_acked ==
@ -3341,7 +3351,9 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
tp1->sent,
SCTP_FR_LOG_STRIKE_CHUNK);
#endif
tp1->sent++;
if (tp1->sent < SCTP_DATAGRAM_RESEND) {
tp1->sent++;
}
strike_flag = 1;
if (sctp_cmt_on_off && sctp_cmt_use_dac) {
/*
@ -3357,15 +3369,20 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
* sacked TSNs, then mark by
* one more.
*/
if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
(num_dests_sacked == 1) &&
compare_with_wrap(this_sack_lowest_newack,
tp1->rec.data.TSN_seq, MAX_TSN)) {
#ifdef SCTP_FR_LOGGING
sctp_log_fr(32 + num_dests_sacked,
tp1->rec.data.TSN_seq,
tp1->sent,
SCTP_FR_LOG_STRIKE_CHUNK);
#endif
tp1->sent++;
if (tp1->sent < SCTP_DATAGRAM_RESEND) {
tp1->sent++;
}
}
}
}
@ -3390,7 +3407,9 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
tp1->sent,
SCTP_FR_LOG_STRIKE_CHUNK);
#endif
tp1->sent++;
if (tp1->sent < SCTP_DATAGRAM_RESEND) {
tp1->sent++;
}
if (sctp_cmt_on_off && sctp_cmt_use_dac) {
/*
* CMT DAC algorithm: If SACK flag is set to
@ -3401,7 +3420,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
* not between two sacked TSNs, then mark by
* one more.
*/
if ((tp1->sent != SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
compare_with_wrap(this_sack_lowest_newack, tp1->rec.data.TSN_seq, MAX_TSN)) {
#ifdef SCTP_FR_LOGGING
sctp_log_fr(48 + num_dests_sacked,
@ -3426,8 +3445,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
/* This is a subsequent FR */
SCTP_STAT_INCR(sctps_sendmultfastretrans);
}
sctp_ucount_incr(asoc->sent_queue_retran_cnt);
sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
if (sctp_cmt_on_off) {
/*
* CMT: Using RTX_SSTHRESH policy for CMT.
@ -4061,10 +4079,16 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
struct sctp_nets *net;
struct sctp_association *asoc;
struct sctp_tmit_chunk *tp1, *tp2;
uint32_t old_rwnd;
int win_probe_recovery = 0;
int j;
SCTP_TCB_LOCK_ASSERT(stcb);
asoc = &stcb->asoc;
if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
/* old ack */
return;
}
/* First setup for CC stuff */
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
net->prev_cwnd = net->cwnd;
@ -4116,118 +4140,126 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
#endif
}
}
old_rwnd = asoc->peers_rwnd;
asoc->this_sack_highest_gap = cumack;
stcb->asoc.overall_error_count = 0;
/* process the new consecutive TSN first */
tp1 = TAILQ_FIRST(&asoc->sent_queue);
while (tp1) {
tp2 = TAILQ_NEXT(tp1, sctp_next);
if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
MAX_TSN) ||
cumack == tp1->rec.data.TSN_seq) {
if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
/*
* ECN Nonce: Add the nonce to the sender's
* nonce sum
*/
asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
if (tp1->sent < SCTP_DATAGRAM_ACKED) {
if (compare_with_wrap(cumack, asoc->last_acked_seq, MAX_TSN)) {
/* process the new consecutive TSN first */
tp1 = TAILQ_FIRST(&asoc->sent_queue);
while (tp1) {
tp2 = TAILQ_NEXT(tp1, sctp_next);
if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
MAX_TSN) ||
cumack == tp1->rec.data.TSN_seq) {
if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
/*
* If it is less than ACKED, it is
* now no-longer in flight. Higher
* values may occur during marking
* ECN Nonce: Add the nonce to the
* sender's nonce sum
*/
asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
if (tp1->sent < SCTP_DATAGRAM_ACKED) {
/*
* If it is less than ACKED,
* it is now no-longer in
* flight. Higher values may
* occur during marking
*/
#ifdef SCTP_FLIGHT_LOGGING
sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
tp1->whoTo->flight_size,
tp1->book_size,
(uintptr_t) stcb,
tp1->rec.data.TSN_seq);
sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN,
tp1->whoTo->flight_size,
tp1->book_size,
(uintptr_t) stcb,
tp1->rec.data.TSN_seq);
#endif
if (tp1->whoTo->flight_size >= tp1->book_size) {
tp1->whoTo->flight_size -= tp1->book_size;
} else {
tp1->whoTo->flight_size = 0;
}
if (asoc->total_flight >= tp1->book_size) {
asoc->total_flight -= tp1->book_size;
if (asoc->total_flight_count > 0)
asoc->total_flight_count--;
} else {
asoc->total_flight = 0;
asoc->total_flight_count = 0;
}
tp1->whoTo->net_ack += tp1->send_size;
if (tp1->snd_count < 2) {
/*
* True non-retransmited
* chunk
*/
tp1->whoTo->net_ack2 +=
tp1->send_size;
/* update RTO too? */
if (tp1->do_rtt) {
tp1->whoTo->RTO =
sctp_calculate_rto(stcb,
asoc, tp1->whoTo,
&tp1->sent_rcv_time);
tp1->do_rtt = 0;
if (tp1->whoTo->flight_size >= tp1->book_size) {
tp1->whoTo->flight_size -= tp1->book_size;
} else {
tp1->whoTo->flight_size = 0;
}
}
/*
* CMT: CUCv2 algorithm. From the
* cumack'd TSNs, for each TSN being
* acked for the first time, set the
* following variables for the
* corresp destination.
* new_pseudo_cumack will trigger a
* cwnd update.
* find_(rtx_)pseudo_cumack will
* trigger search for the next
* expected (rtx-)pseudo-cumack.
*/
tp1->whoTo->new_pseudo_cumack = 1;
tp1->whoTo->find_pseudo_cumack = 1;
tp1->whoTo->find_rtx_pseudo_cumack = 1;
if (asoc->total_flight >= tp1->book_size) {
asoc->total_flight -= tp1->book_size;
if (asoc->total_flight_count > 0)
asoc->total_flight_count--;
} else {
asoc->total_flight = 0;
asoc->total_flight_count = 0;
}
tp1->whoTo->net_ack += tp1->send_size;
if (tp1->snd_count < 2) {
/*
* True
* non-retransmitted
* chunk
*/
tp1->whoTo->net_ack2 +=
tp1->send_size;
/* update RTO too? */
if (tp1->do_rtt) {
tp1->whoTo->RTO =
sctp_calculate_rto(stcb,
asoc, tp1->whoTo,
&tp1->sent_rcv_time);
tp1->do_rtt = 0;
}
}
/*
* CMT: CUCv2 algorithm.
* From the cumack'd TSNs,
* for each TSN being acked
* for the first time, set
* the following variables
* for the corresp
* destination.
* new_pseudo_cumack will
* trigger a cwnd update.
* find_(rtx_)pseudo_cumack
* will trigger search for
* the next expected
* (rtx-)pseudo-cumack.
*/
tp1->whoTo->new_pseudo_cumack = 1;
tp1->whoTo->find_pseudo_cumack = 1;
tp1->whoTo->find_rtx_pseudo_cumack = 1;
#ifdef SCTP_CWND_LOGGING
sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
#endif
}
if (tp1->sent == SCTP_DATAGRAM_RESEND) {
sctp_ucount_decr(asoc->sent_queue_retran_cnt);
}
if (tp1->rec.data.chunk_was_revoked) {
/* deflate the cwnd */
tp1->whoTo->cwnd -= tp1->book_size;
tp1->rec.data.chunk_was_revoked = 0;
}
tp1->sent = SCTP_DATAGRAM_ACKED;
}
if (tp1->sent == SCTP_DATAGRAM_RESEND) {
sctp_ucount_decr(asoc->sent_queue_retran_cnt);
}
if (tp1->rec.data.chunk_was_revoked) {
/* deflate the cwnd */
tp1->whoTo->cwnd -= tp1->book_size;
tp1->rec.data.chunk_was_revoked = 0;
}
tp1->sent = SCTP_DATAGRAM_ACKED;
} else {
break;
}
TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
if (tp1->data) {
sctp_free_bufspace(stcb, asoc, tp1, 1);
sctp_m_freem(tp1->data);
}
} else {
break;
}
TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
if (tp1->data) {
sctp_free_bufspace(stcb, asoc, tp1, 1);
sctp_m_freem(tp1->data);
}
#ifdef SCTP_SACK_LOGGING
sctp_log_sack(asoc->last_acked_seq,
cumack,
tp1->rec.data.TSN_seq,
0,
0,
SCTP_LOG_FREE_SENT);
sctp_log_sack(asoc->last_acked_seq,
cumack,
tp1->rec.data.TSN_seq,
0,
0,
SCTP_LOG_FREE_SENT);
#endif
tp1->data = NULL;
asoc->sent_queue_cnt--;
sctp_free_remote_addr(tp1->whoTo);
sctp_free_a_chunk(stcb, tp1);
tp1 = tp2;
tp1->data = NULL;
asoc->sent_queue_cnt--;
sctp_free_remote_addr(tp1->whoTo);
sctp_free_a_chunk(stcb, tp1);
tp1 = tp2;
}
}
if (stcb->sctp_socket) {
SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
@ -4241,9 +4273,12 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
#endif
}
if (asoc->last_acked_seq != cumack)
sctp_cwnd_update(stcb, asoc, 1, 0, 0);
asoc->last_acked_seq = cumack;
if (TAILQ_EMPTY(&asoc->sent_queue)) {
/* nothing left in-flight */
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
@ -4312,10 +4347,35 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
/* SWS sender side engages */
asoc->peers_rwnd = 0;
}
if (asoc->peers_rwnd > old_rwnd) {
win_probe_recovery = 1;
}
/* Now assure a timer where data is queued at */
again:
j = 0;
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
if (win_probe_recovery && (net->window_probe)) {
net->window_probe = 0;
/*
* Find first chunk that was used with window probe
* and clear the sent
*/
TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
if (tp1->window_probe) {
/* move back to data send queue */
tp1->sent = SCTP_DATAGRAM_UNSENT;
tp1->window_probe = 0;
net->flight_size -= tp1->book_size;
asoc->total_flight -= tp1->book_size;
TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
TAILQ_INSERT_HEAD(&asoc->send_queue, tp1, sctp_next);
asoc->sent_queue_cnt--;
asoc->send_queue_cnt++;
asoc->total_flight_count--;
break;
}
}
}
if (net->flight_size) {
int to_ticks;
@ -4474,7 +4534,8 @@ sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
long j;
int accum_moved = 0;
int will_exit_fast_recovery = 0;
uint32_t a_rwnd;
uint32_t a_rwnd, old_rwnd;
int win_probe_recovery = 0;
struct sctp_nets *net = NULL;
int nonce_sum_flag, ecn_seg_sums = 0;
uint8_t reneged_all = 0;
@ -4526,7 +4587,7 @@ sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
cmt_dac_flag = ch->ch.chunk_flags & SCTP_SACK_CMT_DAC;
num_dup = ntohs(sack->num_dup_tsns);
old_rwnd = stcb->asoc.peers_rwnd;
stcb->asoc.overall_error_count = 0;
asoc = &stcb->asoc;
#ifdef SCTP_SACK_LOGGING
@ -5323,6 +5384,9 @@ sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
/* SWS sender side engages */
asoc->peers_rwnd = 0;
}
if (asoc->peers_rwnd > old_rwnd) {
win_probe_recovery = 1;
}
/*
* Now we must setup so we have a timer up for anyone with
* outstanding data.
@ -5330,6 +5394,29 @@ sctp_handle_sack(struct sctp_sack_chunk *ch, struct sctp_tcb *stcb,
again:
j = 0;
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
if (win_probe_recovery && (net->window_probe)) {
net->window_probe = 0;
/*-
* Find first chunk that was used with
* window probe and clear the event. Put
* it back into the send queue as if has
* not been sent.
*/
TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
if (tp1->window_probe) {
tp1->sent = SCTP_DATAGRAM_UNSENT;
tp1->window_probe = 0;
net->flight_size -= tp1->book_size;
asoc->total_flight -= tp1->book_size;
TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
TAILQ_INSERT_HEAD(&asoc->send_queue, tp1, sctp_next);
asoc->sent_queue_cnt--;
asoc->send_queue_cnt++;
asoc->total_flight_count--;
break;
}
}
}
if (net->flight_size) {
j++;
sctp_timer_start(SCTP_TIMER_TYPE_SEND,

View File

@ -1313,7 +1313,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
if (chk->sent < SCTP_DATAGRAM_RESEND) {
chk->sent = SCTP_DATAGRAM_RESEND;
stcb->asoc.sent_queue_retran_cnt++;
sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
spec_flag++;
}
}
@ -4702,9 +4702,6 @@ sctp_input(i_pak, off)
SCTP_STAT_INCR(sctps_recvpackets);
SCTP_STAT_INCR_COUNTER64(sctps_inpackets);
/*
* Strip IP options, we don't allow any in or out.
*/
#ifdef SCTP_MBUF_LOGGING
/* Log in any input mbufs */
mat = m;
@ -4715,10 +4712,7 @@ sctp_input(i_pak, off)
mat = SCTP_BUF_NEXT(mat);
}
#endif
if ((size_t)iphlen > sizeof(struct ip)) {
ip_stripoptions(m, (struct mbuf *)0);
iphlen = sizeof(struct ip);
}
/*
* Get IP, SCTP, and first chunk header together in first mbuf.
*/
@ -4738,23 +4732,15 @@ sctp_input(i_pak, off)
if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
goto bad;
}
if (((ch->chunk_type == SCTP_INITIATION) ||
(ch->chunk_type == SCTP_INITIATION_ACK) ||
(ch->chunk_type == SCTP_COOKIE_ECHO)) &&
(SCTP_IS_IT_BROADCAST(ip->ip_dst, i_pak))) {
if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
/*
* We only look at broadcast if it's a front state; all
* others we will not have a tcb for anyway.
*/
goto bad;
}
/* destination port of 0 is illegal, based on RFC2960. */
if (sh->dest_port == 0) {
SCTP_STAT_INCR(sctps_hdrops);
goto bad;
}
/* validate SCTP checksum */
if ((sctp_no_csum_on_loopback == 0) || !SCTP_IS_IT_LOOPBACK(i_pak)) {
if ((sctp_no_csum_on_loopback == 0) || !SCTP_IS_IT_LOOPBACK(m)) {
/*
* we do NOT validate things from the loopback if the sysctl
* is set to 1.
@ -4795,7 +4781,13 @@ sctp_input(i_pak, off)
sh->checksum = calc_check;
} else {
sctp_skip_csum_4:
mlen = SCTP_HEADER_LEN(i_pak);
mlen = SCTP_HEADER_LEN(m);
}
/* destination port of 0 is illegal, based on RFC2960. */
if (sh->dest_port == 0) {
SCTP_STAT_INCR(sctps_hdrops);
goto bad;
}
/* validate mbuf chain length with IP payload length */
if (mlen < (ip->ip_len - iphlen)) {

View File

@ -201,6 +201,13 @@ typedef struct callout sctp_os_timer_t;
#define SCTP_BUF_TYPE(m) (m->m_type)
#define SCTP_BUF_RECVIF(m) (m->m_pkthdr.rcvif)
#define SCTP_BUF_PREPEND M_PREPEND
#define SCTP_ALIGN_TO_END(m, len) if(m->m_flags & M_PKTHDR) { \
MH_ALIGN(m, len); \
} else if ((m->m_flags & M_EXT) == 0) { \
M_ALIGN(m, len); \
}
/*************************/
/* These are for logging */
/*************************/

View File

@ -3445,6 +3445,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
sctp_m_freem(m);
return (ENOMEM);
}
SCTP_ALIGN_TO_END(o_pak, sizeof(struct ip));
SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip);
packet_length += sizeof(struct ip);
SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
@ -3670,6 +3671,8 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
sctp_m_freem(m);
return (ENOMEM);
}
SCTP_ALIGN_TO_END(o_pak, sizeof(struct ip6_hdr));
SCTP_BUF_LEN(SCTP_HEADER_TO_CHAIN(o_pak)) = sizeof(struct ip6_hdr);
packet_length += sizeof(struct ip6_hdr);
SCTP_ATTACH_CHAIN(o_pak, m, packet_length);
@ -6019,9 +6022,6 @@ sctp_clean_up_datalist(struct sctp_tcb *stcb,
(bundle_at == 1)) {
/* Mark the chunk as being a window probe */
SCTP_STAT_INCR(sctps_windowprobed);
data_list[i]->rec.data.state_flags |= SCTP_WINDOW_PROBE;
} else {
data_list[i]->rec.data.state_flags &= ~SCTP_WINDOW_PROBE;
}
#ifdef SCTP_AUDITING_ENABLED
sctp_audit_log(0xC2, 3);
@ -6102,6 +6102,10 @@ sctp_can_we_split_this(struct sctp_tcb *stcb,
/* you don't want enough */
return (0);
}
if ((sp->length <= goal_mtu) || ((sp->length - goal_mtu) < sctp_min_residual)) {
/* Sub-optimal residual, don't split */
return (0);
}
if (sp->msg_is_complete == 0) {
if (eeor_on) {
/*
@ -6186,18 +6190,21 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
panic("sp length is 0?");
}
some_taken = sp->some_taken;
if ((goal_mtu >= sp->length) && (sp->msg_is_complete)) {
/* It all fits and its a complete msg, no brainer */
if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
sp->msg_is_complete = 1;
}
if (sp->msg_is_complete) {
/* The message is complete */
to_move = min(sp->length, frag_point);
if (to_move == sp->length) {
/* Getting it all */
/* All of it fits in the MTU */
if (sp->some_taken) {
rcv_flags |= SCTP_DATA_LAST_FRAG;
} else {
rcv_flags |= SCTP_DATA_NOT_FRAG;
}
} else {
/* Not getting it all, frag point overrides */
/* Not all of it fits, we fragment */
if (sp->some_taken == 0) {
rcv_flags |= SCTP_DATA_FIRST_FRAG;
}
@ -6562,7 +6569,7 @@ sctp_fill_outqueue(struct sctp_tcb *stcb,
}
}
total_moved += moved_how_much;
goal_mtu -= moved_how_much;
goal_mtu -= (moved_how_much + sizeof(struct sctp_data_chunk));
goal_mtu &= 0xfffffffc;
}
if (total_moved == 0) {
@ -6724,6 +6731,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
start_at = net;
one_more_time:
for (; net != NULL; net = TAILQ_NEXT(net, sctp_next)) {
net->window_probe = 0;
if (old_startat && (old_startat == net)) {
break;
}
@ -7106,9 +7114,9 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
continue;
}
if ((chk->send_size > omtu) && ((chk->flags & CHUNK_FLAGS_FRAGMENT_OK) == 0)) {
/*
/*-
* strange, we have a chunk that is
* to bit for its destination and
* too big for its destination and
* yet no fragment ok flag.
* Something went wrong when the
* PMTU changed...we did not mark
@ -7178,12 +7186,13 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
to_out += chk->send_size;
if (to_out > mx_mtu) {
#ifdef INVARIANTS
panic("gag");
panic("Exceeding mtu of %d out size is %d", mx_mtu, to_out);
#else
printf("Exceeding mtu of %d out size is %d\n",
mx_mtu, to_out);
#endif
}
chk->window_probe = 0;
data_list[bundle_at++] = chk;
if (bundle_at >= SCTP_MAX_DATA_BUNDLING) {
mtu = 0;
@ -7209,6 +7218,10 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
SCTP_STAT_INCR_COUNTER64(sctps_fragusrmsgs);
}
if ((mtu == 0) || (r_mtu == 0) || (one_chunk)) {
if (one_chunk) {
data_list[0]->window_probe = 1;
net->window_probe = 1;
}
break;
}
} else {
@ -7218,7 +7231,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
*/
break;
}
} /* for () */
} /* for (chunk gather loop for this net) */
} /* if asoc.state OPEN */
/* Is there something to send for this destination? */
if (outchain) {
@ -7252,7 +7265,9 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
shdr->v_tag = htonl(stcb->asoc.peer_vtag);
shdr->checksum = 0;
auth_offset += sizeof(struct sctphdr);
if ((error = sctp_lowlevel_chunk_output(inp, stcb, net,
if ((error = sctp_lowlevel_chunk_output(inp,
stcb,
net,
(struct sockaddr *)&net->ro._l_addr,
outchain,
auth_offset,
@ -7293,7 +7308,15 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
sctp_move_to_an_alt(stcb, asoc, net);
}
*reason_code = 6;
continue;
/*-
* I add this line to be paranoid. As far as
* I can tell the continue, takes us back to
* the top of the for, but just to make sure
* I will reset these again here.
*/
ctl_cnt = bundle_at = 0;
continue; /* This takes us back to the
* for() for the nets. */
} else {
asoc->ifp_had_enobuf = 0;
}
@ -7371,7 +7394,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
void
sctp_queue_op_err(struct sctp_tcb *stcb, struct mbuf *op_err)
{
/*
/*-
* Prepend an OPERATIONAL_ERROR chunk header and put it on the end of
* the control chunk queue.
*/
@ -7423,7 +7446,7 @@ sctp_send_cookie_echo(struct mbuf *m,
struct sctp_tcb *stcb,
struct sctp_nets *net)
{
/*
/*-
* pull out the cookie and put it at the front of the control chunk
* queue.
*/
@ -7812,12 +7835,12 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
struct sctp_association *asoc,
int *cnt_out, struct timeval *now, int *now_filled, int *fr_done)
{
/*
/*-
* send out one MTU of retransmission. If fast_retransmit is
* happening we ignore the cwnd. Otherwise we obey the cwnd and
* rwnd. For a Cookie or Asconf in the control chunk queue we
* retransmit them by themselves.
*
*
* For data chunks we will pick out the lowest TSN's in the sent_queue
* marked for resend and bundle them all together (up to a MTU of
* destination). The address to send to should have been
@ -7950,7 +7973,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
* fwd-tsn with it all.
*/
if (TAILQ_EMPTY(&asoc->sent_queue)) {
return (-1);
return (SCTP_RETRAN_DONE);
}
if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) ||
(SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT)) {
@ -7966,6 +7989,18 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
continue;
}
if ((sctp_max_retran_chunk) && (chk->snd_count >= sctp_max_retran_chunk)) {
/* Gak, we have exceeded max unlucky retran, abort! */
#ifdef SCTP_DEBUG
printf("Gak, chk->snd_count:%d >= max:%d - send abort\n",
chk->snd_count,
sctp_max_retran_chunk);
#endif
sctp_send_abort_tcb(stcb, NULL);
sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
return (SCTP_RETRAN_EXIT);
}
/* pick up the net */
net = chk->whoTo;
if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
@ -7993,6 +8028,11 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
one_chunk_around:
if (asoc->peers_rwnd < mtu) {
one_chunk = 1;
if ((asoc->peers_rwnd == 0) &&
(asoc->total_flight == 0)) {
chk->window_probe = 1;
chk->whoTo->window_probe = 1;
}
}
#ifdef SCTP_AUDITING_ENABLED
sctp_audit_log(0xC3, 2);
@ -8056,7 +8096,6 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
data_list[bundle_at++] = chk;
if (one_chunk && (asoc->total_flight <= 0)) {
SCTP_STAT_INCR(sctps_windowprobed);
chk->rec.data.state_flags |= SCTP_WINDOW_PROBE;
}
}
if (one_chunk == 0) {
@ -8193,9 +8232,6 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
sctp_ucount_decr(asoc->sent_queue_retran_cnt);
/* record the time */
data_list[i]->sent_rcv_time = asoc->time_last_sent;
if (asoc->sent_queue_retran_cnt < 0) {
asoc->sent_queue_retran_cnt = 0;
}
if (data_list[i]->book_size_scale) {
/*
* need to double the book size on
@ -8240,7 +8276,7 @@ sctp_chunk_retransmission(struct sctp_inpcb *inp,
SCTP_STAT_INCR(sctps_sendfastretrans);
if ((data_list[i] == TAILQ_FIRST(&asoc->sent_queue)) &&
(tmr_started == 0)) {
/*
/*-
* ok we just fast-retrans'd
* the lowest TSN, i.e the
* first on the list. In
@ -8312,7 +8348,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
struct sctp_tcb *stcb,
int from_where)
{
/*
/*-
* Ok this is the generic chunk service queue. we must do the
* following: - See if there are retransmits pending, if so we must
* do these first and return. - Service the stream queue that is
@ -8361,15 +8397,15 @@ sctp_chunk_output(struct sctp_inpcb *inp,
SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
}
while (asoc->sent_queue_retran_cnt) {
/*
/*-
* Ok, it is retransmission time only, we send out only ONE
* packet with a single call off to the retran code.
*/
if (from_where == SCTP_OUTPUT_FROM_COOKIE_ACK) {
/*
* Special hook for handling cookiess discarded by
* peer that carried data. Send cookie-ack only and
* then the next call with get the retran's.
/*-
* Special hook for handling cookies discarded
* by peer that carried data. Send cookie-ack only
* and then the next call will get the retrans.
*/
(void)sctp_med_chunk_output(inp, stcb, asoc, &num_out, &reason_code, 1,
&cwnd_full, from_where,
@ -8391,7 +8427,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
}
if (ret > 0) {
/* Can't send anymore */
/*
/*-
* now lets push out control by calling med-level
* output once. this assures that we WILL send HB's
* if queued too.
@ -8405,13 +8441,16 @@ sctp_chunk_output(struct sctp_inpcb *inp,
return (sctp_timer_validation(inp, stcb, asoc, ret));
}
if (ret < 0) {
/*
/*-
* The count was off.. retran is not happening so do
* the normal retransmission.
*/
#ifdef SCTP_AUDITING_ENABLED
sctp_auditing(9, inp, stcb, NULL);
#endif
if (ret == SCTP_RETRAN_EXIT) {
return (-1);
}
break;
}
if (from_where == SCTP_OUTPUT_FROM_T3) {
@ -8442,7 +8481,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
SCTP_ADDR_NOT_REACHABLE) {
/*
/*-
* if possible move things off of this address we
* still may send below due to the dormant state but
* we try to find an alternate address to send to
@ -8452,7 +8491,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
if (net->ref_count > 1)
sctp_move_to_an_alt(stcb, asoc, net);
} else {
/*
/*-
* if ((asoc->sat_network) || (net->addr_is_local))
* { burst_limit = asoc->max_burst *
* SCTP_SAT_NETWORK_BURST_INCR; }
@ -8521,10 +8560,10 @@ sctp_chunk_output(struct sctp_inpcb *inp,
}
#endif
if (nagle_on) {
/*
* When nagle is on, we look at how much is un_sent,
* then if its smaller than an MTU and we have data
* in flight we stop.
/*-
* When nagle is on, we look at how much is un_sent, then
* if it is smaller than an MTU and we have data in
* flight we stop.
*/
un_sent = ((stcb->asoc.total_output_queue_size - stcb->asoc.total_flight) +
((stcb->asoc.chunks_on_out_queue - stcb->asoc.total_flight_count)
@ -8566,7 +8605,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
printf("Ok, we have put out %d chunks\n", tot_out);
}
#endif
/*
/*-
* Now we need to clean up the control chunk chain if an ECNE is on
* it. It must be marked as UNSENT again so next call will continue
* to send it until such time that we get a CWR, to remove it.
@ -8647,7 +8686,7 @@ send_forward_tsn(struct sctp_tcb *stcb,
TAILQ_INSERT_TAIL(&asoc->control_send_queue, chk, sctp_next);
asoc->ctrl_queue_cnt++;
sctp_fill_in_rest:
/*
/*-
* Here we go through and fill out the part that deals with
* stream/seq of the ones we skip.
*/
@ -8685,14 +8724,14 @@ send_forward_tsn(struct sctp_tcb *stcb,
cnt_of_space = asoc->smallest_mtu - ovh;
}
if (cnt_of_space < space_needed) {
/*
/*-
* ok we must trim down the chunk by lowering the
* advance peer ack point.
*/
cnt_of_skipped = (cnt_of_space -
((sizeof(struct sctp_forward_tsn_chunk)) /
sizeof(struct sctp_strseq)));
/*
/*-
* Go through and find the TSN that will be the one
* we report.
*/
@ -8702,7 +8741,7 @@ send_forward_tsn(struct sctp_tcb *stcb,
at = tp1;
}
last = at;
/*
/*-
* last now points to last one I can report, update
* peer ack point
*/
@ -8720,12 +8759,12 @@ send_forward_tsn(struct sctp_tcb *stcb,
(cnt_of_skipped * sizeof(struct sctp_strseq)));
SCTP_BUF_LEN(chk->data) = chk->send_size;
fwdtsn++;
/*
/*-
* Move pointer to after the fwdtsn and transfer to the
* strseq pointer.
*/
strseq = (struct sctp_strseq *)fwdtsn;
/*
/*-
* Now populate the strseq list. This is done blindly
* without pulling out duplicate stream info. This is
* inefficient but won't harm the process since the peer will
@ -8759,7 +8798,7 @@ send_forward_tsn(struct sctp_tcb *stcb,
void
sctp_send_sack(struct sctp_tcb *stcb)
{
/*
/*-
* Queue up a SACK in the control queue. We must first check to see
* if a SACK is somehow on the control queue. If so, we will take
* and remove the old one.
@ -8833,7 +8872,7 @@ sctp_send_sack(struct sctp_tcb *stcb)
if ((asoc->numduptsns) ||
(asoc->last_data_chunk_from->dest_state & SCTP_ADDR_NOT_REACHABLE)
) {
/*
/*-
* Ok, we have some duplicates or the destination for the
* sack is unreachable, lets see if we can select an
* alternate than asoc->last_data_chunk_from
@ -8913,7 +8952,7 @@ sctp_send_sack(struct sctp_tcb *stcb)
sack->ch.chunk_flags = 0;
if (sctp_cmt_on_off && sctp_cmt_use_dac) {
/*
/*-
* CMT DAC algorithm: If 2 (i.e., 0x10) packets have been
* received, then set high bit to 1, else 0. Reset
* pkts_rcvd.
@ -8934,14 +8973,14 @@ sctp_send_sack(struct sctp_tcb *stcb)
siz = (((asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn) + 1) + 7) / 8;
if (asoc->cumulative_tsn < asoc->mapping_array_base_tsn) {
offset = 1;
/*
/*-
* cum-ack behind the mapping array, so we start and use all
* entries.
*/
jstart = 0;
} else {
offset = asoc->mapping_array_base_tsn - asoc->cumulative_tsn;
/*
/*-
* we skip the first one when the cum-ack is at or above the
* mapping array base.
*/
@ -9046,7 +9085,7 @@ sctp_send_abort_tcb(struct sctp_tcb *stcb, struct mbuf *operr)
struct sctp_auth_chunk *auth = NULL;
struct sctphdr *shdr;
/*
/*-
* Add an AUTH chunk, if chunk requires it and save the offset into
* the chain for AUTH
*/
@ -9290,7 +9329,7 @@ sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
} else
/* Never been sent to */
ms_goneby = 0x7fffffff;
/*
/*-
* When the address state is unconfirmed but still
* considered reachable, we HB at a higher rate. Once it
* goes confirmed OR reaches the "unreachable" state, then we
@ -9316,7 +9355,7 @@ sctp_select_hb_destination(struct sctp_tcb *stcb, struct timeval *now)
}
if (highest_ms && (((unsigned int)highest_ms >= hnet->RTO) || state_overide)) {
/*
/*-
* Found the one with longest delay bounds OR it is
* unconfirmed and still not marked unreachable.
*/
@ -9352,7 +9391,7 @@ sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
if (user_req == 0) {
net = sctp_select_hb_destination(stcb, &now);
if (net == NULL) {
/*
/*-
* All are busy, none to send to, just start the
* timer again.
*/
@ -9447,12 +9486,12 @@ sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
if (sctp_threshold_management(stcb->sctp_ep, stcb, net,
stcb->asoc.max_send_times)) {
/*
* we have lost the association, in a way this is quite bad
* since we really are one less time since we really did not
* send yet. This is the down side to the Q's style as
* defined in the RFC and not my alternate style defined in
* the RFC.
/*-
* we have lost the association, in a way this is
* quite bad since we really are one less time since
* we really did not send yet. This is the down side
* to the Q's style as defined in the RFC and not my
* alternate style defined in the RFC.
*/
atomic_subtract_int(&chk->whoTo->ref_count, 1);
if (chk->data != NULL) {
@ -9466,7 +9505,7 @@ sctp_send_hb(struct sctp_tcb *stcb, int user_req, struct sctp_nets *u_net)
TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, chk, sctp_next);
stcb->asoc.ctrl_queue_cnt++;
SCTP_STAT_INCR(sctps_sendheartbeat);
/*
/*-
* Call directly med level routine to put out the chunk. It will
* always tumble out control chunks aka HB but it may even tumble
* out data too.
@ -9541,7 +9580,7 @@ sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
asoc = &stcb->asoc;
SCTP_TCB_LOCK_ASSERT(stcb);
if (asoc->peer_supports_pktdrop == 0) {
/*
/*-
* peer must declare support before I send one.
*/
return;
@ -9631,7 +9670,7 @@ sctp_send_packet_dropped(struct sctp_tcb *stcb, struct sctp_nets *net,
asoc->my_rwnd_control_len +
stcb->sctp_socket->so_rcv.sb_cc);
} else {
/*
/*-
* If my rwnd is 0, possibly from mbuf depletion as well as
* space used, tell the peer there is NO space aka onq == bw
*/
@ -9723,7 +9762,7 @@ sctp_add_stream_reset_out(struct sctp_tmit_chunk *chk,
}
}
if (SCTP_SIZE32(len) > len) {
/*
/*-
* Need to worry about the pad we may end up adding to the
* end. This is easy since the struct is either aligned to 4
* bytes or 2 bytes off.
@ -9767,7 +9806,7 @@ sctp_add_stream_reset_in(struct sctp_tmit_chunk *chk,
}
}
if (SCTP_SIZE32(len) > len) {
/*
/*-
* Need to worry about the pad we may end up adding to the
* end. This is easy since the struct is either aligned to 4
* bytes or 2 bytes off.
@ -9897,7 +9936,7 @@ sctp_send_str_reset_req(struct sctp_tcb *stcb,
asoc = &stcb->asoc;
if (asoc->stream_reset_outstanding) {
/*
/*-
* Already one pending, must get ACK back to clear the flag.
*/
return (EBUSY);
@ -9972,7 +10011,7 @@ void
sctp_send_abort(struct mbuf *m, int iphlen, struct sctphdr *sh, uint32_t vtag,
struct mbuf *err_cause)
{
/*
/*-
* Formulate the abort message, and send it back down.
*/
struct mbuf *o_pak;
@ -10345,7 +10384,7 @@ sctp_copy_one(struct sctp_stream_queue_pending *sp,
if (m == NULL) {
return (ENOMEM);
}
/*
/*-
* Add this one for m in now, that way if the alloc fails we won't
* have a bad cnt.
*/
@ -10398,7 +10437,7 @@ sctp_copy_it_in(struct sctp_tcb *stcb,
int *error,
int non_blocking)
{
/*
/*-
* This routine must be very careful in its work. Protocol
* processing is up and running so care must be taken to spl...()
* when you need to do something that may affect the stcb/asoc. The
@ -10614,9 +10653,10 @@ sctp_lower_sosend(struct socket *so,
}
hold_tcblock = 0;
} else if (addr) {
/*
* Since we did not use findep we must increment it, and if
* we don't find a tcb decrement it.
/*-
* Since we did not use findep we must
* increment it, and if we don't find a tcb
* decrement it.
*/
SCTP_INP_WLOCK(inp);
SCTP_INP_INCR_REF(inp);
@ -10677,7 +10717,7 @@ sctp_lower_sosend(struct socket *so,
((srcv->sinfo_flags & SCTP_ABORT) ||
((srcv->sinfo_flags & SCTP_EOF) &&
(uio->uio_resid == 0)))) {
/*
/*-
* User asks to abort a non-existent assoc,
* or EOF a non-existent assoc with no data
*/
@ -10770,26 +10810,20 @@ sctp_lower_sosend(struct socket *so,
}
}
for (i = 0; i < asoc->streamoutcnt; i++) {
/*
* inbound side must
* be set to 0xffff,
* also NOTE when we
* get the INIT-ACK
* back (for INIT
* sender) we MUST
/*-
* inbound side must be set
* to 0xffff, also NOTE when
* we get the INIT-ACK back
* (for INIT sender) we MUST
* reduce the count
* (streamoutcnt)
* but first check
* if we sent to any
* of the upper
* streams that were
* dropped (if some
* were). Those that
* were dropped must
* be notified to
* the upper layer
* as failed to
* send.
* (streamoutcnt) but first
* check if we sent to any
* of the upper streams that
* were dropped (if some
* were). Those that were
* dropped must be notified
* to the upper layer as
* failed to send.
*/
asoc->strmout[i].next_sequence_sent = 0x0;
TAILQ_INIT(&asoc->strmout[i].outqueue);
@ -10804,12 +10838,11 @@ sctp_lower_sosend(struct socket *so,
hold_tcblock = 1;
/* out with the INIT */
queue_only_for_init = 1;
/*
* we may want to dig in after this call and adjust
* the MTU value. It defaulted to 1500 (constant)
* but the ro structure may now have an update and
* thus we may need to change it BEFORE we append
* the message.
/*-
* we may want to dig in after this call and adjust the MTU
* value. It defaulted to 1500 (constant) but the ro
* structure may now have an update and thus we may need to
* change it BEFORE we append the message.
*/
net = stcb->asoc.primary_destination;
asoc = &stcb->asoc;
@ -10889,7 +10922,7 @@ sctp_lower_sosend(struct socket *so,
}
}
if ((net->flight_size > net->cwnd) && (sctp_cmt_on_off == 0)) {
/*
/*-
* CMT: Added check for CMT above. net above is the primary
* dest. If CMT is ON, sender should always attempt to send
* with the output routine sctp_fill_outqueue() that loops
@ -10964,7 +10997,7 @@ sctp_lower_sosend(struct socket *so,
if (top == NULL) {
error = uiomove((caddr_t)ph, (int)tot_out, uio);
if (error) {
/*
/*-
* Here if we can't get his data we
* still abort we just don't get to
* send the users note :-0
@ -11249,11 +11282,10 @@ sctp_lower_sosend(struct socket *so,
(un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
) {
/*
* Ok, Nagle is set on and we have data
* outstanding. Don't send anything and let
* SACKs drive out the data unless wen have
* a "full" segment to send.
/*-
* Ok, Nagle is set on and we have data outstanding.
* Don't send anything and let SACKs drive out the
* data unless we have a "full" segment to send.
*/
#ifdef SCTP_NAGLE_LOGGING
sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);
@ -11292,12 +11324,12 @@ sctp_lower_sosend(struct socket *so,
}
if ((queue_only == 0) && (nagle_applies == 0)
) {
/*
* need to start chunk output before
* blocking.. note that if a lock is already
* applied, then the input via the net is
* happening and I don't need to start
* output :-D
/*-
* need to start chunk output
* before blocking.. note that if
* a lock is already applied, then
* the input via the net is happening
* and I don't need to start output :-D
*/
if (hold_tcblock == 0) {
if (SCTP_TCB_TRYLOCK(stcb)) {
@ -11318,20 +11350,19 @@ sctp_lower_sosend(struct socket *so,
}
}
SOCKBUF_LOCK(&so->so_snd);
/*
* This is a bit strange, but I think it will work.
* The total_output_queue_size is locked and
* protected by the TCB_LOCK, which we just
* released. There is a race that can occur between
* releasing it above, and me getting the socket
* lock, where sacks come in but we have not put the
* SB_WAIT on the so_snd buffer to get the wakeup.
* After the LOCK is applied the sack_processing
* will also need to LOCK the so->so_snd to do the
* actual sowwakeup(). So once we have the socket
* buffer lock if we recheck the size we KNOW we
* will get to sleep safely with the wakeup flag in
* place.
/*-
* This is a bit strange, but I think it will
* work. The total_output_queue_size is locked and
* protected by the TCB_LOCK, which we just released.
* There is a race that can occur between releasing it
* above, and me getting the socket lock, where sacks
* come in but we have not put the SB_WAIT on the
* so_snd buffer to get the wakeup. After the LOCK
* is applied the sack_processing will also need to
* LOCK the so->so_snd to do the actual sowwakeup(). So
* once we have the socket buffer lock if we recheck the
* size we KNOW we will get to sleep safely with the
* wakeup flag in place.
*/
if (SCTP_SB_LIMIT_SND(so) < (stcb->asoc.total_output_queue_size + sctp_add_more_threshold)) {
#ifdef SCTP_BLK_LOGGING
@ -11419,11 +11450,11 @@ sctp_lower_sosend(struct socket *so,
asoc->primary_destination);
}
} else {
/*
/*-
* we still got (or just got) data to send, so set
* SHUTDOWN_PENDING
*/
/*
/*-
* XXX sockets draft says that SCTP_EOF should be
* sent with no data. currently, we will allow user
* data to be sent first and move to
@ -11500,10 +11531,10 @@ sctp_lower_sosend(struct socket *so,
(un_sent < (int)(stcb->asoc.smallest_mtu - SCTP_MIN_OVERHEAD))
) {
/*
* Ok, Nagle is set on and we have data outstanding. Don't
* send anything and let SACKs drive out the data unless wen
* have a "full" segment to send.
/*-
* Ok, Nagle is set on and we have data outstanding.
* Don't send anything and let SACKs drive out the
* data unless we have a "full" segment to send.
*/
#ifdef SCTP_NAGLE_LOGGING
sctp_log_nagle_event(stcb, SCTP_NAGLE_APPLIED);

View File

@ -3887,7 +3887,6 @@ sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfre
asoc->streamoutcnt = 0;
if (asoc->strmin) {
struct sctp_queued_to_read *ctl;
int i;
for (i = 0; i < asoc->streamincnt; i++) {
if (!TAILQ_EMPTY(&asoc->strmin[i].inqueue)) {
@ -4530,7 +4529,7 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
struct sockaddr_in sin;
struct sockaddr_in6 sin6;
uint8_t random_store[SCTP_PARAM_BUFFER_SIZE];
struct sctp_auth_random *random = NULL;
struct sctp_auth_random *p_random = NULL;
uint16_t random_len = 0;
uint8_t hmacs_store[SCTP_PARAM_BUFFER_SIZE];
struct sctp_auth_hmac_algo *hmacs = NULL;
@ -4887,8 +4886,8 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
plen);
if (phdr == NULL)
return (-26);
random = (struct sctp_auth_random *)phdr;
random_len = plen - sizeof(*random);
p_random = (struct sctp_auth_random *)phdr;
random_len = plen - sizeof(*p_random);
/* enforce the random length */
if (random_len != SCTP_AUTH_RANDOM_SIZE_REQUIRED) {
#ifdef SCTP_DEBUG
@ -5007,9 +5006,14 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
} else {
stcb->asoc.peer_supports_auth = 0;
}
if (!stcb->asoc.peer_supports_auth && got_chklist) {
/* peer does not support auth but sent a chunks list? */
return (-31);
}
if (!sctp_asconf_auth_nochk && stcb->asoc.peer_supports_asconf &&
!stcb->asoc.peer_supports_auth) {
return (-31);
/* peer supports asconf but not auth? */
return (-32);
}
/* concatenate the full random key */
#ifdef SCTP_AUTH_DRAFT_04
@ -5017,18 +5021,18 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
new_key = sctp_alloc_key(keylen);
if (new_key != NULL) {
/* copy in the RANDOM */
if (random != NULL)
bcopy(random->random_data, new_key->key, random_len);
if (p_random != NULL)
bcopy(p_random->random_data, new_key->key, random_len);
}
#else
keylen = sizeof(*random) + random_len + sizeof(*chunks) + num_chunks +
keylen = sizeof(*p_random) + random_len + sizeof(*chunks) + num_chunks +
sizeof(*hmacs) + hmacs_len;
new_key = sctp_alloc_key(keylen);
if (new_key != NULL) {
/* copy in the RANDOM */
if (random != NULL) {
keylen = sizeof(*random) + random_len;
bcopy(random, new_key->key, keylen);
if (p_random != NULL) {
keylen = sizeof(*p_random) + random_len;
bcopy(p_random, new_key->key, keylen);
}
/* append in the AUTH chunks */
if (chunks != NULL) {
@ -5044,7 +5048,8 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
}
#endif
else {
return (-32);
/* failed to get memory for the key */
return (-33);
}
if (stcb->asoc.authinfo.peer_random != NULL)
sctp_free_key(stcb->asoc.authinfo.peer_random);

View File

@ -192,7 +192,7 @@ struct sctp_epinfo {
struct sctpasochead *sctp_restarthash;
u_long hashrestartmark;
/*
/*-
* The TCP model represents a substantial overhead in that we get an
* additional hash table to keep explicit connections in. The
* listening TCP endpoint will exist in the usual ephash above and
@ -265,7 +265,7 @@ struct sctp_epinfo {
};
/*
/*-
* Here we have all the relevant information for each SCTP entity created. We
* will need to modify this as appropriate. We also need to figure out how to
* access /dev/random.
@ -337,7 +337,7 @@ struct sctp_pcb {
#define sctp_lport ip_inp.inp.inp_lport
struct sctp_inpcb {
/*
/*-
* put an inpcb in front of it all, kind of a waste but we need to
* for compatibility with all the other stuff.
*/
@ -383,11 +383,13 @@ struct sctp_inpcb {
uint32_t partial_delivery_point;
uint32_t sctp_context;
struct sctp_sndrcvinfo def_send;
/*
* These three are here for the sosend_dgram (pkt, pkt_last and
* control). routine. However, I don't think anyone in the current
* FreeBSD kernel calls this. So they are candidates with sctp_sendm
* for de-supporting.
/*-
* These three are here for the sosend_dgram
* (pkt, pkt_last and control) routine.
* However, I don't think anyone in
* the current FreeBSD kernel calls this. So
* they are candidates with sctp_sendm for
* de-supporting.
*/
struct mbuf *pkt, *pkt_last;
struct mbuf *control;
@ -415,7 +417,7 @@ struct sctp_tcb {
struct sctp_block_entry *block_entry; /* pointer locked by socket
* send buffer */
struct sctp_association asoc;
/*
/*-
* freed_by_sorcv_sincelast is protected by the sockbuf_lock NOT the
* tcb_lock. It's special in this way to help avoid extra mutex calls
* in the reading of data.
@ -485,7 +487,7 @@ void
sctp_move_pcb_and_assoc(struct sctp_inpcb *, struct sctp_inpcb *,
struct sctp_tcb *);
/*
/*-
* For this call ep_addr, the to is the destination endpoint address of the
* peer (relative to outbound). The from field is only used if the TCP model
* is enabled and helps distinguish amongst the subset bound (non-boundall).
@ -556,7 +558,7 @@ int sctp_is_vtag_good(struct sctp_inpcb *, uint32_t, struct timeval *);
int sctp_destination_is_reachable(struct sctp_tcb *, struct sockaddr *);
/*
/*-
* Null in last arg inpcb indicate run on ALL ep's. Specific inp in last arg
* indicates run on ONLY assoc's of the specified endpoint.
*/

View File

@ -201,7 +201,6 @@ struct sctp_nets {
uint32_t cwnd; /* actual cwnd */
uint32_t prev_cwnd; /* cwnd before any processing */
uint32_t partial_bytes_acked; /* in CA tracks when to incr a MTU */
uint32_t rtt_variance;
uint32_t prev_rtt;
/* tracking variables to avoid the aloc/free in sack processing */
unsigned int net_ack;
@ -226,6 +225,12 @@ struct sctp_nets {
uint32_t heartbeat_random2;
uint32_t tos_flowlabel;
struct timeval start_time; /* time when this net was created */
uint32_t marked_retrans;/* number of DATA chunks marked for timer
* based retransmissions */
uint32_t marked_fastretrans;
/* if this guy is ok or not ... status */
uint16_t dest_state;
/* number of transmit failures to down this guy */
@ -236,7 +241,6 @@ struct sctp_nets {
uint8_t fast_retran_loss_recovery;
uint8_t will_exit_fast_recovery;
/* Flags that probably can be combined into dest_state */
uint8_t rto_variance_dir; /* increase = 1, decreasing = 0 */
uint8_t fast_retran_ip; /* fast retransmit in progress */
uint8_t hb_responded;
uint8_t saw_newack; /* CMT's SFR algorithm flag */
@ -266,13 +270,10 @@ struct sctp_nets {
uint8_t new_pseudo_cumack; /* CMT CUC algorithm. Flag used to
* indicate if a new pseudo-cumack or
* rtx-pseudo-cumack has been received */
uint8_t window_probe; /* Doing a window probe? */
#ifdef SCTP_HIGH_SPEED
uint8_t last_hs_used; /* index into the last HS table entry we used */
#endif
struct timeval start_time; /* time when this net was created */
uint32_t marked_retrans;/* number or DATA chunks marked for timer
* based retransmissions */
uint32_t marked_fastretrans;
};
@ -341,6 +342,7 @@ struct sctp_tmit_chunk {
uint8_t no_fr_allowed;
uint8_t pr_sctp_on;
uint8_t copy_by_ref;
uint8_t window_probe;
};
/*

View File

@ -91,15 +91,19 @@ uint32_t sctp_chunkscale = SCTP_CHUNKQUEUE_SCALE;
uint32_t sctp_cmt_on_off = 0;
uint32_t sctp_cmt_use_dac = 0;
uint32_t sctp_max_retran_chunk = SCTPCTL_MAX_RETRAN_DEFAULT;
uint32_t sctp_L2_abc_variable = 1;
uint32_t sctp_early_fr = 0;
uint32_t sctp_early_fr_msec = SCTP_MINFR_MSEC_TIMER;
uint32_t sctp_use_rttvar_cc = 0;
uint32_t sctp_says_check_for_deadlock = 0;
uint32_t sctp_asconf_auth_nochk = 0;
uint32_t sctp_auth_disable = 0;
uint32_t sctp_nat_friendly = 1;
uint32_t sctp_min_residual = SCTPCTL_MIN_RESIDUAL_DEFAULT;
struct sctpstat sctpstat;
#ifdef SCTP_DEBUG
@ -434,10 +438,6 @@ SYSCTL_UINT(_net_inet_sctp, OID_AUTO, early_fast_retran, CTLFLAG_RW,
&sctp_early_fr, 0,
"Early Fast Retransmit with timer");
SYSCTL_UINT(_net_inet_sctp, OID_AUTO, use_rttvar_congctrl, CTLFLAG_RW,
&sctp_use_rttvar_cc, 0,
"Use congestion control via rtt variation");
SYSCTL_UINT(_net_inet_sctp, OID_AUTO, deadlock_detect, CTLFLAG_RW,
&sctp_says_check_for_deadlock, 0,
"SMP Deadlock detection on/off");
@ -486,6 +486,18 @@ SYSCTL_INT(_net_inet_sctp, OID_AUTO, strict_data_order, CTLFLAG_RW,
&sctp_strict_data_order, 0,
"Enforce strict data ordering, abort if control inside data");
SYSCTL_INT(_net_inet_sctp, OID_AUTO, min_residual, CTLFLAG_RW,
&sctp_min_residual, 0,
SCTPCTL_MIN_RESIDUAL_DESC);
SYSCTL_INT(_net_inet_sctp, OID_AUTO, max_retran_chunk, CTLFLAG_RW,
&sctp_max_retran_chunk, 0,
SCTPCTL_MAX_RETRAN_DESC);
SYSCTL_STRUCT(_net_inet_sctp, OID_AUTO, stats, CTLFLAG_RW,
&sctpstat, sctpstat,
"SCTP statistics (struct sctps_stat, netinet/sctp.h");

View File

@ -292,108 +292,118 @@ __FBSDID("$FreeBSD$");
#define SCTPCTL_EARLY_FAST_RETRAN_MAX 0xFFFFFFFF
#define SCTPCTL_EARLY_FAST_RETRAN_DEFAULT 0
/* use_rttvar_congctrl: Use Congestion Control via rtt variation */
#define SCTPCTL_USE_RTTVAR_CONGCTRL 37
#define SCTPCTL_USE_RTTVAR_CONGCTRL_DESC "Use Congestion Control via rtt variation"
#define SCTPCTL_USE_RTTVAR_CONGCTRL_MIN 0
#define SCTPCTL_USE_RTTVAR_CONGCTRL_MAX 1
#define SCTPCTL_USE_RTTVAR_CONGCTRL_DEFAULT 0 /* UNUSED?? */
/* deadlock_detect: SMP Deadlock detection on/off */
#define SCTPCTL_DEADLOCK_DETECT 38
#define SCTPCTL_DEADLOCK_DETECT 37
#define SCTPCTL_DEADLOCK_DETECT_DESC "SMP Deadlock detection on/off"
#define SCTPCTL_DEADLOCK_DETECT_MIN 0
#define SCTPCTL_DEADLOCK_DETECT_MAX 1
#define SCTPCTL_DEADLOCK_DETECT_DEFAULT 0
/* early_fast_retran_msec: Early Fast Retransmit minimum timer value */
#define SCTPCTL_EARLY_FAST_RETRAN_MSEC 39
#define SCTPCTL_EARLY_FAST_RETRAN_MSEC 38
#define SCTPCTL_EARLY_FAST_RETRAN_MSEC_DESC "Early Fast Retransmit minimum timer value"
#define SCTPCTL_EARLY_FAST_RETRAN_MSEC_MIN 0
#define SCTPCTL_EARLY_FAST_RETRAN_MSEC_MAX 0xFFFFFFFF
#define SCTPCTL_EARLY_FAST_RETRAN_MSEC_DEFAULT SCTP_MINFR_MSEC_TIMER
/* asconf_auth_nochk: Disable SCTP ASCONF AUTH requirement */
#define SCTPCTL_ASCONF_AUTH_NOCHK 40
#define SCTPCTL_ASCONF_AUTH_NOCHK 39
#define SCTPCTL_ASCONF_AUTH_NOCHK_DESC "Disable SCTP ASCONF AUTH requirement"
#define SCTPCTL_ASCONF_AUTH_NOCHK_MIN 0
#define SCTPCTL_ASCONF_AUTH_NOCHK_MAX 1
#define SCTPCTL_ASCONF_AUTH_NOCHK_DEFAULT 0
/* auth_disable: Disable SCTP AUTH function */
#define SCTPCTL_AUTH_DISABLE 41
#define SCTPCTL_AUTH_DISABLE 40
#define SCTPCTL_AUTH_DISABLE_DESC "Disable SCTP AUTH function"
#define SCTPCTL_AUTH_DISABLE_MIN 0
#define SCTPCTL_AUTH_DISABLE_MAX 1
#define SCTPCTL_AUTH_DISABLE_DEFAULT 0
/* nat_friendly: SCTP NAT friendly operation */
#define SCTPCTL_NAT_FRIENDLY 42
#define SCTPCTL_NAT_FRIENDLY 41
#define SCTPCTL_NAT_FRIENDLY_DESC "SCTP NAT friendly operation"
#define SCTPCTL_NAT_FRIENDLY_MIN 0
#define SCTPCTL_NAT_FRIENDLY_MAX 1
#define SCTPCTL_NAT_FRIENDLY_DEFAULT 1
/* abc_l_var: SCTP ABC max increase per SACK (L) */
#define SCTPCTL_ABC_L_VAR 43
#define SCTPCTL_ABC_L_VAR 42
#define SCTPCTL_ABC_L_VAR_DESC "SCTP ABC max increase per SACK (L)"
#define SCTPCTL_ABC_L_VAR_MIN 0
#define SCTPCTL_ABC_L_VAR_MAX 0xFFFFFFFF
#define SCTPCTL_ABC_L_VAR_DEFAULT 1
/* max_chained_mbufs: Default max number of small mbufs on a chain */
#define SCTPCTL_MAX_CHAINED_MBUFS 44
#define SCTPCTL_MAX_CHAINED_MBUFS 43
#define SCTPCTL_MAX_CHAINED_MBUFS_DESC "Default max number of small mbufs on a chain"
#define SCTPCTL_MAX_CHAINED_MBUFS_MIN 0
#define SCTPCTL_MAX_CHAINED_MBUFS_MAX 0xFFFFFFFF
#define SCTPCTL_MAX_CHAINED_MBUFS_DEFAULT SCTP_DEFAULT_MBUFS_IN_CHAIN
/* cmt_use_dac: CMT DAC on/off flag */
#define SCTPCTL_CMT_USE_DAC 45
#define SCTPCTL_CMT_USE_DAC 44
#define SCTPCTL_CMT_USE_DAC_DESC "CMT DAC on/off flag"
#define SCTPCTL_CMT_USE_DAC_MIN 0
#define SCTPCTL_CMT_USE_DAC_MAX 1
#define SCTPCTL_CMT_USE_DAC_DEFAULT 0
/* do_sctp_drain: Should SCTP respond to the drain calls */
#define SCTPCTL_DO_SCTP_DRAIN 46
#define SCTPCTL_DO_SCTP_DRAIN 45
#define SCTPCTL_DO_SCTP_DRAIN_DESC "Should SCTP respond to the drain calls"
#define SCTPCTL_DO_SCTP_DRAIN_MIN 0
#define SCTPCTL_DO_SCTP_DRAIN_MAX 1
#define SCTPCTL_DO_SCTP_DRAIN_DEFAULT 1
/* hb_max_burst: Confirmation Heartbeat max burst? */
#define SCTPCTL_HB_MAX_BURST 47
#define SCTPCTL_HB_MAX_BURST 46
#define SCTPCTL_HB_MAX_BURST_DESC "Confirmation Heartbeat max burst?"
#define SCTPCTL_HB_MAX_BURST_MIN 1
#define SCTPCTL_HB_MAX_BURST_MAX 0xFFFFFFFF
#define SCTPCTL_HB_MAX_BURST_DEFAULT SCTP_DEF_MAX_BURST
/* abort_at_limit: When one-2-one hits qlimit abort */
#define SCTPCTL_ABORT_AT_LIMIT 48
#define SCTPCTL_ABORT_AT_LIMIT 47
#define SCTPCTL_ABORT_AT_LIMIT_DESC "When one-2-one hits qlimit abort"
#define SCTPCTL_ABORT_AT_LIMIT_MIN 0
#define SCTPCTL_ABORT_AT_LIMIT_MAX 1
#define SCTPCTL_ABORT_AT_LIMIT_DEFAULT 0
/* strict_data_order: Enforce strict data ordering, abort if control inside data */
#define SCTPCTL_STRICT_DATA_ORDER 49
#define SCTPCTL_STRICT_DATA_ORDER 48
#define SCTPCTL_STRICT_DATA_ORDER_DESC "Enforce strict data ordering, abort if control inside data"
#define SCTPCTL_STRICT_DATA_ORDER_MIN 0
#define SCTPCTL_STRICT_DATA_ORDER_MAX 1
#define SCTPCTL_STRICT_DATA_ORDER_DEFAULT 0
/* min_residual: minimum residual left in a data fragment after a split */
#define SCTPCTL_MIN_RESIDUAL 49
#define SCTPCTL_MIN_RESIDUAL_DESC "Minimum residual data chunk in second part of split"
#define SCTPCTL_MIN_RESIDUAL_MIN 20
#define SCTPCTL_MIN_RESIDUAL_MAX 65535
#define SCTPCTL_MIN_RESIDUAL_DEFAULT 1452
/* max_retran_chunk: max retransmissions of one chunk before assoc abort */
#define SCTPCTL_MAX_RETRAN 50
#define SCTPCTL_MAX_RETRAN_DESC "Maximum times an unlucky chunk can be retran'd before assoc abort"
#define SCTPCTL_MAX_RETRAN_MIN 0
#define SCTPCTL_MAX_RETRAN_MAX 65535
#define SCTPCTL_MAX_RETRAN_DEFAULT 30
#ifdef SCTP_DEBUG
/* debug: Configure debug output */
#define SCTPCTL_DEBUG 50
#define SCTPCTL_DEBUG 51
#define SCTPCTL_DEBUG_DESC "Configure debug output"
#define SCTPCTL_DEBUG_MIN 0
#define SCTPCTL_DEBUG_MAX 0xFFFFFFFF
#define SCTPCTL_DEBUG_DEFAULT 0
#endif
#ifdef SCTP_DEBUG
#define SCTPCTL_MAXID 50
#define SCTPCTL_MAXID 51
#else
#define SCTPCTL_MAXID 49
#define SCTPCTL_MAXID 50
#endif
/*
@@ -432,7 +442,6 @@ __FBSDID("$FreeBSD$");
{ "cmt_on_off", CTLTYPE_INT }, \
{ "cwnd_maxburst", CTLTYPE_INT }, \
{ "early_fast_retran", CTLTYPE_INT }, \
{ "use_rttvar_congctrl", CTLTYPE_INT }, \
{ "deadlock_detect", CTLTYPE_INT }, \
{ "early_fast_retran_msec", CTLTYPE_INT }, \
{ "asconf_auth_nochk", CTLTYPE_INT }, \
@@ -452,6 +461,8 @@ __FBSDID("$FreeBSD$");
{ "add_more_on_output", CTLTYPE_INT }, \
{ "sys_resource", CTLTYPE_INT }, \
{ "asoc_resource", CTLTYPE_INT }, \
{ "min_residual", CTLTYPE_INT }, \
{ "max_retran_chunk", CTLTYPE_INT }, \
{ "debug", CTLTYPE_INT }, \
}
#else
@@ -486,7 +497,6 @@ __FBSDID("$FreeBSD$");
{ "cmt_on_off", CTLTYPE_INT }, \
{ "cwnd_maxburst", CTLTYPE_INT }, \
{ "early_fast_retran", CTLTYPE_INT }, \
{ "use_rttvar_congctrl", CTLTYPE_INT }, \
{ "deadlock_detect", CTLTYPE_INT }, \
{ "early_fast_retran_msec", CTLTYPE_INT }, \
{ "asconf_auth_nochk", CTLTYPE_INT }, \
@@ -506,6 +516,8 @@ __FBSDID("$FreeBSD$");
{ "add_more_on_output", CTLTYPE_INT }, \
{ "sys_resource", CTLTYPE_INT }, \
{ "asoc_resource", CTLTYPE_INT }, \
{ "max_retran_chunk", CTLTYPE_INT }, \
{ "min_residual", CTLTYPE_INT }, \
}
#endif
@@ -564,6 +576,8 @@ extern uint32_t sctp_do_drain;
extern uint32_t sctp_hb_maxburst;
extern uint32_t sctp_abort_if_one_2_one_hits_limit;
extern uint32_t sctp_strict_data_order;
extern uint32_t sctp_min_residual;
extern uint32_t sctp_max_retran_chunk;
#if defined(SCTP_DEBUG)
extern uint32_t sctp_debug_on;

View File

@@ -658,8 +658,6 @@ struct sctp_lock_log {
struct sctp_rto_log {
void *net;
uint32_t rtt;
uint32_t rttvar;
uint8_t direction;
};
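
(The rtt_variance and rto_variance_dir fields dropped from this log record match the variance-direction state removed from rto_logging() and sctp_calculate_rto() later in this commit.)
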
struct sctp_nagle_log {

View File

@@ -996,7 +996,7 @@ sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
static size_t
sctp_fill_up_addresses(struct sctp_inpcb *inp,
sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
struct sctp_tcb *stcb,
size_t limit,
struct sockaddr_storage *sas,
@@ -1153,8 +1153,22 @@ sctp_fill_up_addresses(struct sctp_inpcb *inp,
return (actual);
}
static size_t
sctp_fill_up_addresses(struct sctp_inpcb *inp,
struct sctp_tcb *stcb,
size_t limit,
struct sockaddr_storage *sas)
{
size_t size = 0;
/* fill up addresses for the endpoint's default vrf */
size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
inp->def_vrf_id);
return (size);
}
static int
sctp_count_max_addresses(struct sctp_inpcb *inp, uint32_t vrf_id)
sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
{
int cnt = 0;
struct sctp_vrf *vrf = NULL;
@@ -1204,6 +1218,16 @@ sctp_count_max_addresses(struct sctp_inpcb *inp, uint32_t vrf_id)
return (cnt);
}
static int
sctp_count_max_addresses(struct sctp_inpcb *inp)
{
int cnt = 0;
/* count addresses for the endpoint's default VRF */
cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
return (cnt);
}
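
Each address routine is now split into a *_vrf() worker plus a thin wrapper that supplies inp->def_vrf_id, so sctp_getopt() no longer has to thread a vrf_id through. A hedged sketch of what the split buys: a VRF-aware caller could aggregate over several VRFs by calling the worker directly (hypothetical code; this commit only wires up the default VRF):

/* Hypothetical multi-VRF aggregation using the worker shown above. */
static int
sketch_count_across_vrfs(struct sctp_inpcb *inp,
    const uint32_t *vrf_ids, int num_vrfs)
{
	int i, cnt = 0;

	for (i = 0; i < num_vrfs; i++)
		cnt += sctp_count_max_addresses_vrf(inp, vrf_ids[i]);
	return (cnt);
}
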
static int
sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
@@ -1416,7 +1440,6 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
{
struct sctp_inpcb *inp;
int error, val = 0;
uint32_t vrf_id;
struct sctp_tcb *stcb = NULL;
if (optval == NULL) {
@@ -1425,8 +1448,6 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
inp = (struct sctp_inpcb *)so->so_pcb;
if (inp == 0)
return EINVAL;
vrf_id = SCTP_DEFAULT_VRFID;
error = 0;
switch (optname) {
@@ -1797,7 +1818,7 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
SCTP_INP_RLOCK(inp);
*value = sctp_count_max_addresses(inp, vrf_id);
*value = sctp_count_max_addresses(inp);
SCTP_INP_RUNLOCK(inp);
*optsize = sizeof(uint32_t);
}
@@ -1898,7 +1919,7 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
sas = (struct sockaddr_storage *)&saddr->addr[0];
limit = *optsize - sizeof(sctp_assoc_t);
actual = sctp_fill_up_addresses(inp, stcb, limit, sas, vrf_id);
actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
if (stcb)
SCTP_TCB_UNLOCK(stcb);
*optsize = sizeof(struct sockaddr_storage) + actual;

View File

@@ -133,8 +133,6 @@ rto_logging(struct sctp_nets *net, int from)
sctp_clog[sctp_cwnd_log_at].event_type = (uint8_t) SCTP_LOG_EVENT_RTT;
sctp_clog[sctp_cwnd_log_at].x.rto.net = (void *)net;
sctp_clog[sctp_cwnd_log_at].x.rto.rtt = net->prev_rtt;
sctp_clog[sctp_cwnd_log_at].x.rto.rttvar = net->rtt_variance;
sctp_clog[sctp_cwnd_log_at].x.rto.direction = net->rto_variance_dir;
}
void
@@ -2464,7 +2462,7 @@ sctp_calculate_rto(struct sctp_tcb *stcb,
*/
int calc_time = 0;
int o_calctime;
unsigned int new_rto = 0;
uint32_t new_rto = 0;
int first_measure = 0;
struct timeval now;
@@ -2493,7 +2491,7 @@ sctp_calculate_rto(struct sctp_tcb *stcb,
(u_long)old->tv_usec) / 1000;
} else if ((u_long)now.tv_usec < (u_long)old->tv_usec) {
/* impossible .. garbage in nothing out */
return (((net->lastsa >> 2) + net->lastsv) >> 1);
goto calc_rto;
} else if ((u_long)now.tv_usec == (u_long)old->tv_usec) {
/*
* We have to have 1 usec :-D this must be the
@@ -2502,11 +2500,11 @@ sctp_calculate_rto(struct sctp_tcb *stcb,
calc_time = 1;
} else {
/* impossible .. garbage in nothing out */
return (((net->lastsa >> 2) + net->lastsv) >> 1);
goto calc_rto;
}
} else {
/* Clock wrapped? */
return (((net->lastsa >> 2) + net->lastsv) >> 1);
goto calc_rto;
}
/***************************/
/* 2. update RTTVAR & SRTT */
@@ -2515,15 +2513,6 @@ sctp_calculate_rto(struct sctp_tcb *stcb,
/* this is Van Jacobson's integer version */
if (net->RTO) {
calc_time -= (net->lastsa >> 3);
if ((int)net->prev_rtt > o_calctime) {
net->rtt_variance = net->prev_rtt - o_calctime;
/* decreasing */
net->rto_variance_dir = 0;
} else {
/* increasing */
net->rtt_variance = o_calctime - net->prev_rtt;
net->rto_variance_dir = 1;
}
#ifdef SCTP_RTTVAR_LOGGING
rto_logging(net, SCTP_LOG_RTTVAR);
#endif
@@ -2542,13 +2531,12 @@ sctp_calculate_rto(struct sctp_tcb *stcb,
net->lastsa = calc_time;
net->lastsv = calc_time >> 1;
first_measure = 1;
net->rto_variance_dir = 1;
net->prev_rtt = o_calctime;
net->rtt_variance = 0;
#ifdef SCTP_RTTVAR_LOGGING
rto_logging(net, SCTP_LOG_INITIAL_RTT);
#endif
}
calc_rto:
new_rto = ((net->lastsa >> 2) + net->lastsv) >> 1;
if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
(stcb->asoc.sat_network_lockout == 0)) {
@@ -2564,8 +2552,8 @@ sctp_calculate_rto(struct sctp_tcb *stcb,
if (new_rto > stcb->asoc.maxrto) {
new_rto = stcb->asoc.maxrto;
}
/* we are now returning the RTT Smoothed */
return ((uint32_t) new_rto);
/* we are now returning the RTO */
return (new_rto);
}
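
With the early returns replaced by goto calc_rto, every path through sctp_calculate_rto() now exits via the same tail that derives and bounds the RTO. That tail in isolation, as a sketch (plain parameters instead of the net/stcb structures; the lower clamp is assumed to mirror the maxrto check visible in the hunk above):

/* Sketch of the shared calc_rto tail. */
#include <stdint.h>

static uint32_t
sketch_calc_rto_tail(uint32_t lastsa, uint32_t lastsv,
    uint32_t minrto, uint32_t maxrto)
{
	/* Same formula as the calc_rto label above. */
	uint32_t new_rto = ((lastsa >> 2) + lastsv) >> 1;

	if (new_rto < minrto)		/* assumed lower clamp */
		new_rto = minrto;
	if (new_rto > maxrto)		/* upper clamp shown above */
		new_rto = maxrto;
	return (new_rto);
}
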
/*

View File

@@ -80,12 +80,6 @@ sctp6_input(mp, offp, proto)
m = SCTP_HEADER_TO_CHAIN(*mp);
ip6 = mtod(m, struct ip6_hdr *);
#ifndef PULLDOWN_TEST
/* If PULLDOWN_TEST off, must be in a single mbuf. */
IP6_EXTHDR_CHECK(m, off, (int)(sizeof(*sh) + sizeof(*ch)), IPPROTO_DONE);
sh = (struct sctphdr *)((caddr_t)ip6 + off);
ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
#else
/* Ensure that (sctphdr + sctp_chunkhdr) are contiguous. */
IP6_EXTHDR_GET(sh, struct sctphdr *, m, off, sizeof(*sh) + sizeof(*ch));
if (sh == NULL) {
@@ -93,8 +87,6 @@ sctp6_input(mp, offp, proto)
return IPPROTO_DONE;
}
ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr));
#endif
iphlen = off;
offset = iphlen + sizeof(*sh) + sizeof(*ch);
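
With the #ifndef PULLDOWN_TEST arm gone there is a single input path: in all configurations IP6_EXTHDR_GET makes the SCTP common header and the first chunk header contiguous, and yields a NULL sh (caught by the guard above) when it cannot, so the old single-mbuf assumption no longer exists.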