- added pre-checks to the bindx call.

- use proper tick gathering macro instead of ticks directly.
- Placed reasonable boundaries on sets that a user can do
  that are converted to ticks from ms.
- Fix CMT_PF to always check to be sure CMT is on.
- Fix ticks use of CMT_PF.
- put back code to allow asconfs to be queued while INITs are in flight
  and before the assoc is established.
- During window probes, an ack'd packet might be left with the window
  probe mark on it causing it to be retransmitted. Change so that
  the flight decrease macro clears the window_probe mark.
- Additional logging of flight size/read and ASOC LOG. This
  is only enabled if you manually insert things into opt_sctp.h
  since it's a set of debug-only code.
- Found an interesting SMP race in the way data was appended which
  could cause a reader to lose a part of a message; had to
  reorder so that we mark the message as complete only after
  the data has been appended.
- bug in ADD-IP for the subset bound socket case when the peer has only
  one address
- fix ASCONF implicit success/error handling case
- proper support of jails in FreeBSD 6 and later
- copy out the timeval for the 64 bit sparc world on cookie-echo
  (alignment error crashes without this).
Approved by:	re(Ken Smith)
This commit is contained in:
Randall Stewart 2007-07-17 20:58:26 +00:00
parent 928e6222fd
commit 18e198d3a3
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=171477
18 changed files with 416 additions and 221 deletions

View File

@ -292,11 +292,11 @@ __attribute__((packed));
#define SCTP_CAUSE_PROTOCOL_VIOLATION 0x000d
/* Error causes from draft-ietf-tsvwg-addip-sctp */
#define SCTP_CAUSE_DELETING_LAST_ADDR 0x0100
#define SCTP_CAUSE_RESOURCE_SHORTAGE 0x0101
#define SCTP_CAUSE_DELETING_SRC_ADDR 0x0102
#define SCTP_CAUSE_ILLEGAL_ASCONF_ACK 0x0103
#define SCTP_CAUSE_REQUEST_REFUSED 0x0104
#define SCTP_CAUSE_DELETING_LAST_ADDR 0xa0
#define SCTP_CAUSE_RESOURCE_SHORTAGE 0xa1
#define SCTP_CAUSE_DELETING_SRC_ADDR 0xa2
#define SCTP_CAUSE_ILLEGAL_ASCONF_ACK 0xa3
#define SCTP_CAUSE_REQUEST_REFUSED 0xa4
/* Error causes from draft-ietf-tsvwg-sctp-auth */
#define SCTP_CAUSE_UNSUPPORTED_HMACID 0x0105
@ -496,6 +496,14 @@ __attribute__((packed));
*/
#define SCTP_PACKET_LOG_SIZE 65536
/* Maximum delays and such a user can set for options that
* take ms.
*/
#define SCTP_MAX_SACK_DELAY 500 /* per RFC4960 */
#define SCTP_MAX_HB_INTERVAL 14400000 /* 4 hours in ms */
#define SCTP_MAX_COOKIE_LIFE 3600000 /* 1 hour in ms */
/* Types of logging/KTR tracing that can be enabled via the
* sysctl net.inet.sctp.sctp_logging. You must also enable
* SUBSYS tracing.

View File

@ -94,26 +94,25 @@ sctp_asconf_get_source_ip(struct mbuf *m, struct sockaddr *sa)
/*
* draft-ietf-tsvwg-addip-sctp
*
* Address management only currently supported For the bound all case: the asoc
* local addr list is always a "DO NOT USE" list For the subset bound case:
* If ASCONFs are allowed: the endpoint local addr list is the usable address
* list the asoc local addr list is the "DO NOT USE" list If ASCONFs are not
* allowed: the endpoint local addr list is the default usable list the asoc
* local addr list is the usable address list
*
* An ASCONF parameter queue exists per asoc which holds the pending address
* operations. Lists are updated upon receipt of ASCONF-ACK.
*
* A restricted_addrs list exists per assoc to hold local addresses that are
* not (yet) usable by the assoc as a source address. These addresses are
* either pending an ASCONF operation (and exist on the ASCONF parameter
* queue), or they are permanently restricted (the peer has returned an
* ERROR indication to an ASCONF(ADD), or the peer does not support ASCONF).
*
* Deleted addresses are always immediately removed from the lists as they will
* (shortly) no longer exist in the kernel. We send ASCONFs as a courtesy,
* only if allowed.
*/
/*
* ASCONF parameter processing response_required: set if a reply is required
* (eg. SUCCESS_REPORT) returns a mbuf to an "error" response parameter or
* NULL/"success" if ok FIX: allocating this many mbufs on the fly is pretty
* inefficient...
* ASCONF parameter processing.
* response_required: set if a reply is required (eg. SUCCESS_REPORT).
* returns a mbuf to an "error" response parameter or NULL/"success" if ok.
* FIX: allocating this many mbufs on the fly is pretty inefficient...
*/
static struct mbuf *
sctp_asconf_success_response(uint32_t id)
@ -908,7 +907,8 @@ sctp_asconf_addr_mgmt_ack(struct sctp_tcb *stcb, struct sctp_ifa *addr,
* for add. If a duplicate operation is found, ignore the new one.
*/
static uint32_t
sctp_asconf_queue_add(struct sctp_tcb *stcb, struct sctp_ifa *ifa, uint16_t type)
sctp_asconf_queue_add(struct sctp_tcb *stcb, struct sctp_ifa *ifa,
uint16_t type)
{
struct sctp_asconf_addr *aa, *aa_next;
struct sockaddr *sa;
@ -1223,8 +1223,9 @@ sctp_asconf_process_error(struct sctp_tcb *stcb,
}
/*
* process an asconf queue param aparam: parameter to process, will be
* removed from the queue flag: 1=success, 0=failure
* process an asconf queue param.
* aparam: parameter to process, will be removed from the queue.
* flag: 1=success case, 0=failure case
*/
static void
sctp_asconf_process_param_ack(struct sctp_tcb *stcb,
@ -1432,11 +1433,9 @@ sctp_handle_asconf_ack(struct mbuf *m, int offset,
* < last_error_id, then success else, failure
*/
if (aa->ap.aph.correlation_id < last_error_id)
sctp_asconf_process_param_ack(stcb, aa,
SCTP_SUCCESS_REPORT);
sctp_asconf_process_param_ack(stcb, aa, 1);
else
sctp_asconf_process_param_ack(stcb, aa,
SCTP_ERROR_CAUSE_IND);
sctp_asconf_process_param_ack(stcb, aa, 0);
} else {
/*
* since we always process in order (FIFO queue) if
@ -1529,13 +1528,6 @@ sctp_addr_mgmt_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
}
}
/* put this address on the "pending/do not use yet" list */
/*
* Note: we do this primarily for the subset bind case We don't have
* scoping flags at the EP level, so we must add link local/site
* local addresses to the EP, then need to "negate" them here.
* Recall that this routine is only called for the subset bound
* w/ASCONF allowed case.
*/
sctp_add_local_addr_assoc(stcb, ifa, 1);
/*
* check address scope if address is out of scope, don't queue
@ -1824,7 +1816,6 @@ sctp_iterator_stcb(struct sctp_inpcb *inp, struct sctp_tcb *stcb, void *ptr,
continue;
}
}
}
/* queue an asconf for this address add/delete */
if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
@ -2018,8 +2009,8 @@ sctp_find_valid_localaddr_ep(struct sctp_tcb *stcb)
}
/*
* builds an ASCONF chunk from queued ASCONF params returns NULL on error (no
* mbuf, no ASCONF params queued, etc)
* builds an ASCONF chunk from queued ASCONF params.
* returns NULL on error (no mbuf, no ASCONF params queued, etc).
*/
struct mbuf *
sctp_compose_asconf(struct sctp_tcb *stcb, int *retlen)

View File

@ -235,7 +235,7 @@ sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
*
* Should we stop any running T3 timer here?
*/
if (sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
if (sctp_cmt_on_off && sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
SCTP_ADDR_PF)) {
net->dest_state &= ~SCTP_ADDR_PF;
net->cwnd = net->mtu * sctp_cmt_pf;
@ -724,7 +724,7 @@ sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
*
* Should we stop any running T3 timer here?
*/
if (sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
if (sctp_cmt_on_off && sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
SCTP_ADDR_PF)) {
net->dest_state &= ~SCTP_ADDR_PF;
net->cwnd = net->mtu * sctp_cmt_pf;
@ -1047,7 +1047,7 @@ between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
static inline uint32_t
htcp_cong_time(struct htcp *ca)
{
return ticks - ca->last_cong;
return sctp_get_tick_count() - ca->last_cong;
}
static inline uint32_t
@ -1062,7 +1062,7 @@ htcp_reset(struct htcp *ca)
ca->undo_last_cong = ca->last_cong;
ca->undo_maxRTT = ca->maxRTT;
ca->undo_old_maxB = ca->old_maxB;
ca->last_cong = ticks;
ca->last_cong = sctp_get_tick_count();
}
#ifdef SCTP_NOT_USED
@ -1099,7 +1099,7 @@ measure_rtt(struct sctp_tcb *stcb, struct sctp_nets *net)
static void
measure_achieved_throughput(struct sctp_tcb *stcb, struct sctp_nets *net)
{
uint32_t now = ticks;
uint32_t now = sctp_get_tick_count();
if (net->fast_retran_ip == 0)
net->htcp_ca.bytes_acked = net->net_ack;
@ -1303,7 +1303,7 @@ htcp_init(struct sctp_tcb *stcb, struct sctp_nets *net)
net->htcp_ca.alpha = ALPHA_BASE;
net->htcp_ca.beta = BETA_MIN;
net->htcp_ca.bytes_acked = net->mtu;
net->htcp_ca.last_cong = ticks;
net->htcp_ca.last_cong = sctp_get_tick_count();
}
void
@ -1419,7 +1419,7 @@ sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
*
* Should we stop any running T3 timer here?
*/
if (sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
if (sctp_cmt_on_off && sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
SCTP_ADDR_PF)) {
net->dest_state &= ~SCTP_ADDR_PF;
net->cwnd = net->mtu * sctp_cmt_pf;
@ -1592,7 +1592,7 @@ sctp_htcp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
old_cwnd = net->cwnd;
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR);
net->htcp_ca.last_cong = ticks;
net->htcp_ca.last_cong = sctp_get_tick_count();
/*
* make a small adjustment to cwnd and force to CA.
*/

View File

@ -83,6 +83,10 @@ __FBSDID("$FreeBSD$");
*/
#define SCTP_DEFAULT_VRF_SIZE 4
/* constants for rto calc */
#define sctp_align_safe_nocopy 0
#define sctp_align_unsafe_makecopy 1
/* JRS - Values defined for the HTCP algorithm */
#define ALPHA_BASE (1<<7) /* 1.0 with shift << 7 */
#define BETA_MIN (1<<6) /* 0.5 with shift << 7 */

View File

@ -1611,16 +1611,20 @@ sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
*/
strmseq = ntohs(ch->dp.stream_sequence);
#ifdef SCTP_ASOCLOG_OF_TSNS
SCTP_TCB_LOCK_ASSERT(stcb);
if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
asoc->tsn_in_at = 0;
asoc->tsn_in_wrapped = 1;
}
asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
asoc->tsn_in_at++;
if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
asoc->tsn_in_at = 0;
asoc->tsn_in_wrapped = 1;
}
#endif
if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
(TAILQ_EMPTY(&asoc->resetHead)) &&
@ -2998,7 +3002,8 @@ sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct
sctp_calculate_rto(stcb,
asoc,
tp1->whoTo,
&tp1->sent_rcv_time);
&tp1->sent_rcv_time,
sctp_align_safe_nocopy);
tp1->do_rtt = 0;
}
}
@ -3456,7 +3461,7 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
tp1->no_fr_allowed = 1;
alt = tp1->whoTo;
/* sa_ignore NO_NULL_CHK */
if (sctp_cmt_pf) {
if (sctp_cmt_on_off && sctp_cmt_pf) {
/*
* JRS 5/18/07 - If CMT PF is on,
* use the PF version of
@ -3730,7 +3735,6 @@ sctp_window_probe_recovery(struct sctp_tcb *stcb,
/* First setup this one and get it moved back */
tp1->sent = SCTP_DATAGRAM_UNSENT;
tp1->window_probe = 0;
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
tp1->whoTo->flight_size,
@ -3752,7 +3756,6 @@ sctp_window_probe_recovery(struct sctp_tcb *stcb,
if (chk->sent == SCTP_DATAGRAM_RESEND) {
/* Another chunk to move */
chk->sent = SCTP_DATAGRAM_UNSENT;
chk->window_probe = 0;
/* It should not be in flight */
TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
TAILQ_INSERT_AFTER(&asoc->send_queue, tp1, chk, sctp_next);
@ -3781,6 +3784,13 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
}
SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_ASOCLOG_OF_TSNS
stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
stcb->asoc.cumack_log_at++;
if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
stcb->asoc.cumack_log_at = 0;
}
#endif
asoc = &stcb->asoc;
old_rwnd = asoc->peers_rwnd;
if (compare_with_wrap(asoc->last_acked_seq, cumack, MAX_TSN)) {
@ -3864,103 +3874,101 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
if (compare_with_wrap(cumack, tp1->rec.data.TSN_seq,
MAX_TSN) ||
cumack == tp1->rec.data.TSN_seq) {
if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
/*
* ECN Nonce: Add the nonce to the
* sender's nonce sum
*/
asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
if (tp1->sent < SCTP_DATAGRAM_ACKED) {
/*
* If it is less than ACKED,
* it is now no-longer in
* flight. Higher values may
* occur during marking
*/
if (tp1->sent < SCTP_DATAGRAM_RESEND) {
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
tp1->whoTo->flight_size,
tp1->book_size,
(uintptr_t) tp1->whoTo,
tp1->rec.data.TSN_seq);
}
sctp_flight_size_decrease(tp1);
sctp_total_flight_decrease(stcb, tp1);
}
tp1->whoTo->net_ack += tp1->send_size;
if (tp1->snd_count < 2) {
/*
* True
* non-retransmited
* chunk
*/
tp1->whoTo->net_ack2 +=
tp1->send_size;
/* update RTO too? */
if (tp1->do_rtt) {
tp1->whoTo->RTO =
sctp_calculate_rto(stcb,
asoc, tp1->whoTo,
&tp1->sent_rcv_time);
tp1->do_rtt = 0;
}
}
/*
* CMT: CUCv2 algorithm.
* From the cumack'd TSNs,
* for each TSN being acked
* for the first time, set
* the following variables
* for the corresp
* destination.
* new_pseudo_cumack will
* trigger a cwnd update.
* find_(rtx_)pseudo_cumack
* will trigger search for
* the next expected
* (rtx-)pseudo-cumack.
*/
tp1->whoTo->new_pseudo_cumack = 1;
tp1->whoTo->find_pseudo_cumack = 1;
tp1->whoTo->find_rtx_pseudo_cumack = 1;
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
}
}
if (tp1->sent == SCTP_DATAGRAM_RESEND) {
sctp_ucount_decr(asoc->sent_queue_retran_cnt);
}
if (tp1->rec.data.chunk_was_revoked) {
/* deflate the cwnd */
tp1->whoTo->cwnd -= tp1->book_size;
tp1->rec.data.chunk_was_revoked = 0;
}
tp1->sent = SCTP_DATAGRAM_ACKED;
if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
printf("Warning, an unsent is now acked?\n");
}
/*
* ECN Nonce: Add the nonce to the sender's
* nonce sum
*/
asoc->nonce_sum_expect_base += tp1->rec.data.ect_nonce;
if (tp1->sent < SCTP_DATAGRAM_ACKED) {
/*
* If it is less than ACKED, it is
* now no-longer in flight. Higher
* values may occur during marking
*/
if (tp1->sent < SCTP_DATAGRAM_RESEND) {
if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
tp1->whoTo->flight_size,
tp1->book_size,
(uintptr_t) tp1->whoTo,
tp1->rec.data.TSN_seq);
}
sctp_flight_size_decrease(tp1);
sctp_total_flight_decrease(stcb, tp1);
}
tp1->whoTo->net_ack += tp1->send_size;
if (tp1->snd_count < 2) {
/*
* True non-retransmited
* chunk
*/
tp1->whoTo->net_ack2 +=
tp1->send_size;
/* update RTO too? */
if (tp1->do_rtt) {
tp1->whoTo->RTO =
sctp_calculate_rto(stcb,
asoc, tp1->whoTo,
&tp1->sent_rcv_time,
sctp_align_safe_nocopy);
tp1->do_rtt = 0;
}
}
/*
* CMT: CUCv2 algorithm. From the
* cumack'd TSNs, for each TSN being
* acked for the first time, set the
* following variables for the
* corresp destination.
* new_pseudo_cumack will trigger a
* cwnd update.
* find_(rtx_)pseudo_cumack will
* trigger search for the next
* expected (rtx-)pseudo-cumack.
*/
tp1->whoTo->new_pseudo_cumack = 1;
tp1->whoTo->find_pseudo_cumack = 1;
tp1->whoTo->find_rtx_pseudo_cumack = 1;
if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
}
}
if (tp1->sent == SCTP_DATAGRAM_RESEND) {
sctp_ucount_decr(asoc->sent_queue_retran_cnt);
}
if (tp1->rec.data.chunk_was_revoked) {
/* deflate the cwnd */
tp1->whoTo->cwnd -= tp1->book_size;
tp1->rec.data.chunk_was_revoked = 0;
}
tp1->sent = SCTP_DATAGRAM_ACKED;
TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
if (tp1->data) {
sctp_free_bufspace(stcb, asoc, tp1, 1);
sctp_m_freem(tp1->data);
}
if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
sctp_log_sack(asoc->last_acked_seq,
cumack,
tp1->rec.data.TSN_seq,
0,
0,
SCTP_LOG_FREE_SENT);
}
tp1->data = NULL;
asoc->sent_queue_cnt--;
sctp_free_a_chunk(stcb, tp1);
tp1 = tp2;
} else {
break;
}
TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
if (tp1->data) {
sctp_free_bufspace(stcb, asoc, tp1, 1);
sctp_m_freem(tp1->data);
}
if (sctp_logging_level & SCTP_SACK_LOGGING_ENABLE) {
sctp_log_sack(asoc->last_acked_seq,
cumack,
tp1->rec.data.TSN_seq,
0,
0,
SCTP_LOG_FREE_SENT);
}
tp1->data = NULL;
asoc->sent_queue_cnt--;
sctp_free_a_chunk(stcb, tp1);
tp1 = tp2;
}
}
if (stcb->sctp_socket) {
SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
@ -4268,6 +4276,13 @@ sctp_handle_sack(struct mbuf *m, int offset,
SCTP_STAT_INCR(sctps_slowpath_sack);
nonce_sum_flag = ch->ch.chunk_flags & SCTP_SACK_NONCE_SUM;
cum_ack = last_tsn = ntohl(sack->cum_tsn_ack);
#ifdef SCTP_ASOCLOG_OF_TSNS
stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
stcb->asoc.cumack_log_at++;
if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
stcb->asoc.cumack_log_at = 0;
}
#endif
num_seg = ntohs(sack->num_gap_ack_blks);
a_rwnd = rwnd;
@ -4491,7 +4506,8 @@ sctp_handle_sack(struct mbuf *m, int offset,
tp1->whoTo->RTO =
sctp_calculate_rto(stcb,
asoc, tp1->whoTo,
&tp1->sent_rcv_time);
&tp1->sent_rcv_time,
sctp_align_safe_nocopy);
tp1->do_rtt = 0;
}
}
@ -4626,15 +4642,11 @@ sctp_handle_sack(struct mbuf *m, int offset,
}
if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
/* no more sent on list */
break;
printf("Warning, tp1->sent == %d and its now acked?\n",
tp1->sent);
}
tp2 = TAILQ_NEXT(tp1, sctp_next);
TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
/*
* Friendlier printf in lieu of panic now that I think its
* fixed
*/
if (tp1->pr_sctp_on) {
if (asoc->pr_sctp_cnt != 0)
asoc->pr_sctp_cnt--;

View File

@ -406,6 +406,17 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
*abort_no_unlock = 1;
return (-1);
}
/* if the peer doesn't support asconf, flush the asconf queue */
if (asoc->peer_supports_asconf == 0) {
struct sctp_asconf_addr *aparam;
while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
/* sa_ignore FREED_MEMORY */
aparam = TAILQ_FIRST(&asoc->asconf_queue);
TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
}
}
stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
stcb->asoc.local_hmacs);
if (op_err) {
@ -427,7 +438,7 @@ sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);
/* calculate the RTO */
net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered);
net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy);
retval = sctp_send_cookie_echo(m, offset, stcb, net);
if (retval < 0) {
@ -555,7 +566,7 @@ sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
* timer is running, for the destination, stop the timer because a
* PF-heartbeat was received.
*/
if (sctp_cmt_pf && (net->dest_state & SCTP_ADDR_PF) ==
if (sctp_cmt_on_off && sctp_cmt_pf && (net->dest_state & SCTP_ADDR_PF) ==
SCTP_ADDR_PF) {
if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
@ -568,7 +579,7 @@ sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
net, net->cwnd);
}
/* Now lets do a RTO with this */
r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv);
r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy);
}
static void
@ -1217,7 +1228,7 @@ sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
*/
net->hb_responded = 1;
net->RTO = sctp_calculate_rto(stcb, asoc, net,
&cookie->time_entered);
&cookie->time_entered, sctp_align_unsafe_makecopy);
if (stcb->asoc.sctp_autoclose_ticks &&
(sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
@ -1801,7 +1812,7 @@ sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
if ((netp) && (*netp)) {
(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
&cookie->time_entered);
&cookie->time_entered, sctp_align_unsafe_makecopy);
}
sctp_send_cookie_ack(stcb);
return (stcb);
@ -2302,7 +2313,7 @@ sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
SCTP_STAT_INCR_GAUGE32(sctps_currestab);
if (asoc->overall_error_count == 0) {
net->RTO = sctp_calculate_rto(stcb, asoc, net,
&asoc->time_entered);
&asoc->time_entered, sctp_align_safe_nocopy);
}
(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL);

View File

@ -49,6 +49,8 @@ __FBSDID("$FreeBSD$");
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/priv.h>
#include <sys/jail.h>
#include <sys/sysctl.h>
#include <sys/resourcevar.h>
#include <sys/uio.h>
@ -299,6 +301,7 @@ typedef struct callout sctp_os_timer_t;
#define SCTP_REGISTER_INTERFACE(ifhandle, af)
#define SCTP_DEREGISTER_INTERFACE(ifhandle, af)
/*************************/
/* These are for logging */
/*************************/

View File

@ -3437,7 +3437,7 @@ sctp_lowlevel_chunk_output(struct sctp_inpcb *inp,
* Stop any running T3
* timers here?
*/
if (sctp_cmt_pf) {
if (sctp_cmt_on_off && sctp_cmt_pf) {
net->dest_state &= ~SCTP_ADDR_PF;
SCTPDBG(SCTP_DEBUG_OUTPUT1, "Destination %p moved from PF to unreachable.\n",
net);
@ -3835,6 +3835,12 @@ sctp_send_initiate(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
return;
}
SCTP_BUF_LEN(m) = sizeof(struct sctp_init_msg);
/*
* assume peer supports asconf in order to be able to queue local
* address changes while an INIT is in flight and before the assoc
* is established.
*/
stcb->asoc.peer_supports_asconf = 1;
/* Now lets put the SCTP header in place */
initm = mtod(m, struct sctp_init_msg *);
initm->sh.src_port = inp->sctp_lport;
@ -6483,16 +6489,20 @@ sctp_move_to_outqueue(struct sctp_tcb *stcb, struct sctp_nets *net,
*/
#ifdef SCTP_ASOCLOG_OF_TSNS
SCTP_TCB_LOCK_ASSERT(stcb);
if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
asoc->tsn_out_at = 0;
asoc->tsn_out_wrapped = 1;
}
asoc->out_tsnlog[asoc->tsn_out_at].tsn = chk->rec.data.TSN_seq;
asoc->out_tsnlog[asoc->tsn_out_at].strm = chk->rec.data.stream_number;
asoc->out_tsnlog[asoc->tsn_out_at].seq = chk->rec.data.stream_seq;
asoc->out_tsnlog[asoc->tsn_out_at].sz = chk->send_size;
asoc->out_tsnlog[asoc->tsn_out_at].flgs = chk->rec.data.rcv_flags;
asoc->out_tsnlog[asoc->tsn_out_at].stcb = (void *)stcb;
asoc->out_tsnlog[asoc->tsn_out_at].in_pos = asoc->tsn_out_at;
asoc->out_tsnlog[asoc->tsn_out_at].in_out = 2;
asoc->tsn_out_at++;
if (asoc->tsn_out_at >= SCTP_TSN_LOG_SIZE) {
asoc->tsn_out_at = 0;
asoc->tsn_out_wrapped = 1;
}
#endif
dchkh->ch.chunk_type = SCTP_DATA;
@ -6740,7 +6750,7 @@ sctp_move_to_an_alt(struct sctp_tcb *stcb,
* destination using the PF algorithm for finding alternate
* destinations.
*/
if (sctp_cmt_pf) {
if (sctp_cmt_on_off && sctp_cmt_pf) {
a_net = sctp_find_alternate_net(stcb, net, 2);
} else {
a_net = sctp_find_alternate_net(stcb, net, 0);
@ -7410,7 +7420,7 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
* restart it.
*/
sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
} else if (sctp_cmt_pf && pf_hbflag && ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)
} else if (sctp_cmt_on_off && sctp_cmt_pf && pf_hbflag && ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)
&& (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer))) {
/*
* JRS 5/14/07 - If a HB has been sent to a
@ -8659,7 +8669,7 @@ sctp_chunk_output(struct sctp_inpcb *inp,
*/
if (net->ref_count > 1)
sctp_move_to_an_alt(stcb, asoc, net);
} else if (sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
} else if (sctp_cmt_on_off && sctp_cmt_pf && ((net->dest_state & SCTP_ADDR_PF) ==
SCTP_ADDR_PF)) {
/*
* JRS 5/14/07 - If CMT PF is on and the current

View File

@ -2125,13 +2125,14 @@ sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport, uint32_t vrf_id)
int
sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
struct sctp_ifa *sctp_ifap, struct thread *p)
struct thread *p)
{
/* bind a ep to a socket address */
struct sctppcbhead *head;
struct sctp_inpcb *inp, *inp_tmp;
struct inpcb *ip_inp;
int bindall;
int prison = 0;
uint16_t lport;
int error;
uint32_t vrf_id;
@ -2153,6 +2154,9 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
/* already did a bind, subsequent binds NOT allowed ! */
return (EINVAL);
}
if (jailed(p->td_ucred)) {
prison = 1;
}
if (addr != NULL) {
if (addr->sa_family == AF_INET) {
struct sockaddr_in *sin;
@ -2166,7 +2170,15 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
sin = (struct sockaddr_in *)addr;
lport = sin->sin_port;
if (prison) {
/*
* For INADDR_ANY and LOOPBACK the
* prison_ip() call will transmute the ip
* address to the proper value.
*/
if (prison_ip(p->td_ucred, 0, &sin->sin_addr.s_addr))
return (EINVAL);
}
if (sin->sin_addr.s_addr != INADDR_ANY) {
bindall = 0;
}
@ -2180,6 +2192,11 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
return (EINVAL);
lport = sin6->sin6_port;
/*
* Jail checks for IPv6 should go HERE! i.e. add the
* prison_ip() equivalent in this position to
* transmute the addresses to the proper jailed one.
*/
if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
bindall = 0;
/* KAME hack: embed scopeid */
@ -2375,11 +2392,8 @@ sctp_inpcb_bind(struct socket *so, struct sockaddr *addr,
* zero out the port to find the address! yuck! can't do
* this earlier since need port for sctp_pcb_findep()
*/
if (sctp_ifap)
ifa = sctp_ifap;
else
ifa = sctp_find_ifa_by_addr((struct sockaddr *)&store_sa,
vrf_id, 0);
ifa = sctp_find_ifa_by_addr((struct sockaddr *)&store_sa,
vrf_id, 0);
if (ifa == NULL) {
/* Can't find an interface with that address */
SCTP_INP_WUNLOCK(inp);
@ -3378,7 +3392,7 @@ sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
* ephemerial bind for you.
*/
if ((err = sctp_inpcb_bind(inp->sctp_socket,
(struct sockaddr *)NULL, (struct sctp_ifa *)NULL,
(struct sockaddr *)NULL,
(struct thread *)NULL
))) {
/* bind error, probably perm */
@ -4537,8 +4551,8 @@ sctp_del_local_addr_assoc(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
inp = stcb->sctp_ep;
/* if subset bound and don't allow ASCONF's, can't delete last */
if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
(sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF) == 0)) {
if (stcb->asoc.numnets < 2) {
sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF)) {
if (stcb->sctp_ep->laddr_count < 2) {
/* can't delete last address */
return;
}
@ -4823,6 +4837,12 @@ sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
/* the assoc was freed? */
return (-4);
}
/*
* peer must explicitly turn this on. This may have been initialized
* to be "on" in order to allow local addr changes while INIT's are
* in flight.
*/
stcb->asoc.peer_supports_asconf = 0;
/* now we must go through each of the params. */
phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
while (phdr) {

View File

@ -291,7 +291,7 @@ struct sctp_pcb {
struct sctp_timer zero_copy_timer;
/* Zero copy app to transport (sendq) read repulse timer */
struct sctp_timer zero_copy_sendq_timer;
int def_cookie_life;
uint32_t def_cookie_life;
/* defaults to 0 */
int auto_close_time;
uint32_t initial_sequence_debug;
@ -312,6 +312,17 @@ struct sctp_pcb {
#define sctp_lport ip_inp.inp.inp_lport
struct sctp_pcbtsn_rlog {
uint32_t vtag;
uint16_t strm;
uint16_t seq;
uint16_t sz;
uint16_t flgs;
};
#define SCTP_READ_LOG_SIZE 135 /* we choose the number to make a pcb a page */
struct sctp_inpcb {
/*-
* put an inpcb in front of it all, kind of a waste but we need to
@ -378,6 +389,10 @@ struct sctp_inpcb {
uint32_t total_recvs;
uint32_t last_abort_code;
uint32_t total_nospaces;
#ifdef SCTP_ASOCLOG_OF_TSNS
struct sctp_pcbtsn_rlog readlog[SCTP_READ_LOG_SIZE];
uint32_t readlog_index;
#endif
};
struct sctp_tcb {
@ -452,7 +467,7 @@ struct sctp_nets *sctp_findnet(struct sctp_tcb *, struct sockaddr *);
struct sctp_inpcb *sctp_pcb_findep(struct sockaddr *, int, int, uint32_t);
int
sctp_inpcb_bind(struct socket *, struct sockaddr *, struct sctp_ifa *,
sctp_inpcb_bind(struct socket *, struct sockaddr *,
struct thread *);
struct sctp_tcb *

View File

@ -51,6 +51,7 @@ sctp_can_peel_off(struct socket *head, sctp_assoc_t assoc_id)
{
struct sctp_inpcb *inp;
struct sctp_tcb *stcb;
uint32_t state;
inp = (struct sctp_inpcb *)head->so_pcb;
if (inp == NULL) {
@ -60,6 +61,14 @@ sctp_can_peel_off(struct socket *head, sctp_assoc_t assoc_id)
if (stcb == NULL) {
return (ENOTCONN);
}
state = SCTP_GET_STATE((&stcb->asoc));
if ((state == SCTP_STATE_EMPTY) ||
(state == SCTP_STATE_INUSE) ||
(state == SCTP_STATE_COOKIE_WAIT) ||
(state == SCTP_STATE_COOKIE_ECHOED)) {
SCTP_TCB_UNLOCK(stcb);
return (ENOTCONN);
}
SCTP_TCB_UNLOCK(stcb);
/* We are clear to peel this one off */
return (0);
@ -70,6 +79,7 @@ sctp_do_peeloff(struct socket *head, struct socket *so, sctp_assoc_t assoc_id)
{
struct sctp_inpcb *inp, *n_inp;
struct sctp_tcb *stcb;
uint32_t state;
inp = (struct sctp_inpcb *)head->so_pcb;
if (inp == NULL)
@ -78,6 +88,14 @@ sctp_do_peeloff(struct socket *head, struct socket *so, sctp_assoc_t assoc_id)
if (stcb == NULL)
return (ENOTCONN);
state = SCTP_GET_STATE((&stcb->asoc));
if ((state == SCTP_STATE_EMPTY) ||
(state == SCTP_STATE_INUSE) ||
(state == SCTP_STATE_COOKIE_WAIT) ||
(state == SCTP_STATE_COOKIE_ECHOED)) {
SCTP_TCB_UNLOCK(stcb);
return (ENOTCONN);
}
n_inp = (struct sctp_inpcb *)so->so_pcb;
n_inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
SCTP_PCB_FLAGS_CONNECTED |

View File

@ -484,11 +484,14 @@ struct sctp_scoping {
#define SCTP_TSN_LOG_SIZE 40
struct sctp_tsn_log {
void *stcb;
uint32_t tsn;
uint16_t strm;
uint16_t seq;
uint16_t sz;
uint16_t flgs;
uint16_t in_pos;
uint16_t in_out;
};
#define SCTP_FS_SPEC_LOG_SIZE 200
@ -761,10 +764,13 @@ struct sctp_association {
*/
struct sctp_tsn_log in_tsnlog[SCTP_TSN_LOG_SIZE];
struct sctp_tsn_log out_tsnlog[SCTP_TSN_LOG_SIZE];
uint32_t cumack_log[SCTP_TSN_LOG_SIZE];
uint16_t tsn_in_at;
uint16_t tsn_out_at;
uint16_t tsn_in_wrapped;
uint16_t tsn_out_wrapped;
uint16_t cumack_log_at;
#endif /* SCTP_ASOCLOG_OF_TSNS */
#ifdef SCTP_FS_SPEC_LOG
struct sctp_fs_spec_log fslog[SCTP_FS_SPEC_LOG_SIZE];

View File

@ -211,7 +211,7 @@ sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
* not in PF state.
*/
/* Stop any running T3 timers here? */
if (sctp_cmt_pf) {
if (sctp_cmt_on_off && sctp_cmt_pf) {
net->dest_state &= ~SCTP_ADDR_PF;
SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
net);
@ -741,7 +741,6 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
SCTP_STAT_INCR(sctps_markedretrans);
/* reset the TSN for striking and other FR stuff */
chk->window_probe = 0;
chk->rec.data.doing_fast_retransmit = 0;
/* Clear any time so NO RTT is being done */
chk->do_rtt = 0;
@ -801,8 +800,8 @@ sctp_mark_all_for_resend(struct sctp_tcb *stcb,
}
if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
#ifdef INVARIANTS
SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d\n",
cnt_mk, stcb->asoc.sent_queue_retran_cnt);
SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n",
cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk);
#endif
#ifndef SCTP_AUDITING_ENABLED
stcb->asoc.sent_queue_retran_cnt = cnt_mk;
@ -944,10 +943,10 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
* addition, find an alternate destination with PF-based
* find_alt_net().
*/
if (sctp_cmt_pf) {
if (sctp_cmt_on_off && sctp_cmt_pf) {
if ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF) {
net->dest_state |= SCTP_ADDR_PF;
net->last_active = ticks;
net->last_active = sctp_get_tick_count();
SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from active to PF.\n",
net);
}
@ -1063,7 +1062,7 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
}
}
} else if (sctp_cmt_pf && (net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) {
} else if (sctp_cmt_on_off && sctp_cmt_pf && (net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) {
/*
* JRS 5/14/07 - If the destination hasn't failed completely
* but is in PF state, a PF-heartbeat needs to be sent

View File

@ -277,7 +277,7 @@ sctp_notify(struct sctp_inpcb *inp,
* not in PF state.
*/
/* Stop any running T3 timers here? */
if (sctp_cmt_pf) {
if (sctp_cmt_on_off && sctp_cmt_pf) {
net->dest_state &= ~SCTP_ADDR_PF;
SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
net);
@ -573,7 +573,7 @@ sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
if (inp == 0)
return EINVAL;
error = sctp_inpcb_bind(so, addr, NULL, p);
error = sctp_inpcb_bind(so, addr, p);
return error;
}
@ -1345,7 +1345,7 @@ sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
SCTP_PCB_FLAGS_UNBOUND) {
/* Bind an ephemeral port */
error = sctp_inpcb_bind(so, NULL, NULL, p);
error = sctp_inpcb_bind(so, NULL, p);
if (error) {
goto out_now;
}
@ -2211,24 +2211,26 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
case SCTP_ASSOCINFO:
{
struct sctp_assocparams *sasoc;
uint32_t oldval;
SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize);
SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
if (stcb) {
oldval = sasoc->sasoc_cookie_life;
sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life);
sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life);
SCTP_TCB_UNLOCK(stcb);
} else {
SCTP_INP_RLOCK(inp);
sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life);
sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
sasoc->sasoc_number_peer_destinations = 0;
sasoc->sasoc_peer_rwnd = 0;
sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv);
sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life);
SCTP_INP_RUNLOCK(inp);
}
*optsize = sizeof(*sasoc);
@ -2683,6 +2685,10 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize);
SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
if (sack->sack_delay) {
if (sack->sack_delay > SCTP_MAX_SACK_DELAY)
sack->sack_delay = SCTP_MAX_SACK_DELAY;
}
if (stcb) {
if (sack->sack_delay) {
if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
@ -3381,9 +3387,11 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
}
if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0;
else if (paddrp->spp_hbinterval)
else if (paddrp->spp_hbinterval) {
if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL)
paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL;
inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval);
}
if (paddrp->spp_flags & SPP_HB_ENABLE) {
sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
@ -3454,7 +3462,14 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize);
SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
if (sasoc->sasoc_cookie_life) {
/* boundary check the cookie life */
if (sasoc->sasoc_cookie_life < 1000)
sasoc->sasoc_cookie_life = 1000;
if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) {
sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE;
}
}
if (stcb) {
if (sasoc->sasoc_asocmaxrxt)
stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
@ -3462,9 +3477,8 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
sasoc->sasoc_peer_rwnd = 0;
sasoc->sasoc_local_rwnd = 0;
if (sasoc->sasoc_cookie_life) {
if (sasoc->sasoc_cookie_life < 1000)
sasoc->sasoc_cookie_life = 1000;
stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
stcb->asoc.cookie_life = sasoc->sasoc_cookie_life;
}
SCTP_TCB_UNLOCK(stcb);
} else {
@ -3475,8 +3489,6 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
sasoc->sasoc_peer_rwnd = 0;
sasoc->sasoc_local_rwnd = 0;
if (sasoc->sasoc_cookie_life) {
if (sasoc->sasoc_cookie_life < 1000)
sasoc->sasoc_cookie_life = 1000;
inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
}
SCTP_INP_WUNLOCK(inp);
@ -3619,9 +3631,33 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
case SCTP_BINDX_ADD_ADDR:
{
struct sctp_getaddresses *addrs;
int sz;
struct thread *td;
int prison = 0;
td = (struct thread *)p;
if (jailed(td->td_ucred)) {
prison = 1;
}
SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses,
optsize);
if (addrs->addr->sa_family == AF_INET) {
sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
if (optsize < sz) {
error = EINVAL;
break;
}
if (prison && prison_ip(td->td_ucred, 0, &(((struct sockaddr_in *)(addrs->addr))->sin_addr.s_addr))) {
error = EADDRNOTAVAIL;
}
} else if (addrs->addr->sa_family == AF_INET6) {
sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
if (optsize < sz) {
error = EINVAL;
break;
}
/* JAIL XXXX Add else here for V6 */
}
sctp_bindx_add_address(so, inp, addrs->addr,
addrs->sget_assoc_id, vrf_id,
&error, p);
@ -3630,8 +3666,32 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
case SCTP_BINDX_REM_ADDR:
{
struct sctp_getaddresses *addrs;
int sz;
struct thread *td;
int prison = 0;
td = (struct thread *)p;
if (jailed(td->td_ucred)) {
prison = 1;
}
SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
if (addrs->addr->sa_family == AF_INET) {
sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
if (optsize < sz) {
error = EINVAL;
break;
}
if (prison && prison_ip(td->td_ucred, 0, &(((struct sockaddr_in *)(addrs->addr))->sin_addr.s_addr))) {
error = EADDRNOTAVAIL;
}
} else if (addrs->addr->sa_family == AF_INET6) {
sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
if (optsize < sz) {
error = EINVAL;
break;
}
/* JAIL XXXX Add else here for V6 */
}
sctp_bindx_delete_address(so, inp, addrs->addr,
addrs->sget_assoc_id, vrf_id,
&error);
@ -3743,7 +3803,7 @@ sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
SCTP_PCB_FLAGS_UNBOUND) {
/* Bind an ephemeral port */
error = sctp_inpcb_bind(so, NULL, NULL, p);
error = sctp_inpcb_bind(so, NULL, p);
if (error) {
goto out_now;
}
@ -3854,7 +3914,7 @@ sctp_listen(struct socket *so, int backlog, struct thread *p)
/* We must do a bind. */
SOCK_UNLOCK(so);
SCTP_INP_RUNLOCK(inp);
if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) {
if ((error = sctp_inpcb_bind(so, NULL, p))) {
/* bind error, probably perm */
return (error);
}

View File

@ -246,6 +246,7 @@ extern struct pr_usrreqs sctp_usrreqs;
stcb->asoc.fslog[stcb->asoc.fs_index].incr = 0; \
stcb->asoc.fslog[stcb->asoc.fs_index].decr = 1; \
stcb->asoc.fs_index++; \
tp1->window_probe = 0; \
if (stcb->asoc.total_flight >= tp1->book_size) { \
stcb->asoc.total_flight -= tp1->book_size; \
if (stcb->asoc.total_flight_count > 0) \

View File

@ -918,6 +918,16 @@ sctp_init_asoc(struct sctp_inpcb *m, struct sctp_tcb *stcb,
else
asoc->hb_is_disabled = 0;
#ifdef SCTP_ASOCLOG_OF_TSNS
asoc->tsn_in_at = 0;
asoc->tsn_out_at = 0;
asoc->tsn_in_wrapped = 0;
asoc->tsn_out_wrapped = 0;
asoc->cumack_log_at = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
asoc->fs_index = 0;
#endif
asoc->refcnt = 0;
asoc->assoc_up_sent = 0;
asoc->assoc_id = asoc->my_vtag;
@ -2565,9 +2575,10 @@ uint32_t
sctp_calculate_rto(struct sctp_tcb *stcb,
struct sctp_association *asoc,
struct sctp_nets *net,
struct timeval *old)
struct timeval *told,
int safe)
{
/*
/*-
* given an association and the starting time of the current RTT
* period (in value1/value2) return RTO in number of msecs.
*/
@ -2575,8 +2586,19 @@ sctp_calculate_rto(struct sctp_tcb *stcb,
int o_calctime;
uint32_t new_rto = 0;
int first_measure = 0;
struct timeval now;
struct timeval now, then, *old;
/* Copy it out for sparc64 */
if (safe == sctp_align_unsafe_makecopy) {
old = &then;
memcpy(&then, told, sizeof(struct timeval));
} else if (safe == sctp_align_safe_nocopy) {
old = told;
} else {
/* error */
SCTP_PRINTF("Huh, bad rto calc call\n");
return (0);
}
/************************/
/* 1. calculate new RTT */
/************************/
@ -3650,6 +3672,7 @@ sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
void
sctp_print_out_track_log(struct sctp_tcb *stcb)
{
#ifdef NOSIY_PRINTS
int i;
SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code);
@ -3704,6 +3727,7 @@ sctp_print_out_track_log(struct sctp_tcb *stcb)
stcb->asoc.out_tsnlog[i].sz);
}
}
#endif
}
#endif
@ -4232,15 +4256,6 @@ sctp_append_to_readq(struct sctp_inpcb *inp,
}
tail = m;
}
if (end) {
/* message is complete */
if (stcb && (control == stcb->asoc.control_pdapi)) {
stcb->asoc.control_pdapi = NULL;
}
control->held_length = 0;
control->end_added = 1;
}
atomic_add_int(&control->length, len);
if (control->tail_mbuf) {
/* append */
SCTP_BUF_NEXT(control->tail_mbuf) = m;
@ -4255,6 +4270,15 @@ sctp_append_to_readq(struct sctp_inpcb *inp,
control->data = m;
control->tail_mbuf = tail;
}
atomic_add_int(&control->length, len);
if (end) {
/* message is complete */
if (stcb && (control == stcb->asoc.control_pdapi)) {
stcb->asoc.control_pdapi = NULL;
}
control->held_length = 0;
control->end_added = 1;
}
if (stcb == NULL) {
control->do_not_ref_stcb = 1;
}
@ -4656,7 +4680,6 @@ sctp_sorecvmsg(struct socket *so,
uint32_t rwnd_req = 0;
int hold_sblock = 0;
int hold_rlock = 0;
int alen = 0;
int slen = 0;
uint32_t held_length = 0;
int sockbuf_lock = 0;
@ -5059,6 +5082,26 @@ sctp_sorecvmsg(struct socket *so,
sinfo->sinfo_flags |= SCTP_UNORDERED;
}
}
#ifdef SCTP_ASOCLOG_OF_TSNS
{
int index, newindex;
struct sctp_pcbtsn_rlog *entry;
do {
index = inp->readlog_index;
newindex = index + 1;
if (newindex >= SCTP_READ_LOG_SIZE) {
newindex = 0;
}
} while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0);
entry = &inp->readlog[index];
entry->vtag = control->sinfo_assoc_id;
entry->strm = control->sinfo_stream;
entry->seq = control->sinfo_ssn;
entry->sz = control->length;
entry->flgs = control->sinfo_flags;
}
#endif
if (fromlen && from) {
struct sockaddr *to;
@ -5171,10 +5214,7 @@ sctp_sorecvmsg(struct socket *so,
embuf = m;
copied_so_far += cp_len;
freed_so_far += cp_len;
alen = atomic_fetchadd_int(&control->length, -(cp_len));
if (alen < cp_len) {
panic("Control length goes negative?");
}
atomic_subtract_int(&control->length, cp_len);
control->data = sctp_m_free(m);
m = control->data;
/*
@ -5228,10 +5268,7 @@ sctp_sorecvmsg(struct socket *so,
sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb,
SCTP_LOG_SBRESULT, 0);
}
alen = atomic_fetchadd_int(&control->length, -(cp_len));
if (alen < cp_len) {
panic("Control length goes negative2?");
}
atomic_subtract_int(&control->length, cp_len);
} else {
copied_so_far += cp_len;
}
@ -5895,7 +5932,7 @@ sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
*error = EINVAL;
return;
}
*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
*error = sctp_inpcb_bind(so, addr_touse, p);
return;
}
/*

View File

@ -120,7 +120,7 @@ void
uint32_t
sctp_calculate_rto(struct sctp_tcb *, struct sctp_association *,
struct sctp_nets *, struct timeval *);
struct sctp_nets *, struct timeval *, int);
uint32_t sctp_calculate_len(struct mbuf *);

View File

@ -614,7 +614,7 @@ sctp6_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
in6_sin6_2_sin(&sin, sin6_p);
inp6->inp_vflag |= INP_IPV4;
inp6->inp_vflag &= ~INP_IPV6;
error = sctp_inpcb_bind(so, (struct sockaddr *)&sin, NULL, p);
error = sctp_inpcb_bind(so, (struct sockaddr *)&sin, p);
return error;
}
}
@ -634,7 +634,7 @@ sctp6_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
return EINVAL;
}
}
error = sctp_inpcb_bind(so, addr, NULL, p);
error = sctp_inpcb_bind(so, addr, p);
return error;
}