Improvements to CC modules:

1) Add four new hook points that pass more information to the
   CC algorithms (see the module sketch after this list).
2) Fix the case where the user changes the CC module on an existing TCB;
   in that case the new module's initialization function must be called
   on all nets.
3) Move the HTCP CC structure into a union that other modules can reuse.
4) Add a fifth hook point so get/set socket options can carry
   CC-module-specific options (a user-space usage sketch accompanies the
   struct sctp_cc_option hunk below).
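As context for items 1 and 4 (not part of the commit itself), the sketch below shows how a CC module could wire up the five new members of struct sctp_cc_functions. The callback names and signatures are taken from the sctp_structs.h hunk in this diff; the example_* bodies, the include list, and the initializer are illustrative assumptions only. Every hook may also be left NULL, since all call sites added here check the pointer before dispatching.

/*
 * Hypothetical CC module skeleton (illustration only, not in this commit).
 * Signatures mirror the new struct sctp_cc_functions members.
 */
#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_structs.h>
#include <netinet/sctp_uio.h>

static void
example_packet_transmitted(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* Hook 1: a packet for this net was just queued for the wire
	 * (called from sctp_clean_up_datalist()). */
}

static void
example_tsn_acknowledged(struct sctp_nets *net, struct sctp_tmit_chunk *tp1)
{
	/* Hook 2: a TSN on this net left the flight (cum-ack, GAP ack,
	 * strike, or window-probe recovery). */
}

static void
example_new_transmission_begins(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* Hook 3: flight size is zero and a fresh burst is about to start
	 * (sctp_med_chunk_output() and the T3-rxt timer). */
}

static void
example_prepare_net_for_sack(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* Hook 4: per-net preparation before a SACK/cum-ack is processed. */
}

static int
example_socket_option(struct sctp_tcb *stcb, int setorget,
    struct sctp_cc_option *cc_opt)
{
	/* Hook 5: SCTP_CC_OPTION handler; setorget is 1 for setsockopt()
	 * and 0 for getsockopt().  This sketch accepts no options. */
	return (EINVAL);
}

/* Only the new members are shown; a real table also fills in the
 * pre-existing cwnd-update entry points. */
static struct sctp_cc_functions example_cc_functions = {
	.sctp_cwnd_update_packet_transmitted = example_packet_transmitted,
	.sctp_cwnd_update_tsn_acknowledged = example_tsn_acknowledged,
	.sctp_cwnd_new_transmission_begins = example_new_transmission_begins,
	.sctp_cwnd_prepare_net_for_sack = example_prepare_net_for_sack,
	.sctp_cwnd_socket_option = example_socket_option,
};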

MFC after:	2 months
Randall Stewart 2011-02-26 15:23:46 +00:00
parent 65cb6238bd
commit 299108c5a2
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=219057
14 changed files with 214 additions and 57 deletions

View File

@ -161,9 +161,10 @@ struct sctp_paramhdr {
/* JRS - Pluggable Congestion Control Socket option */
#define SCTP_PLUGGABLE_CC 0x00001202
/* RS - Pluggable Stream Scheduling Socket option */
#define SCTP_PLUGGABLE_SS 0x00001203
#define SCTP_SS_VALUE 0x00001204
#define SCTP_CC_OPTION 0x00001205 /* Options for CC
* modules */
/* read only */
#define SCTP_GET_SNDBUF_USE 0x00001101
#define SCTP_GET_STAT_LOG 0x00001103

View File

@ -1135,10 +1135,10 @@ htcp_reset(struct htcp *ca)
static uint32_t
htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
{
net->htcp_ca.last_cong = net->htcp_ca.undo_last_cong;
net->htcp_ca.maxRTT = net->htcp_ca.undo_maxRTT;
net->htcp_ca.old_maxB = net->htcp_ca.undo_old_maxB;
return max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->htcp_ca.beta) * net->mtu);
net->cc_mod.htcp_ca.last_cong = net->cc_mod.htcp_ca.undo_last_cong;
net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.undo_maxRTT;
net->cc_mod.htcp_ca.old_maxB = net->cc_mod.htcp_ca.undo_old_maxB;
return max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->cc_mod.htcp_ca.beta) * net->mtu);
}
#endif
@ -1149,15 +1149,15 @@ measure_rtt(struct sctp_tcb *stcb, struct sctp_nets *net)
uint32_t srtt = net->lastsa >> SCTP_RTT_SHIFT;
/* keep track of minimum RTT seen so far, minRTT is zero at first */
if (net->htcp_ca.minRTT > srtt || !net->htcp_ca.minRTT)
net->htcp_ca.minRTT = srtt;
if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT)
net->cc_mod.htcp_ca.minRTT = srtt;
/* max RTT */
if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->htcp_ca) > 3) {
if (net->htcp_ca.maxRTT < net->htcp_ca.minRTT)
net->htcp_ca.maxRTT = net->htcp_ca.minRTT;
if (net->htcp_ca.maxRTT < srtt && srtt <= net->htcp_ca.maxRTT + MSEC_TO_TICKS(20))
net->htcp_ca.maxRTT = srtt;
if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) {
if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT)
net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT;
if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT + MSEC_TO_TICKS(20))
net->cc_mod.htcp_ca.maxRTT = srtt;
}
}
@ -1167,7 +1167,7 @@ measure_achieved_throughput(struct sctp_tcb *stcb, struct sctp_nets *net)
uint32_t now = sctp_get_tick_count();
if (net->fast_retran_ip == 0)
net->htcp_ca.bytes_acked = net->net_ack;
net->cc_mod.htcp_ca.bytes_acked = net->net_ack;
if (!use_bandwidth_switch)
return;
@ -1175,29 +1175,29 @@ measure_achieved_throughput(struct sctp_tcb *stcb, struct sctp_nets *net)
/* achieved throughput calculations */
/* JRS - not 100% sure of this statement */
if (net->fast_retran_ip == 1) {
net->htcp_ca.bytecount = 0;
net->htcp_ca.lasttime = now;
net->cc_mod.htcp_ca.bytecount = 0;
net->cc_mod.htcp_ca.lasttime = now;
return;
}
net->htcp_ca.bytecount += net->net_ack;
net->cc_mod.htcp_ca.bytecount += net->net_ack;
if (net->htcp_ca.bytecount >= net->cwnd - ((net->htcp_ca.alpha >> 7 ? : 1) * net->mtu)
&& now - net->htcp_ca.lasttime >= net->htcp_ca.minRTT
&& net->htcp_ca.minRTT > 0) {
uint32_t cur_Bi = net->htcp_ca.bytecount / net->mtu * hz / (now - net->htcp_ca.lasttime);
if (net->cc_mod.htcp_ca.bytecount >= net->cwnd - ((net->cc_mod.htcp_ca.alpha >> 7 ? : 1) * net->mtu)
&& now - net->cc_mod.htcp_ca.lasttime >= net->cc_mod.htcp_ca.minRTT
&& net->cc_mod.htcp_ca.minRTT > 0) {
uint32_t cur_Bi = net->cc_mod.htcp_ca.bytecount / net->mtu * hz / (now - net->cc_mod.htcp_ca.lasttime);
if (htcp_ccount(&net->htcp_ca) <= 3) {
if (htcp_ccount(&net->cc_mod.htcp_ca) <= 3) {
/* just after backoff */
net->htcp_ca.minB = net->htcp_ca.maxB = net->htcp_ca.Bi = cur_Bi;
net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi = cur_Bi;
} else {
net->htcp_ca.Bi = (3 * net->htcp_ca.Bi + cur_Bi) / 4;
if (net->htcp_ca.Bi > net->htcp_ca.maxB)
net->htcp_ca.maxB = net->htcp_ca.Bi;
if (net->htcp_ca.minB > net->htcp_ca.maxB)
net->htcp_ca.minB = net->htcp_ca.maxB;
net->cc_mod.htcp_ca.Bi = (3 * net->cc_mod.htcp_ca.Bi + cur_Bi) / 4;
if (net->cc_mod.htcp_ca.Bi > net->cc_mod.htcp_ca.maxB)
net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi;
if (net->cc_mod.htcp_ca.minB > net->cc_mod.htcp_ca.maxB)
net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB;
}
net->htcp_ca.bytecount = 0;
net->htcp_ca.lasttime = now;
net->cc_mod.htcp_ca.bytecount = 0;
net->cc_mod.htcp_ca.lasttime = now;
}
}
@ -1264,25 +1264,25 @@ htcp_alpha_update(struct htcp *ca)
static void
htcp_param_update(struct sctp_tcb *stcb, struct sctp_nets *net)
{
uint32_t minRTT = net->htcp_ca.minRTT;
uint32_t maxRTT = net->htcp_ca.maxRTT;
uint32_t minRTT = net->cc_mod.htcp_ca.minRTT;
uint32_t maxRTT = net->cc_mod.htcp_ca.maxRTT;
htcp_beta_update(&net->htcp_ca, minRTT, maxRTT);
htcp_alpha_update(&net->htcp_ca);
htcp_beta_update(&net->cc_mod.htcp_ca, minRTT, maxRTT);
htcp_alpha_update(&net->cc_mod.htcp_ca);
/*
* add slowly fading memory for maxRTT to accommodate routing
* changes etc
*/
if (minRTT > 0 && maxRTT > minRTT)
net->htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
net->cc_mod.htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
}
static uint32_t
htcp_recalc_ssthresh(struct sctp_tcb *stcb, struct sctp_nets *net)
{
htcp_param_update(stcb, net);
return max(((net->cwnd / net->mtu * net->htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu);
return max(((net->cwnd / net->mtu * net->cc_mod.htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu);
}
static void
@ -1323,14 +1323,14 @@ htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
* net->cwnd += alpha / net->cwnd
*/
/* What is snd_cwnd_cnt?? */
if (((net->partial_bytes_acked / net->mtu * net->htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) {
if (((net->partial_bytes_acked / net->mtu * net->cc_mod.htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) {
/*-
* Does SCTP have a cwnd clamp?
* if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
*/
net->cwnd += net->mtu;
net->partial_bytes_acked = 0;
htcp_alpha_update(&net->htcp_ca);
htcp_alpha_update(&net->cc_mod.htcp_ca);
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
sctp_log_cwnd(stcb, net, net->mtu,
SCTP_CWND_LOG_FROM_CA);
@ -1343,7 +1343,7 @@ htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
}
}
net->htcp_ca.bytes_acked = net->mtu;
net->cc_mod.htcp_ca.bytes_acked = net->mtu;
}
}
@ -1360,11 +1360,11 @@ htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
static void
htcp_init(struct sctp_tcb *stcb, struct sctp_nets *net)
{
memset(&net->htcp_ca, 0, sizeof(struct htcp));
net->htcp_ca.alpha = ALPHA_BASE;
net->htcp_ca.beta = BETA_MIN;
net->htcp_ca.bytes_acked = net->mtu;
net->htcp_ca.last_cong = sctp_get_tick_count();
memset(&net->cc_mod.htcp_ca, 0, sizeof(struct htcp));
net->cc_mod.htcp_ca.alpha = ALPHA_BASE;
net->cc_mod.htcp_ca.beta = BETA_MIN;
net->cc_mod.htcp_ca.bytes_acked = net->mtu;
net->cc_mod.htcp_ca.last_cong = sctp_get_tick_count();
}
static void
@ -1568,7 +1568,7 @@ sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
int old_cwnd = net->cwnd;
/* JRS - reset as if state were changed */
htcp_reset(&net->htcp_ca);
htcp_reset(&net->cc_mod.htcp_ca);
net->ssthresh = htcp_recalc_ssthresh(stcb, net);
net->cwnd = net->ssthresh;
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
@ -1622,7 +1622,7 @@ sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
int old_cwnd = net->cwnd;
/* JRS - reset as if the state were being changed to timeout */
htcp_reset(&net->htcp_ca);
htcp_reset(&net->cc_mod.htcp_ca);
net->ssthresh = htcp_recalc_ssthresh(stcb, net);
net->cwnd = net->mtu;
net->partial_bytes_acked = 0;
@ -1640,7 +1640,7 @@ sctp_htcp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
old_cwnd = net->cwnd;
sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
net->htcp_ca.last_cong = sctp_get_tick_count();
net->cc_mod.htcp_ca.last_cong = sctp_get_tick_count();
/*
* make a small adjustment to cwnd and force to CA.
*/
@ -1665,7 +1665,7 @@ sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
/* JRS - reset hctp as if state changed */
if (in_window == 0) {
htcp_reset(&net->htcp_ca);
htcp_reset(&net->cc_mod.htcp_ca);
SCTP_STAT_INCR(sctps_ecnereducedcwnd);
net->ssthresh = htcp_recalc_ssthresh(stcb, net);
if (net->ssthresh < net->mtu) {

View File

@ -55,6 +55,8 @@ SDT_PROBE_DECLARE(sctp, cwnd, net, bl);
SDT_PROBE_DECLARE(sctp, cwnd, net, ecn);
/* update at a Packet-Drop -- decrease */
SDT_PROBE_DECLARE(sctp, cwnd, net, pd);
/* Rttvar probe declaration */
SDT_PROBE_DECLARE(sctp, cwnd, net, rttvar);
/* One to track an associations rwnd */
SDT_PROBE_DECLARE(sctp, rwnd, assoc, val);

View File

@ -71,6 +71,21 @@ SDT_PROBE_ARGTYPE(sctp, cwnd, net, ack, 3, "int");
/* The new value of the cwnd */
SDT_PROBE_ARGTYPE(sctp, cwnd, net, ack, 4, "int");
/* ACK-INCREASE */
SDT_PROBE_DEFINE(sctp, cwnd, net, rttvar, rttvar);
/* The Vtag << 32 | localport << 16 | remoteport */
SDT_PROBE_ARGTYPE(sctp, cwnd, net, rttvar, 0, "uint64_t");
/* obw | nbw */
SDT_PROBE_ARGTYPE(sctp, cwnd, net, rttvar, 1, "uint64_t");
/* newrtt */
SDT_PROBE_ARGTYPE(sctp, cwnd, net, rttvar, 2, "uint64_t");
/* bwrtt */
SDT_PROBE_ARGTYPE(sctp, cwnd, net, rttvar, 3, "uint64_t");
/* (cwnd << 32) | point << 16 | retval(0/1) */
SDT_PROBE_ARGTYPE(sctp, cwnd, net, rttvar, 4, "uint64_t");
/* FastRetransmit-DECREASE */
SDT_PROBE_DEFINE(sctp, cwnd, net, fr, fr);
/* The Vtag for this end */

View File

@ -2939,6 +2939,10 @@ sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1
tp1->rec.data.TSN_seq);
}
sctp_flight_size_decrease(tp1);
if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
tp1);
}
sctp_total_flight_decrease(stcb, tp1);
tp1->whoTo->net_ack += tp1->send_size;
@ -3442,6 +3446,10 @@ sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
if (tp1->whoTo) {
tp1->whoTo->net_ack++;
sctp_flight_size_decrease(tp1);
if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
tp1);
}
}
if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
@ -3737,6 +3745,10 @@ sctp_window_probe_recovery(struct sctp_tcb *stcb,
return;
}
/* First setup this by shrinking flight */
if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
tp1);
}
sctp_flight_size_decrease(tp1);
sctp_total_flight_decrease(stcb, tp1);
/* Now mark for resend */
@ -3810,6 +3822,9 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
*/
net->new_pseudo_cumack = 0;
net->will_exit_fast_recovery = 0;
if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
}
}
if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
uint32_t send_s;
@ -3883,6 +3898,10 @@ sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
tp1->rec.data.TSN_seq);
}
sctp_flight_size_decrease(tp1);
if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
tp1);
}
/* sa_ignore NO_NULL_CHK */
sctp_total_flight_decrease(stcb, tp1);
}
@ -4447,6 +4466,9 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
*/
net->new_pseudo_cumack = 0;
net->will_exit_fast_recovery = 0;
if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
}
}
/* process the new consecutive TSN first */
TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
@ -4483,6 +4505,10 @@ sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
}
sctp_flight_size_decrease(tp1);
sctp_total_flight_decrease(stcb, tp1);
if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
tp1);
}
}
tp1->whoTo->net_ack += tp1->send_size;

View File

@ -6476,6 +6476,9 @@ sctp_clean_up_datalist(struct sctp_tcb *stcb,
asoc->peers_rwnd = 0;
}
}
if (asoc->cc_functions.sctp_cwnd_update_packet_transmitted) {
(*asoc->cc_functions.sctp_cwnd_update_packet_transmitted) (stcb, net);
}
}
static void
@ -7341,6 +7344,10 @@ sctp_med_chunk_output(struct sctp_inpcb *inp,
}
continue;
}
if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
(net->flight_size == 0)) {
(*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
}
if ((asoc->sctp_cmt_on_off == 0) &&
(asoc->primary_destination != net) &&
(net->ref_count < 2)) {

View File

@ -3953,7 +3953,8 @@ sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
stcb->asoc.smallest_mtu = net->mtu;
}
/* JRS - Use the congestion control given in the CC module */
stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL)
(*stcb->asoc.cc_functions.sctp_set_initial_cc_param) (stcb, net);
/*
* CMT: CUC algo - set find_pseudo_cumack to TRUE (1) at beginning

View File

@ -219,7 +219,6 @@ struct htcp {
uint32_t lasttime;
};
struct sctp_nets {
TAILQ_ENTRY(sctp_nets) sctp_next; /* next link */
@ -254,10 +253,9 @@ struct sctp_nets {
/* last time in seconds I sent to it */
struct timeval last_sent_time;
/* JRS - struct used in HTCP algorithm */
struct htcp htcp_ca;
union cc_control_data {
struct htcp htcp_ca; /* JRS - struct used in HTCP algorithm */
} cc_mod;
int ref_count;
/* Congestion stats per destination */
@ -648,6 +646,15 @@ struct sctp_cc_functions {
struct sctp_nets *net, int burst_limit);
void (*sctp_cwnd_update_after_fr_timer) (struct sctp_inpcb *inp,
struct sctp_tcb *stcb, struct sctp_nets *net);
void (*sctp_cwnd_update_packet_transmitted) (struct sctp_tcb *stcb,
struct sctp_nets *net);
void (*sctp_cwnd_update_tsn_acknowledged) (struct sctp_nets *net,
struct sctp_tmit_chunk *);
void (*sctp_cwnd_new_transmission_begins) (struct sctp_tcb *stcb,
struct sctp_nets *net);
void (*sctp_cwnd_prepare_net_for_sack) (struct sctp_tcb *stcb,
struct sctp_nets *net);
int (*sctp_cwnd_socket_option) (struct sctp_tcb *stcb, int set, struct sctp_cc_option *);
};
/*
@ -1166,6 +1173,7 @@ struct sctp_association {
uint8_t sctp_nr_sack_on_off;
/* JRS 5/21/07 - CMT PF variable */
uint8_t sctp_cmt_pf;
uint8_t use_precise_time;
/*
* The mapping array is used to track out of order sequences above
* last_acked_seq. 0 indicates packet missing 1 indicates packet

View File

@ -115,6 +115,9 @@ sctp_init_sysctls()
SCTP_BASE_SYSCTL(sctp_vtag_time_wait) = SCTPCTL_TIME_WAIT_DEFAULT;
SCTP_BASE_SYSCTL(sctp_buffer_splitting) = SCTPCTL_BUFFER_SPLITTING_DEFAULT;
SCTP_BASE_SYSCTL(sctp_initial_cwnd) = SCTPCTL_INITIAL_CWND_DEFAULT;
SCTP_BASE_SYSCTL(sctp_rttvar_bw) = SCTPCTL_RTTVAR_BW_DEFAULT;
SCTP_BASE_SYSCTL(sctp_rttvar_rtt) = SCTPCTL_RTTVAR_RTT_DEFAULT;
SCTP_BASE_SYSCTL(sctp_rttvar_eqret) = SCTPCTL_RTTVAR_EQRET_DEFAULT;
#if defined(SCTP_LOCAL_TRACE_BUF)
memset(&SCTP_BASE_SYSCTL(sctp_log), 0, sizeof(struct sctp_log));
#endif
@ -633,6 +636,9 @@ sysctl_sctp_check(SYSCTL_HANDLER_ARGS)
RANGECHK(SCTP_BASE_SYSCTL(sctp_vtag_time_wait), SCTPCTL_TIME_WAIT_MIN, SCTPCTL_TIME_WAIT_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_buffer_splitting), SCTPCTL_BUFFER_SPLITTING_MIN, SCTPCTL_BUFFER_SPLITTING_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_initial_cwnd), SCTPCTL_INITIAL_CWND_MIN, SCTPCTL_INITIAL_CWND_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_rttvar_bw), SCTPCTL_RTTVAR_BW_MIN, SCTPCTL_RTTVAR_BW_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_rttvar_rtt), SCTPCTL_RTTVAR_RTT_MIN, SCTPCTL_RTTVAR_RTT_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_rttvar_eqret), SCTPCTL_RTTVAR_EQRET_MIN, SCTPCTL_RTTVAR_EQRET_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_mobility_base), SCTPCTL_MOBILITY_BASE_MIN, SCTPCTL_MOBILITY_BASE_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_mobility_fasthandoff), SCTPCTL_MOBILITY_FASTHANDOFF_MIN, SCTPCTL_MOBILITY_FASTHANDOFF_MAX);
RANGECHK(SCTP_BASE_SYSCTL(sctp_udp_tunneling_for_client_enable), SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MIN, SCTPCTL_UDP_TUNNELING_FOR_CLIENT_ENABLE_MAX);
@ -1100,6 +1106,18 @@ SYSCTL_VNET_PROC(_net_inet_sctp, OID_AUTO, initial_cwnd, CTLTYPE_UINT | CTLFLAG_
&SCTP_BASE_SYSCTL(sctp_initial_cwnd), 0, sysctl_sctp_check, "IU",
SCTPCTL_INITIAL_CWND_DESC);
SYSCTL_VNET_PROC(_net_inet_sctp, OID_AUTO, rttvar_bw, CTLTYPE_UINT | CTLFLAG_RW,
&SCTP_BASE_SYSCTL(sctp_rttvar_bw), 0, sysctl_sctp_check, "IU",
SCTPCTL_RTTVAR_BW_DESC);
SYSCTL_VNET_PROC(_net_inet_sctp, OID_AUTO, rttvar_rtt, CTLTYPE_UINT | CTLFLAG_RW,
&SCTP_BASE_SYSCTL(sctp_rttvar_rtt), 0, sysctl_sctp_check, "IU",
SCTPCTL_RTTVAR_RTT_DESC);
SYSCTL_VNET_PROC(_net_inet_sctp, OID_AUTO, rttvar_eqret, CTLTYPE_UINT | CTLFLAG_RW,
&SCTP_BASE_SYSCTL(sctp_rttvar_eqret), 0, sysctl_sctp_check, "IU",
SCTPCTL_RTTVAR_EQRET_DESC);
#ifdef SCTP_DEBUG
SYSCTL_VNET_PROC(_net_inet_sctp, OID_AUTO, debug, CTLTYPE_UINT | CTLFLAG_RW,
&SCTP_BASE_SYSCTL(sctp_debug_on), 0, sysctl_sctp_check, "IU",

View File

@ -104,6 +104,11 @@ struct sctp_sysctl {
uint32_t sctp_mobility_base;
uint32_t sctp_mobility_fasthandoff;
uint32_t sctp_inits_include_nat_friendly;
#ifdef SCTP_HAS_RTTCC
uint32_t sctp_rttvar_bw;
uint32_t sctp_rttvar_rtt;
uint32_t sctp_rttvar_eqret;
#endif
#if defined(SCTP_LOCAL_TRACE_BUF)
struct sctp_log sctp_log;
#endif
@ -514,6 +519,25 @@ struct sctp_sysctl {
#define SCTPCTL_INITIAL_CWND_MIN 0
#define SCTPCTL_INITIAL_CWND_MAX 0xffffffff
#define SCTPCTL_INITIAL_CWND_DEFAULT 3
#ifdef SCTP_HAS_RTTCC
/* rttvar smooth avg for bw calc */
#define SCTPCTL_RTTVAR_BW_DESC "Shift amount for bw smothing on rtt calc"
#define SCTPCTL_RTTVAR_BW_MIN 0
#define SCTPCTL_RTTVAR_BW_MAX 32
#define SCTPCTL_RTTVAR_BW_DEFAULT 4
/* rttvar smooth avg for bw calc */
#define SCTPCTL_RTTVAR_RTT_DESC "Shift amount for rtt smothing on rtt calc"
#define SCTPCTL_RTTVAR_RTT_MIN 0
#define SCTPCTL_RTTVAR_RTT_MAX 32
#define SCTPCTL_RTTVAR_RTT_DEFAULT 5
#define SCTPCTL_RTTVAR_EQRET_DESC "When rtt and bw are unchanged return what"
#define SCTPCTL_RTTVAR_EQRET_MIN 0
#define SCTPCTL_RTTVAR_EQRET_MAX 1
#define SCTPCTL_RTTVAR_EQRET_DEFAULT 0
#endif
#if defined(SCTP_DEBUG)
/* debug: Configure debug output */

View File

@ -1015,7 +1015,10 @@ sctp_t3rxt_timer(struct sctp_inpcb *inp,
/* CMT FR loss recovery ended with the T3 */
net->fast_retran_loss_recovery = 0;
if ((stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) &&
(net->flight_size == 0)) {
(*stcb->asoc.cc_functions.sctp_cwnd_new_transmission_begins) (stcb, net);
}
/*
* setup the sat loss recovery that prevents satellite cwnd advance.
*/

View File

@ -533,6 +533,11 @@ struct sctp_assoc_value {
uint32_t assoc_value;
};
struct sctp_cc_option {
int option;
struct sctp_assoc_value aid_value;
};
struct sctp_stream_value {
sctp_assoc_t assoc_id;
uint16_t stream_id;
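
Usage note (illustration only, not part of the commit): a user-space caller fills in struct sctp_cc_option and passes it through the new SCTP_CC_OPTION socket option; the kernel forwards it to the module's sctp_cwnd_socket_option hook, or returns ENOTSUP when the module provides none. The option code EXAMPLE_CC_OPT and the exact header list below are assumptions, since valid codes are defined by the individual CC module.

#include <sys/types.h>
#include <sys/socket.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>

#define EXAMPLE_CC_OPT	0x1	/* made-up code; real codes are module-specific */

/* Set a module-specific CC option on one association. */
static int
set_cc_option(int fd, sctp_assoc_t assoc_id, uint32_t value)
{
	struct sctp_cc_option opt;

	memset(&opt, 0, sizeof(opt));
	opt.option = EXAMPLE_CC_OPT;
	opt.aid_value.assoc_id = assoc_id;
	opt.aid_value.assoc_value = value;
	/* Dispatched to sctp_cwnd_socket_option(stcb, 1, &opt) in the kernel. */
	return (setsockopt(fd, IPPROTO_SCTP, SCTP_CC_OPTION, &opt, sizeof(opt)));
}

/* Read it back; the kernel calls the hook with set == 0. */
static int
get_cc_option(int fd, sctp_assoc_t assoc_id, uint32_t *valuep)
{
	struct sctp_cc_option opt;
	socklen_t len = sizeof(opt);

	memset(&opt, 0, sizeof(opt));
	opt.option = EXAMPLE_CC_OPT;
	opt.aid_value.assoc_id = assoc_id;
	if (getsockopt(fd, IPPROTO_SCTP, SCTP_CC_OPTION, &opt, &len) < 0)
		return (-1);
	*valuep = opt.aid_value.assoc_value;
	return (0);
}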

View File

@ -1758,6 +1758,25 @@ sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
*optsize = sizeof(*av);
}
break;
case SCTP_CC_OPTION:
{
struct sctp_cc_option *cc_opt;
SCTP_CHECK_AND_CAST(cc_opt, optval, struct sctp_cc_option, *optsize);
SCTP_FIND_STCB(inp, stcb, cc_opt->aid_value.assoc_id);
if (stcb == NULL) {
error = EINVAL;
} else {
if (stcb->asoc.cc_functions.sctp_cwnd_socket_option == NULL) {
error = ENOTSUP;
} else {
error = (*stcb->asoc.cc_functions.sctp_cwnd_socket_option) (stcb, 0,
cc_opt);
*optsize = sizeof(*cc_opt);
}
SCTP_TCB_UNLOCK(stcb);
}
}
break;
/* RS - Get socket option for pluggable stream scheduling */
case SCTP_PLUGGABLE_SS:
{
@ -2929,6 +2948,7 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
case SCTP_PLUGGABLE_CC:
{
struct sctp_assoc_value *av;
struct sctp_nets *net;
SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
SCTP_FIND_STCB(inp, stcb, av->assoc_id);
@ -2939,6 +2959,11 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
case SCTP_CC_HTCP:
stcb->asoc.cc_functions = sctp_cc_functions[av->assoc_value];
stcb->asoc.congestion_control_module = av->assoc_value;
if (stcb->asoc.cc_functions.sctp_set_initial_cc_param != NULL) {
TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
stcb->asoc.cc_functions.sctp_set_initial_cc_param(stcb, net);
}
}
break;
default:
SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
@ -2962,6 +2987,24 @@ sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
}
}
}
case SCTP_CC_OPTION:
{
struct sctp_cc_option *cc_opt;
SCTP_CHECK_AND_CAST(cc_opt, optval, struct sctp_cc_option, optsize);
SCTP_FIND_STCB(inp, stcb, cc_opt->aid_value.assoc_id);
if (stcb == NULL) {
error = EINVAL;
} else {
if (stcb->asoc.cc_functions.sctp_cwnd_socket_option == NULL) {
error = ENOTSUP;
} else {
error = (*stcb->asoc.cc_functions.sctp_cwnd_socket_option) (stcb, 1,
cc_opt);
}
SCTP_TCB_UNLOCK(stcb);
}
}
break;
/* RS - Set socket option for pluggable stream scheduling */
case SCTP_PLUGGABLE_SS:

View File

@ -2495,7 +2495,11 @@ sctp_calculate_rto(struct sctp_tcb *stcb,
/* 1. calculate new RTT */
/************************/
/* get the current time */
(void)SCTP_GETTIME_TIMEVAL(&now);
if (stcb->asoc.use_precise_time) {
(void)SCTP_GETPTIME_TIMEVAL(&now);
} else {
(void)SCTP_GETTIME_TIMEVAL(&now);
}
timevalsub(&now, old);
/* store the current RTT in us */
net->rtt = (uint64_t) 10000000 *(uint64_t) now.tv_sec +